diff --git a/CHANGELOG.md b/CHANGELOG.md
index bbe3f50f7953f..e21bfe20cc6a5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,26 +9,35 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/))
 - Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865))
 - [Writable Warm] Add composite directory implementation and integrate it with FileCache ([#12782](https://github.com/opensearch-project/OpenSearch/pull/12782))
+- [Workload Management] Add QueryGroup schema ([#13669](https://github.com/opensearch-project/OpenSearch/pull/13669))
 - Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554))
 - Fix race condition while parsing derived fields from search definition ([#14445](https://github.com/opensearch-project/OpenSearch/pull/14445))
+- Add `strict_allow_templates` dynamic mapping option ([#14555](https://github.com/opensearch-project/OpenSearch/pull/14555))
 - Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439))
+- Create SystemIndexRegistry with helper method matchesSystemIndex ([#14415](https://github.com/opensearch-project/OpenSearch/pull/14415))
+- Print reason why parent task was cancelled ([#14604](https://github.com/opensearch-project/OpenSearch/issues/14604))
 - Flat object field use IndexOrDocValuesQuery to optimize query ([#14383](https://github.com/opensearch-project/OpenSearch/issues/14383))
+
 ### Dependencies
 - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442))
 - Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042))
 - Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356))
 - Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361))
-- Bump `reactor` from 3.5.17 to 3.5.18 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
-- Bump `reactor-netty` from 1.1.19 to 1.1.20 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
+- Bump `reactor` from 3.5.17 to 3.5.19 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697))
+- Bump `reactor-netty` from 1.1.19 to 1.1.21 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697))
 - Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396))
 - Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398))
 - Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399))
 - Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397))
-- Bump `opentelemetry` from 1.36.0 to 1.39.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457))
-- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506))
+- Bump `opentelemetry` from 1.36.0 to 1.40.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457), [#14674](https://github.com/opensearch-project/OpenSearch/pull/14674))
+- Bump `opentelemetry-semconv` from 1.25.0-alpha to 1.26.0-alpha ([#14674](https://github.com/opensearch-project/OpenSearch/pull/14674))
+- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506))
 - Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517))
 - Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610))
+- Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672))
+- Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673))
+- Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687))
 
 ### Changed
 - [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187))
@@ -37,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448))
 - Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575))
 - Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597))
+- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635))
 
 ### Deprecated
 
@@ -46,6 +56,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474))
 - Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379))
 - Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086))
+- Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes ([#10959](https://github.com/opensearch-project/OpenSearch/pull/10959))
 - Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155))
 - Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465))
 - Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190))
@@ -56,6 +67,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004))
 - Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552))
 - Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
+- Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206))
+- Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
 
 ### Security
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java
index fcadf35593ce6..9396797536052 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java
@@ -81,50 +81,52 @@ public void apply(Project project) {
         // tests
         Configuration testConfig = project.getConfigurations().create("restTestConfig");
         project.getConfigurations().create("restTests");
+
+        if (BuildParams.isInternal()) {
+            // core
+            Dependency restTestdependency = project.getDependencies().project(new HashMap<String, String>() {
+                {
+                    put("path", ":rest-api-spec");
+                    put("configuration", "restTests");
+                }
+            });
+            testConfig.withDependencies(s -> s.add(restTestdependency));
+        } else {
+            Dependency dependency = project.getDependencies().create("org.opensearch:rest-api-spec:" + VersionProperties.getOpenSearch());
+            testConfig.withDependencies(s -> s.add(dependency));
+        }
+
         Provider<CopyRestTestsTask> copyRestYamlTestTask = project.getTasks()
             .register("copyYamlTestsTask", CopyRestTestsTask.class, task -> {
                 task.includeCore.set(extension.restTests.getIncludeCore());
                 task.coreConfig = testConfig;
                 task.sourceSetName = SourceSet.TEST_SOURCE_SET_NAME;
-                if (BuildParams.isInternal()) {
-                    // core
-                    Dependency restTestdependency = project.getDependencies().project(new HashMap<String, String>() {
-                        {
-                            put("path", ":rest-api-spec");
-                            put("configuration", "restTests");
-                        }
-                    });
-                    project.getDependencies().add(task.coreConfig.getName(), restTestdependency);
-                } else {
-                    Dependency dependency = project.getDependencies()
-                        .create("org.opensearch:rest-api-spec:" + VersionProperties.getOpenSearch());
-                    project.getDependencies().add(task.coreConfig.getName(), dependency);
-                }
                 task.dependsOn(task.coreConfig);
             });
 
         // api
         Configuration specConfig = project.getConfigurations().create("restSpec"); // name chosen for passivity
         project.getConfigurations().create("restSpecs");
+
+        if (BuildParams.isInternal()) {
+            Dependency restSpecDependency = project.getDependencies().project(new HashMap<String, String>() {
+                {
+                    put("path", ":rest-api-spec");
+                    put("configuration", "restSpecs");
+                }
+            });
+            specConfig.withDependencies(s -> s.add(restSpecDependency));
+        } else {
+            Dependency dependency = project.getDependencies().create("org.opensearch:rest-api-spec:" + VersionProperties.getOpenSearch());
+            specConfig.withDependencies(s -> s.add(dependency));
+        }
+
         Provider<CopyRestApiTask> copyRestYamlSpecTask = project.getTasks()
             .register("copyRestApiSpecsTask", CopyRestApiTask.class, task -> {
                 task.includeCore.set(extension.restApi.getIncludeCore());
                 task.dependsOn(copyRestYamlTestTask);
                 task.coreConfig = specConfig;
                 task.sourceSetName = SourceSet.TEST_SOURCE_SET_NAME;
-                if (BuildParams.isInternal()) {
-                    Dependency restSpecDependency = project.getDependencies().project(new HashMap<String, String>() {
-                        {
-                            put("path", ":rest-api-spec");
-                            put("configuration", "restSpecs");
-                        }
-                    });
-                    project.getDependencies().add(task.coreConfig.getName(), restSpecDependency);
-                } else {
- Dependency dependency = project.getDependencies() - .create("org.opensearch:rest-api-spec:" + VersionProperties.getOpenSearch()); - project.getDependencies().add(task.coreConfig.getName(), dependency); - } task.dependsOn(task.coreConfig); }); diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a99bd4801b7f3..855ccc1f87413 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -7,8 +7,8 @@ bundled_jdk = 21.0.3+9 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.17.1 -jackson_databind = 2.17.1 +jackson = 2.17.2 +jackson_databind = 2.17.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 @@ -33,8 +33,8 @@ netty = 4.1.111.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.20 -reactor = 3.5.18 +reactor_netty = 1.1.21 +reactor = 3.5.19 # client dependencies httpclient5 = 5.2.1 @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.39.0 -opentelemetrysemconv = 1.25.0-alpha +opentelemetry = 1.40.0 +opentelemetrysemconv = 1.26.0-alpha diff --git a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index d64cd4917707c..2c3521197d7c4 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ 
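In the RestResourcesPlugin.java hunk above, the dependency wiring moves out of the `copyYamlTestsTask`/`copyRestApiSpecsTask` registration closures and into `Configuration.withDependencies`, so the `rest-api-spec` dependency is attached to the configuration itself and declared lazily, rather than as a side effect of configuring a task. A minimal sketch of that Gradle pattern follows; the plugin class, configuration name, and coordinates are placeholders, and only the Gradle API calls are real:

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.Dependency;

// Hypothetical plugin illustrating lazy dependency declaration.
public class LazySpecPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        Configuration demoConfig = project.getConfigurations().create("demoSpec");
        // The action runs when the dependency set is first realized (e.g. at
        // resolution time), not while the plugin is applied or a task is configured.
        demoConfig.withDependencies(deps -> {
            Dependency spec = project.getDependencies().create("org.example:demo-spec:1.0.0");
            deps.add(spec);
        });
    }
}
```

Keeping the declaration on the configuration means it still takes effect when the configuration is resolved without the copy task ever being configured, which is what the relocation in the hunk achieves.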
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f5bdef81deb70..c7f182843385d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961 +distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 diff --git a/gradlew b/gradlew index 1aa94a4269074..f5feea6d6b116 100755 --- a/gradlew +++ b/gradlew @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## # @@ -55,7 +57,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -84,7 +86,8 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum diff --git a/gradlew.bat b/gradlew.bat index 7101f8e4676fc..9b42019c7915b 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -13,6 +13,8 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. 
 @rem
+@rem SPDX-License-Identifier: Apache-2.0
+@rem
 
 @if "%DEBUG%"=="" @echo off
 @rem ##########################################################################
diff --git a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 b/libs/core/licenses/jackson-core-2.17.1.jar.sha1
deleted file mode 100644
index 82dab5981e652..0000000000000
--- a/libs/core/licenses/jackson-core-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.17.2.jar.sha1 b/libs/core/licenses/jackson-core-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..e15f2340980bc
--- /dev/null
+++ b/libs/core/licenses/jackson-core-2.17.2.jar.sha1
@@ -0,0 +1 @@
+969a35cb35c86512acbadcdbbbfb044c877db814
\ No newline at end of file
diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java
index 7aa3347ba4f4b..aa5b1a936b99d 100644
--- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java
+++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java
@@ -37,14 +37,18 @@
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Stack;
+import java.util.Set;
 import java.util.function.Consumer;
 
 import org.jcodings.specific.UTF8Encoding;
@@ -86,6 +90,7 @@ public final class Grok {
         UTF8Encoding.INSTANCE,
         Syntax.DEFAULT
     );
+    private static final int MAX_PATTERN_DEPTH_SIZE = 500;
 
     private static final int MAX_TO_REGEX_ITERATIONS = 100_000; // sanity limit
 
@@ -128,7 +133,7 @@ private Grok(
             expressionBytes.length,
             Option.DEFAULT,
             UTF8Encoding.INSTANCE,
-            message -> logCallBack.accept(message)
+            logCallBack::accept
         );
 
         List<GrokCaptureConfig> captureConfig = new ArrayList<>();
@@ -144,7 +149,7 @@ private Grok(
      */
     private void validatePatternBank() {
         for (String patternName : patternBank.keySet()) {
-            validatePatternBank(patternName, new Stack<>());
+            validatePatternBank(patternName);
         }
     }
 
@@ -156,33 +161,84 @@ private void validatePatternBank() {
      * a reference to another named pattern. This method will navigate to all these named patterns and
      * check for a circular reference.
      */
-    private void validatePatternBank(String patternName, Stack<String> path) {
-        String pattern = patternBank.get(patternName);
-        boolean isSelfReference = pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":");
-        if (isSelfReference) {
-            throwExceptionForCircularReference(patternName, pattern);
-        } else if (path.contains(patternName)) {
-            // current pattern name is already in the path, fetch its predecessor
-            String prevPatternName = path.pop();
-            String prevPattern = patternBank.get(prevPatternName);
-            throwExceptionForCircularReference(prevPatternName, prevPattern, patternName, path);
-        }
-        path.push(patternName);
-        for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) {
-            int begin = i + 2;
-            int syntaxEndIndex = pattern.indexOf('}', begin);
-            if (syntaxEndIndex == -1) {
-                throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]");
+    private void validatePatternBank(String initialPatternName) {
+        Deque<Frame> stack = new ArrayDeque<>();
+        Set<String> visitedPatterns = new HashSet<>();
+        Map<String, List<String>> pathMap = new HashMap<>();
+
+        List<String> initialPath = new ArrayList<>();
+        initialPath.add(initialPatternName);
+        pathMap.put(initialPatternName, initialPath);
+        stack.push(new Frame(initialPatternName, initialPath, 0));
+
+        while (!stack.isEmpty()) {
+            Frame frame = stack.peek();
+            String patternName = frame.patternName;
+            List<String> path = frame.path;
+            int startIndex = frame.startIndex;
+            String pattern = patternBank.get(patternName);
+
+            if (visitedPatterns.contains(patternName)) {
+                stack.pop();
+                continue;
+            }
+
+            visitedPatterns.add(patternName);
+            boolean foundDependency = false;
+
+            for (int i = startIndex; i < pattern.length(); i++) {
+                if (pattern.startsWith("%{", i)) {
+                    int begin = i + 2;
+                    int syntaxEndIndex = pattern.indexOf('}', begin);
+                    if (syntaxEndIndex == -1) {
+                        throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]");
+                    }
+
+                    int semanticNameIndex = pattern.indexOf(':', begin);
+                    int end = semanticNameIndex == -1 ? syntaxEndIndex : Math.min(syntaxEndIndex, semanticNameIndex);
+
+                    String dependsOnPattern = pattern.substring(begin, end);
+
+                    if (dependsOnPattern.equals(patternName)) {
+                        throwExceptionForCircularReference(patternName, pattern);
+                    }
+
+                    if (pathMap.containsKey(dependsOnPattern)) {
+                        throwExceptionForCircularReference(patternName, pattern, dependsOnPattern, path.subList(0, path.size() - 1));
+                    }
+
+                    List<String> newPath = new ArrayList<>(path);
+                    newPath.add(dependsOnPattern);
+                    pathMap.put(dependsOnPattern, newPath);
+
+                    stack.push(new Frame(dependsOnPattern, newPath, 0));
+                    frame.startIndex = i + 1;
+                    foundDependency = true;
+                    break;
+                }
             }
-            int semanticNameIndex = pattern.indexOf(':', begin);
-            int end = syntaxEndIndex;
-            if (semanticNameIndex != -1) {
-                end = Math.min(syntaxEndIndex, semanticNameIndex);
+
+            if (!foundDependency) {
+                pathMap.remove(patternName);
+                stack.pop();
+            }
+
+            if (stack.size() > MAX_PATTERN_DEPTH_SIZE) {
+                throw new IllegalArgumentException("Pattern references exceeded maximum depth of " + MAX_PATTERN_DEPTH_SIZE);
             }
-            String dependsOnPattern = pattern.substring(begin, end);
-            validatePatternBank(dependsOnPattern, path);
         }
-        path.pop();
+    }
+
+    private static class Frame {
+        String patternName;
+        List<String> path;
+        int startIndex;
+
+        Frame(String patternName, List<String> path, int startIndex) {
+            this.patternName = patternName;
+            this.path = path;
+            this.startIndex = startIndex;
+        }
     }
 
     private static void throwExceptionForCircularReference(String patternName, String pattern) {
@@ -192,13 +248,13 @@ private static void throwExceptionForCircularReference(String patternName, Strin
     private static void throwExceptionForCircularReference(
         String patternName,
         String pattern,
-        String originPatterName,
-        Stack<String> path
+        String originPatternName,
+        List<String> path
     ) {
         StringBuilder message = new StringBuilder("circular reference in pattern [");
         message.append(patternName).append("][").append(pattern).append("]");
-        if (originPatterName != null) {
-            message.append(" back to pattern [").append(originPatterName).append("]");
+        if (originPatternName != null) {
+            message.append(" back to pattern [").append(originPatternName).append("]");
         }
         if (path != null && path.size() > 1) {
             message.append(" via patterns [").append(String.join("=>", path)).append("]");
@@ -217,9 +273,7 @@ private String groupMatch(String name, Region region, String pattern) {
             int begin = region.getBeg(number);
             int end = region.getEnd(number);
             return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
-        } catch (StringIndexOutOfBoundsException e) {
-            return null;
-        } catch (ValueException e) {
+        } catch (StringIndexOutOfBoundsException | ValueException e) {
             return null;
         }
     }
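The Grok.java hunk above replaces the recursive pattern-bank walk with an explicit `Deque`-based traversal plus a hard depth cap (`MAX_PATTERN_DEPTH_SIZE = 500`), which avoids a `StackOverflowError` on deeply nested or adversarial pattern banks. The sketch below shows the same idea in a self-contained form; it is a hedged illustration, not the patched `Grok` class, and it uses a regex to pull out `%{NAME}` references instead of the index-based scan above:

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Standalone sketch: iterative cycle/depth check over a name -> pattern map.
public final class PatternCycleChecker {
    private static final int MAX_DEPTH = 500;
    private static final Pattern REF = Pattern.compile("%\\{(\\w+)[:}]");

    public static void check(Map<String, String> bank, String start) {
        Deque<List<String>> stack = new ArrayDeque<>();
        stack.push(List.of(start));
        while (!stack.isEmpty()) {
            List<String> path = stack.pop();
            if (path.size() > MAX_DEPTH) {
                throw new IllegalArgumentException("Pattern references exceeded maximum depth of " + MAX_DEPTH);
            }
            String name = path.get(path.size() - 1);
            Matcher m = REF.matcher(bank.getOrDefault(name, ""));
            while (m.find()) {
                String dep = m.group(1);
                if (path.contains(dep)) {
                    // The pattern reappears on its own reference path: a cycle.
                    throw new IllegalArgumentException(
                        "circular reference in pattern [" + name + "] via [" + String.join("=>", path) + "]"
                    );
                }
                List<String> next = new ArrayList<>(path);
                next.add(dep);
                stack.push(next); // explicit stack instead of recursion
            }
        }
    }
}
```

Enumerating whole paths like this is exponential in the worst case and only suitable for small banks; the production code instead keeps one resumable frame per pattern via the `Frame` class defined in the hunk. The GrokTests change that follows exercises both the circular-reference message and the 500-deep reference chain.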
diff --git a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
index a37689e051c67..8476d541aa46e 100644
--- a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
+++ b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
@@ -377,6 +377,16 @@ public void testCircularReference() {
             "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] " + "via patterns [NAME1=>NAME2=>NAME3=>NAME4]",
             e.getMessage()
         );
+
+        e = expectThrows(IllegalArgumentException.class, () -> {
+            Map<String, String> bank = new TreeMap<>();
+            for (int i = 1; i <= 501; i++) {
+                bank.put("NAME" + i, "!!!%{NAME" + (i + 1) + "}!!!");
+            }
+            String pattern = "%{NAME1}";
+            new Grok(bank, pattern, false, logger::warn);
+        });
+        assertEquals("Pattern references exceeded maximum depth of 500", e.getMessage());
     }
 
     public void testMalformedPattern() {
diff --git a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
deleted file mode 100644
index 82dab5981e652..0000000000000
--- a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..e15f2340980bc
--- /dev/null
+++ b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1
@@ -0,0 +1 @@
+969a35cb35c86512acbadcdbbbfb044c877db814
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
deleted file mode 100644
index ff42ed1f92cfe..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..069e088413ef1
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1
@@ -0,0 +1 @@
+57fa7c1b5104bbc4599278d13933a937ee058e68
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
deleted file mode 100644
index 47d19067cf2a6..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-89683ac4f0a0c2c4f69ea56b90480ed40266dac8
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..28d8c8382aed3
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1
@@ -0,0 +1 @@
+20e956b9b6f67138edd39fab7a506ded19638bcb
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
deleted file mode 100644
index 7946e994c7104..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4c7b8a9ea3f398116a75c146b982b22afebc4ee
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..f3e25b7eb253c
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1
@@ -0,0 +1 @@
+78d2c73dbec62044d7cf3b544b2e0d24a1a093b0
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1
deleted file mode 100644
index
4ceead1b7ae4f..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index bcf5c07ea8c64..a836124f94b41 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } dependencies { - api "com.github.spullara.mustache.java:compiler:0.9.13" + api "com.github.spullara.mustache.java:compiler:0.9.14" } restResources { diff --git a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 deleted file mode 100644 index 70d53aac260eb..0000000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60666500a7dce7a5d3e17c09b46ea6f037192bd5 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 new file mode 100644 index 0000000000000..29069ac90817a --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 @@ -0,0 +1 @@ +e6df8b5aabb80d6eb6d8fef312a56d66b7659ba6 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git 
a/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java index c21b89be4dcca..bbe8b8fc40dac 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java @@ -138,17 +138,15 @@ public int getTopNSize() { * @param size the wanted top N size */ public void validateTopNSize(final int size) { - if (size > QueryInsightsSettings.MAX_N_SIZE) { + if (size < 1 || size > QueryInsightsSettings.MAX_N_SIZE) { throw new IllegalArgumentException( "Top N size setting for [" + metricType + "]" - + " should be smaller than max top N size [" + + " should be between 1 and " + QueryInsightsSettings.MAX_N_SIZE - + "was (" + + ", was (" + size - + " > " - + QueryInsightsSettings.MAX_N_SIZE + ")" ); } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java index 3efd4c86833cc..8478fe1621698 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -78,6 +78,10 @@ public void testValidateTopNSize() { assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); } + 
public void testValidateNegativeTopNSize() { + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(-1); }); + } + public void testGetTopQueriesWhenNotEnabled() { topQueriesService.setEnabled(false); assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 13b711019ff2a..0f822a02e05d8 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -69,7 +69,7 @@ dependencies { // Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart, // selected the higher version which is 2.5.0 api 'net.minidev:json-smart:2.5.0' - api 'net.minidev:accessors-smart:2.5.0' + api 'net.minidev:accessors-smart:2.5.1' api "org.ow2.asm:asm:${versions.asm}" // End of transitive dependencies for azure-identity api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 deleted file mode 100644 index 1578c94fcdc7b..0000000000000 --- a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aca011492dfe9c26f4e0659028a4fe0970829dd8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 new file mode 100644 index 0000000000000..8f7452437323d --- /dev/null +++ b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 @@ -0,0 +1 @@ +19b820261eb2e7de7d5bde11d1c06e4501dd7e5f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 deleted file mode 100644 index 3915ab2616beb..0000000000000 --- 
a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6a168dba62aa63743b9e2b83f4e0f0dfdc143d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f9c31c168926d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +ad58f5bd089e743ac6e5999b2d1e3cf8515cea9a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 deleted file mode 100644 index db26ebbf738f7..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0969b0c3cb8c75d759e9a6c585c44c9b9f3a4f75 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..a61bf643d69e6 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 @@ -0,0 +1 @@ +267b85e9ba2892a37be6d80aa9ca1438a0d8c210 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 deleted file mode 100644 index bb8ecfe34d295..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77e7bf0e64dfcf53bfdcf2764ad7ab92b78a4da \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..d9d7975146c22 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +c2978b818ef2f2b2738b387c143624eab611d917 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..21c16c7430016 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 @@ -0,0 +1 @@ +acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 new file mode 100644 index 
0000000000000..648df22873d56 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 @@ -0,0 +1 @@ +b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 deleted file mode 100644 index 415fe8f3d8aaa..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -396b89a66526bd5694ad3bef4604b876177e0b44 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..04ec81edf969c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 @@ -0,0 +1 @@ +6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 deleted file mode 100644 index 9c3c9f43d153c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a1fd96155e1b58726300bbf8457630713035e51 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..bcd7c886b5f6c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 @@ -0,0 +1 @@ +43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 deleted file mode 100644 index 115d4ccb1f34b..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0601fb1c06f661afeffbc73a1dbe29797b2f13b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..9716ec518c886 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 @@ -0,0 +1 @@ +bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 deleted file mode 100644 index a10b92995becd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -570d71e39e36fe2caad142557bde0c11fcdb3b92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..c0e79b05aa675 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 deleted file mode 100644 index f43393104296a..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5b528f8d6f8531836eabba698979516964b24ed \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..1df0ad183c475 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 @@ -0,0 +1 @@ +a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 deleted file mode 100644 index 5adba2ba0f342..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fc0e4983253ea58430c3d24b6b3c5c95f84dc9 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..ebeb639a8459c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 @@ -0,0 +1 @@ +8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 deleted file mode 100644 index ea9c293f25025..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2b8571e36b11c3153d31ec87ec69cc168af8036 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..b630c808d4763 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 deleted file mode 100644 index dcf23f16ac89f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a8947a2e28924ad9374e319150a23837926ca4b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..eda90dc825e6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 @@ -0,0 +1 @@ +006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 deleted file mode 100644 index f603af04d8012..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba9afdf3ef1ea51e42999fd68c959e3ceb219399 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..cdd7dc6551b33 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 @@ -0,0 +1 @@ +59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 deleted file mode 100644 index f9419f6ccfbee..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb8168627bf0059445f61081eaa47c4ab787fc2e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..668291498bbae --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 deleted file mode 100644 index 63269f239eacd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6b45155399bc9fa563945f3e3a77416d7165948 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..74f0786e21954 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 @@ -0,0 +1 @@ +1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 deleted file mode 100644 index f18c8259c1adc..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522d46926cc06a4c18829da7e4c4340bdf5673c3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..23ef1bf6e6b2c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 @@ -0,0 +1 @@ +a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 deleted file mode 100644 index 03b81424f46d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b72722a5bbea5f46319bf08b2caed5b8f987a92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..aea753f0df18b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 @@ -0,0 +1 @@ +5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 deleted file mode 100644 index 7cf8e7e8ede28..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76b3d4ca0a8f20b27c1590ceece54f0c7fb5857e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..7124dcb31da3f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..21c16c7430016 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 @@ -0,0 +1 @@ +acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..648df22873d56 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 @@ -0,0 +1 @@ +b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-1.3.18.md b/release-notes/opensearch.release-notes-1.3.18.md new file mode 100644 index 0000000000000..75c38dd285a63 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.18.md @@ -0,0 +1,4 @@ +## 2024-07-09 Version 1.3.18 Release Notes + +### Upgrades +- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml new file mode 100644 index 0000000000000..b3899e295eb61 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml @@ -0,0 +1,155 @@ +--- +"Index documents with setting dynamic parameter to strict_allow_templates in the mapping of the index": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0.0" + + - do: + indices.create: + index: test_1 + body: + mappings: + dynamic: strict_allow_templates + dynamic_templates: [ + { + strings: { + "match": "stringField*", + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + }, + { + object: { + "match": "objectField*", + "match_mapping_type": "object", + "mapping": { + "type": "object", + "properties": { + "bar1": { + "type": "keyword" + }, + "bar2": { + "type": "text" + } + } + } + } + }, + { + boolean: { + "match": "booleanField*", + "match_mapping_type": "boolean", + "mapping": { + "type": "boolean" + } + } + }, + { + double: { + "match": "doubleField*", + "match_mapping_type": "double", + "mapping": { + "type": "double" + } + } + }, + { + long: { + "match": "longField*", + "match_mapping_type": "long", + "mapping": { + "type": "long" + } + } + }, + { + array: { + "match": "arrayField*", + "mapping": { + "type": "keyword" + } + } + }, + { + date: { + "match": "dateField*", + "match_mapping_type": "date", + "mapping": { + "type": "date" + } + } + } + ] + properties: + test1: + type: text + + - do: + catch: /mapping set to strict_allow_templates, dynamic introduction of \[test2\] within \[\_doc\] is not allowed/ + index: + index: test_1 + id: 1 + body: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + test1: test1, + test2: test2 + } + + - do: + index: + index: test_1 + id: 1 + body: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + booleanField: true, + doubleField: 1.0, + longField: 100, + arrayField: ["1","2"], + dateField: "2024-06-25T05:11:51.243Z", + test1: test1 + } + + - do: + get: + index: test_1 + id: 1 + - match: { _source: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + booleanField: true, + doubleField: 1.0, + longField: 100, + arrayField: [ "1","2" ], + dateField: "2024-06-25T05:11:51.243Z", + test1: test1 + } + } + + - do: + indices.get_mapping: { + index: test_1 + } + + - match: {test_1.mappings.dynamic: strict_allow_templates} + - match: {test_1.mappings.properties.stringField.type: 
keyword} + - match: {test_1.mappings.properties.objectField.properties.bar1.type: keyword} + - match: {test_1.mappings.properties.objectField.properties.bar2.type: text} + - match: {test_1.mappings.properties.booleanField.type: boolean} + - match: {test_1.mappings.properties.doubleField.type: double} + - match: {test_1.mappings.properties.longField.type: long} + - match: {test_1.mappings.properties.arrayField.type: keyword} + - match: {test_1.mappings.properties.dateField.type: date} + - match: {test_1.mappings.properties.test1.type: text} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index ca7a21df20ea4..f579891478b19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -159,3 +159,34 @@ setup: indices.get_mapping: {} - match: {test_index1.mappings.properties.text.type: text} + +--- +"post a mapping with setting dynamic to strict_allow_templates": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0.0" + - do: + indices.put_mapping: + index: test_index1 + body: + dynamic: strict_allow_templates + dynamic_templates: [ + { + strings: { + "match": "foo*", + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + } + ] + properties: + test1: + type: text + + - do: + indices.get_mapping: {} + + - match: {test_index1.mappings.dynamic: strict_allow_templates} + - match: {test_index1.mappings.properties.test1.type: text} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 25d3dd160e031..6a946fb264560 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -10,7 +10,12 @@ setup: index_prefixes: min_chars: 2 max_chars: 5 - + text_with_pos_inc_gap: + type: text + position_increment_gap: 201 + index_prefixes: + min_chars: 2 + max_chars: 5 - do: index: index: test @@ -23,6 +28,18 @@ setup: id: 2 body: { text: sentence with UPPERCASE WORDS } + - do: + index: + index: test + id: 3 + body: { text: ["foo", "b-12"] } + + - do: + index: + index: test + id: 4 + body: { text_with_pos_inc_gap: ["foo", "b-12"] } + - do: indices.refresh: index: [test] @@ -116,3 +133,36 @@ setup: ] - match: {hits.total: 1} + +# related issue: https://github.com/opensearch-project/OpenSearch/issues/9203 +--- +"search index prefixes with multiple values": + - skip: + version: " - 2.15.99" + reason: "the bug was fixed since 2.16.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text: "b-12" + + - match: {hits.total: 1} + +--- +"search index prefixes with multiple values and custom position_increment_gap": + - skip: + version: " - 2.15.99" + reason: "the bug was fixed since 2.16.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text_with_pos_inc_gap: "b-12" + + - match: {hits.total: 1} diff --git a/server/licenses/jackson-core-2.17.1.jar.sha1 b/server/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/server/licenses/jackson-core-2.17.1.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/server/licenses/jackson-core-2.17.2.jar.sha1 b/server/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/server/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 deleted file mode 100644 index ff42ed1f92cfe..0000000000000 --- a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..069e088413ef1 --- /dev/null +++ b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 @@ -0,0 +1 @@ +57fa7c1b5104bbc4599278d13933a937ee058e68 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 deleted file mode 100644 index 47d19067cf2a6..0000000000000 --- a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..28d8c8382aed3 --- /dev/null +++ b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 @@ -0,0 +1 @@ +20e956b9b6f67138edd39fab7a506ded19638bcb \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 deleted file mode 100644 index 7946e994c7104..0000000000000 --- a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f3e25b7eb253c --- /dev/null +++ b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +78d2c73dbec62044d7cf3b544b2e0d24a1a093b0 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.18.jar.sha1 b/server/licenses/reactor-core-3.5.18.jar.sha1 deleted file mode 100644 index c503f768beafa..0000000000000 --- a/server/licenses/reactor-core-3.5.18.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a8157f7d66d71a407eb77ba12bce72a38c5b4da \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.19.jar.sha1 b/server/licenses/reactor-core-3.5.19.jar.sha1 new file mode 100644 index 0000000000000..04b59d2faae04 --- /dev/null +++ b/server/licenses/reactor-core-3.5.19.jar.sha1 @@ -0,0 +1 @@ +1d49ce1d0df79f28d3927da5f4c46a895b94335f \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index bdb36b62ada21..d8a4bed4740bf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -327,7 +327,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { mainAction.startSubTask(taskId, subRequest, future); TransportException te = expectThrows(TransportException.class, future::actionGet); assertThat(te.getCause(), instanceOf(TaskCancelledException.class)); - assertThat(te.getCause().getMessage(), equalTo("The parent task was cancelled, shouldn't start any child tasks")); + assertThat(te.getCause().getMessage(), equalTo("The parent task was cancelled, shouldn't start any child tasks, by user request")); allowEntireRequest(rootRequest); waitForRootTask(rootTaskFuture); ensureAllBansRemoved(); @@ -386,7 +386,7 @@ static void waitForRootTask(ActionFuture rootTask) { assertThat( cause.getMessage(), anyOf( - equalTo("The parent task was cancelled, shouldn't start any child tasks"), + equalTo("The parent task was cancelled, shouldn't start any child tasks, by user request"), containsString("Task cancelled before it started:"), equalTo("Task was cancelled while executing") ) diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index c7fd263bda56a..bb51c42252448 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; +import org.opensearch.cluster.metadata.QueryGroupMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.ViewMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; @@ -214,6 +215,8 @@ public static List getNamedWriteables() { DecommissionAttributeMetadata::new, DecommissionAttributeMetadata::readDiffFrom ); + + registerMetadataCustom(entries, QueryGroupMetadata.TYPE, QueryGroupMetadata::new, QueryGroupMetadata::readDiffFrom); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); return entries; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 36eabd51ffda1..62885a12222be 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -284,7 +284,6 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest ) ); ClusterState clusterState = remoteClusterStateService.getClusterStateUsingDiff( - request.getClusterName(), manifest, lastSeen, transportService.getLocalNode().getId() diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index e3f63b1c27b83..2a54f6444ffda 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1368,6 +1368,25 @@ public Builder removeDataStream(String name) { return this; } + public Builder queryGroups(final Map queryGroups) { + 
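            // Editor's note (illustrative sketch, not part of the diff): together with the
            // registerMetadataCustom(...) call added in ClusterModule above, these Builder hooks are
            // what lets callers attach query groups to the cluster state. Assuming a ResourceType
            // named "jvm" exists (as in the QueryGroup sample schema below), usage would look
            // roughly like:
            //
            //     Metadata updated = Metadata.builder(currentMetadata)
            //         .put(new QueryGroup("analytics", QueryGroup.ResiliencyMode.ENFORCED, Map.of(ResourceType.fromName("jvm"), 0.4)))
            //         .build();
            //     QueryGroupMetadata qgm = (QueryGroupMetadata) updated.custom(QueryGroupMetadata.TYPE);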
this.customs.put(QueryGroupMetadata.TYPE, new QueryGroupMetadata(queryGroups));
+            return this;
+        }
+
+        public Builder put(final QueryGroup queryGroup) {
+            Objects.requireNonNull(queryGroup, "queryGroup should not be null");
+            Map<String, QueryGroup> existing = new HashMap<>(getQueryGroups());
+            existing.put(queryGroup.get_id(), queryGroup);
+            return queryGroups(existing);
+        }
+
+        private Map<String, QueryGroup> getQueryGroups() {
+            return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE))
+                .map(o -> (QueryGroupMetadata) o)
+                .map(QueryGroupMetadata::queryGroups)
+                .orElse(Collections.emptyMap());
+        }
+
         private Map<String, View> getViews() {
             return Optional.ofNullable(customs.get(ViewMetadata.TYPE))
                 .map(o -> (ViewMetadata) o)
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java
new file mode 100644
index 0000000000000..beaab198073df
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java
@@ -0,0 +1,317 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.cluster.AbstractDiffable;
+import org.opensearch.cluster.Diff;
+import org.opensearch.common.UUIDs;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.search.ResourceType;
+import org.joda.time.Instant;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Class to define the QueryGroup schema, for example:
+ * {
+ *   "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga",
+ *   "resourceLimits": {
+ *     "jvm": 0.4
+ *   },
+ *   "resiliency_mode": "enforced",
+ *   "name": "analytics",
+ *   "updatedAt": 4513232415
+ * }
+ */
+@ExperimentalApi
+public class QueryGroup extends AbstractDiffable<QueryGroup> implements ToXContentObject {
+
+    private static final int MAX_CHARS_ALLOWED_IN_NAME = 50;
+    private final String name;
+    private final String _id;
+    private final ResiliencyMode resiliencyMode;
+    // epoch timestamp in milliseconds
+    private final long updatedAtInMillis;
+    private final Map<ResourceType, Object> resourceLimits;
+
+    public QueryGroup(String name, ResiliencyMode resiliencyMode, Map<ResourceType, Object> resourceLimits) {
+        this(name, UUIDs.randomBase64UUID(), resiliencyMode, resourceLimits, Instant.now().getMillis());
+    }
+
+    public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map<ResourceType, Object> resourceLimits, long updatedAt) {
+        Objects.requireNonNull(name, "QueryGroup.name can't be null");
+        Objects.requireNonNull(resourceLimits, "QueryGroup.resourceLimits can't be null");
+        Objects.requireNonNull(resiliencyMode, "QueryGroup.resiliencyMode can't be null");
+        Objects.requireNonNull(_id, "QueryGroup._id can't be null");
+
+        if (name.length() > MAX_CHARS_ALLOWED_IN_NAME) {
+            throw new IllegalArgumentException("QueryGroup.name shouldn't be more than " + MAX_CHARS_ALLOWED_IN_NAME + " chars long");
+        }
+
+        if (resourceLimits.isEmpty()) {
+            throw new IllegalArgumentException("QueryGroup.resourceLimits should at least have 1 resource limit");
+        }
+        validateResourceLimits(resourceLimits);
+        if (!isValid(updatedAt)) {
+            throw new IllegalArgumentException("QueryGroup.updatedAtInMillis is not a valid epoch");
+        }
+
+        this.name = name;
+        this._id = _id;
+        this.resiliencyMode = resiliencyMode;
+        this.resourceLimits = resourceLimits;
+        this.updatedAtInMillis = updatedAt;
+    }
+
+    private static boolean isValid(long updatedAt) {
+        long minValidTimestamp = Instant.ofEpochMilli(0L).getMillis();
+
+        // Use Instant.now() to get the current time in milliseconds since epoch
+        long currentMillis = Instant.now().getMillis();
+
+        // Check if the timestamp is within a reasonable range
+        return minValidTimestamp <= updatedAt && updatedAt <= currentMillis;
+    }
+
+    public QueryGroup(StreamInput in) throws IOException {
+        this(
+            in.readString(),
+            in.readString(),
+            ResiliencyMode.fromName(in.readString()),
+            in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readGenericValue),
+            in.readLong()
+        );
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(name);
+        out.writeString(_id);
+        out.writeString(resiliencyMode.getName());
+        out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeGenericValue);
+        out.writeLong(updatedAtInMillis);
+    }
+
+    private void validateResourceLimits(Map<ResourceType, Object> resourceLimits) {
+        for (Map.Entry<ResourceType, Object> resource : resourceLimits.entrySet()) {
+            Double threshold = (Double) resource.getValue();
+            Objects.requireNonNull(resource.getKey(), "resourceName can't be null");
+            Objects.requireNonNull(threshold, "resource limit threshold for " + resource.getKey().getName() + " : can't be null");
+
+            if (Double.compare(threshold, 1.0) > 0) {
+                throw new IllegalArgumentException("resource value should be less than 1.0");
+            }
+        }
+    }
+
+    @Override
+    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+        builder.startObject();
+        builder.field("_id", _id);
+        builder.field("name", name);
+        builder.field("resiliency_mode", resiliencyMode.getName());
+        builder.field("updatedAt", updatedAtInMillis);
+        // write resource limits
+        builder.startObject("resourceLimits");
+        for (ResourceType resourceType : ResourceType.values()) {
+            if (resourceLimits.containsKey(resourceType)) {
+                builder.field(resourceType.getName(), resourceLimits.get(resourceType));
+            }
+        }
+        builder.endObject();
+
+        builder.endObject();
+        return builder;
+    }
+
+    public static QueryGroup fromXContent(final XContentParser parser) throws IOException {
+        if (parser.currentToken() == null) { // fresh parser? move to the first token
+            parser.nextToken();
+        }
+
+        Builder builder = builder();
+
+        XContentParser.Token token = parser.currentToken();
+
+        if (token != XContentParser.Token.START_OBJECT) {
+            throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]");
+        }
+
+        String fieldName = "";
+        // Map to hold resources
+        final Map<ResourceType, Object> resourceLimits = new HashMap<>();
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                fieldName = parser.currentName();
+            } else if (token.isValue()) {
+                if (fieldName.equals("_id")) {
+                    builder._id(parser.text());
+                } else if (fieldName.equals("name")) {
+                    builder.name(parser.text());
+                } else if (fieldName.equals("resiliency_mode")) {
+                    builder.mode(parser.text());
+                } else if (fieldName.equals("updatedAt")) {
+                    builder.updatedAt(parser.longValue());
+                } else {
+                    throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup");
+                }
+            } else if (token == XContentParser.Token.START_OBJECT) {
+
+                if (!fieldName.equals("resourceLimits")) {
+                    throw new IllegalArgumentException(
+                        "QueryGroup.resourceLimits is an object and expected token was { but found " + token
+                    );
+                }
+
+                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                    if (token == XContentParser.Token.FIELD_NAME) {
+                        fieldName = parser.currentName();
+                    } else {
+                        resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue());
+                    }
+                }
+
+            }
+        }
+        builder.resourceLimits(resourceLimits);
+        return builder.build();
+    }
+
+    public static Diff<QueryGroup> readDiff(final StreamInput in) throws IOException {
+        return readDiffFrom(QueryGroup::new, in);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        QueryGroup that = (QueryGroup) o;
+        return Objects.equals(name, that.name)
+            && Objects.equals(resourceLimits, that.resourceLimits)
+            && Objects.equals(_id, that._id)
+            && updatedAtInMillis == that.updatedAtInMillis;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, resourceLimits, updatedAtInMillis, _id);
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public ResiliencyMode getResiliencyMode() {
+        return resiliencyMode;
+    }
+
+    public Map<ResourceType, Object> getResourceLimits() {
+        return resourceLimits;
+    }
+
+    public String get_id() {
+        return _id;
+    }
+
+    public long getUpdatedAtInMillis() {
+        return updatedAtInMillis;
+    }
+
+    /**
+     * Builder method for {@link QueryGroup}
+     * @return Builder object
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * This enum models the different QueryGroup resiliency modes
+     * SOFT - means this query group can consume more than its assigned resource limits if the node is not in duress
+     * ENFORCED - means this query group will never breach the assigned limits and tasks will be cancelled as soon as the limits are breached
+     * MONITOR - means this query group will not cause any cancellations but will just log the eligible task cancellations
+     */
+    @ExperimentalApi
+    public enum ResiliencyMode {
+        SOFT("soft"),
+        ENFORCED("enforced"),
+        MONITOR("monitor");
+
+        private final String name;
+
+        ResiliencyMode(String mode) {
+            this.name = mode;
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        public static ResiliencyMode fromName(String s) {
+            for (ResiliencyMode mode : values()) {
+                if (mode.getName().equalsIgnoreCase(s)) return mode;
+            }
+            throw new IllegalArgumentException("Invalid value for ResiliencyMode: " + s);
+        }
+
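+        // Editor's note (illustrative): modes are resolved case-insensitively via fromName, e.g.
+        //     ResiliencyMode.fromName("Enforced")   // -> ENFORCED
+        // while an unknown name such as "strict" throws IllegalArgumentException.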
+    }
+
+    /**
+     * Builder class for {@link QueryGroup}
+     */
+    @ExperimentalApi
+    public static class Builder {
+        private String name;
+        private String _id;
+        private ResiliencyMode resiliencyMode;
+        private long updatedAt;
+        private Map<ResourceType, Object> resourceLimits;
+
+        private Builder() {}
+
+        public Builder name(String name) {
+            this.name = name;
+            return this;
+        }
+
+        public Builder _id(String _id) {
+            this._id = _id;
+            return this;
+        }
+
+        public Builder mode(String mode) {
+            this.resiliencyMode = ResiliencyMode.fromName(mode);
+            return this;
+        }
+
+        public Builder updatedAt(long updatedAt) {
+            this.updatedAt = updatedAt;
+            return this;
+        }
+
+        public Builder resourceLimits(Map<ResourceType, Object> resourceLimits) {
+            this.resourceLimits = resourceLimits;
+            return this;
+        }
+
+        public QueryGroup build() {
+            return new QueryGroup(name, _id, resiliencyMode, resourceLimits, updatedAt);
+        }
+
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java
new file mode 100644
index 0000000000000..79732bc505ee2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java
@@ -0,0 +1,185 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.Diff;
+import org.opensearch.cluster.DiffableUtils;
+import org.opensearch.cluster.NamedDiff;
+import org.opensearch.core.ParseField;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.core.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.opensearch.cluster.metadata.Metadata.ALL_CONTEXTS;
+
+/**
+ * This class holds the QueryGroupMetadata; a sample schema:
+ * {
+ *   "queryGroups": {
+ *     "_id": {
+ *       {@link QueryGroup}
+ *     },
+ *     ...
+ *   }
+ * }
+ */
+public class QueryGroupMetadata implements Metadata.Custom {
+    public static final String TYPE = "queryGroups";
+    private static final ParseField QUERY_GROUP_FIELD = new ParseField("queryGroups");
+
+    private final Map<String, QueryGroup> queryGroups;
+
+    public QueryGroupMetadata(Map<String, QueryGroup> queryGroups) {
+        this.queryGroups = queryGroups;
+    }
+
+    public QueryGroupMetadata(StreamInput in) throws IOException {
+        this.queryGroups = in.readMap(StreamInput::readString, QueryGroup::new);
+    }
+
+    public Map<String, QueryGroup> queryGroups() {
+        return this.queryGroups;
+    }
+
+    /**
+     * Returns the name of the writeable object
+     */
+    @Override
+    public String getWriteableName() {
+        return TYPE;
+    }
+
+    /**
+     * The minimal version of the recipient this object can be sent to
+     */
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.V_3_0_0;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeMap(queryGroups, StreamOutput::writeString, (stream, val) -> val.writeTo(stream));
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        for (Map.Entry<String, QueryGroup> entry : queryGroups.entrySet()) {
+            builder.field(entry.getKey(), entry.getValue());
+        }
+        return builder;
+    }
+
+    public static QueryGroupMetadata fromXContent(XContentParser parser) throws IOException {
+        Map<String, QueryGroup> queryGroupMap = new HashMap<>();
+
+        if (parser.currentToken() == null) {
+            parser.nextToken();
+        }
+
+        if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+            throw new IllegalArgumentException(
+                "QueryGroupMetadata.fromXContent was expecting a { token but found : " + parser.currentToken()
+            );
+        }
+        XContentParser.Token token = parser.currentToken();
+        String fieldName = parser.currentName();
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                fieldName = parser.currentName();
+            } else {
+                QueryGroup queryGroup = QueryGroup.fromXContent(parser);
+                queryGroupMap.put(fieldName, queryGroup);
+            }
+        }
+
+        return new QueryGroupMetadata(queryGroupMap);
+    }
+
+    @Override
+    public Diff<Metadata.Custom> diff(final Metadata.Custom previousState) {
+        return new QueryGroupMetadataDiff((QueryGroupMetadata) previousState, this);
+    }
+
+    public static NamedDiff<Metadata.Custom> readDiffFrom(StreamInput in) throws IOException {
+        return new QueryGroupMetadataDiff(in);
+    }
+
+    @Override
+    public EnumSet<Metadata.XContentContext> context() {
+        return ALL_CONTEXTS;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        QueryGroupMetadata that = (QueryGroupMetadata) o;
+        return Objects.equals(queryGroups, that.queryGroups);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(queryGroups);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(MediaTypeRegistry.JSON, this);
+    }
+
+    /**
+     * QueryGroupMetadataDiff holds the diff between two QueryGroupMetadata states
+     */
+    static class QueryGroupMetadataDiff implements NamedDiff<Metadata.Custom> {
+        final Diff<Map<String, QueryGroup>> queryGroupsDiff;
+
+        QueryGroupMetadataDiff(final QueryGroupMetadata before, final QueryGroupMetadata after) {
+            queryGroupsDiff = DiffableUtils.diff(before.queryGroups, after.queryGroups, DiffableUtils.getStringKeySerializer());
+        }
+
+        QueryGroupMetadataDiff(final StreamInput in) throws IOException {
+            this.queryGroupsDiff = DiffableUtils.readJdkMapDiff(
+                in,
+                DiffableUtils.getStringKeySerializer(),
+                QueryGroup::new,
+                QueryGroup::readDiff
+            );
+        }
+
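+        // Editor's note (illustrative sketch, not part of the diff): because QueryGroup entries
+        // are Diffable, only changed map entries travel in a cluster state publication, e.g.:
+        //
+        //     QueryGroupMetadata before = new QueryGroupMetadata(Map.of("id1", group1));
+        //     QueryGroupMetadata after = new QueryGroupMetadata(Map.of("id1", group1, "id2", group2));
+        //     Diff<Metadata.Custom> diff = after.diff(before);                      // carries only "id2"
+        //     QueryGroupMetadata applied = (QueryGroupMetadata) diff.apply(before); // equals(after)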
+        /**
+         * Returns the name of the writeable object
+         */
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            queryGroupsDiff.writeTo(out);
+        }
+
+        @Override
+        public Metadata.Custom apply(Metadata.Custom part) {
+            return new QueryGroupMetadata(new HashMap<>(queryGroupsDiff.apply(((QueryGroupMetadata) part).queryGroups)));
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
index 6978c988fd648..00eb79add9f1d 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
@@ -68,6 +68,7 @@ public class LocalShardsBalancer extends ShardsBalancer {
     private final float avgPrimaryShardsPerNode;
     private final BalancedShardsAllocator.NodeSorter sorter;
     private final Set<String> inEligibleTargetNode;
+    private int totalShardCount = 0;
 
     public LocalShardsBalancer(
         Logger logger,
@@ -125,8 +126,7 @@ public float avgPrimaryShardsPerNode() {
      */
     @Override
     public float avgShardsPerNode() {
-        float totalShards = nodes.values().stream().map(BalancedShardsAllocator.ModelNode::numShards).reduce(0, Integer::sum);
-        return totalShards / nodes.size();
+        return (float) totalShardCount / nodes.size();
     }
 
     /**
@@ -598,6 +598,7 @@ void moveShards() {
             final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
             final BalancedShardsAllocator.ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId());
             sourceNode.removeShard(shardRouting);
+            --totalShardCount;
             Tuple<ShardRouting, ShardRouting> relocatingShards = routingNodes.relocateShard(
                 shardRouting,
                 targetNode.getNodeId(),
@@ -605,6 +606,7 @@ void moveShards() {
                 allocation.changes()
             );
             targetNode.addShard(relocatingShards.v2());
+            ++totalShardCount;
             if (logger.isTraceEnabled()) {
                 logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode());
             }
@@ -724,6 +726,7 @@ private Map<String, BalancedShardsAllocator.ModelNode> buildModelFromAssigned()
             /* we skip relocating shards here since we expect an initializing shard with the same id coming in */
             if (RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation)) && shard.state() != RELOCATING) {
                 node.addShard(shard);
+                ++totalShardCount;
                 if (logger.isTraceEnabled()) {
                     logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
                 }
@@ -815,6 +818,7 @@ void allocateUnassigned() {
                 );
                 shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes());
                 minNode.addShard(shard);
+                ++totalShardCount;
                 if (!shard.primary()) {
                     // copy over the same replica shards to the secondary array so they will get allocated
                     // in a subsequent iteration, allowing replicas of other shards to be allocated first
@@ -844,6 +848,7 @@ void allocateUnassigned() {
                         allocation.routingTable()
                     );
                     minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
+                    ++totalShardCount;
                 } else {
                     if (logger.isTraceEnabled()) {
                         logger.trace("No Node found to assign shard [{}]", shard);
@@ -1011,18 +1016,21 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala
         }
         final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
         maxNode.removeShard(shard);
+        --totalShardCount;
        long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
 
        if (decision.type() == Decision.Type.YES) { /* only
allocate on the cluster if we are not throttled */ logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); + ++totalShardCount; return true; } else { /* allocate on the model even if throttled */ logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); assert decision.type() == Decision.Type.THROTTLE; minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); + ++totalShardCount; return false; } } diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java index 27f9bedc4e495..c493bf717c97f 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java @@ -23,8 +23,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * PrimaryShardBatchAllocator is similar to {@link org.opensearch.gateway.PrimaryShardAllocator} only difference is @@ -82,6 +84,7 @@ public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassigned * @param allocation the allocation state container object */ public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { + logger.trace("Starting shard allocation execution for unassigned primary shards: {}", shardRoutings.size()); HashMap ineligibleShardAllocationDecisions = new HashMap<>(); List eligibleShards = new ArrayList<>(); List inEligibleShards = new ArrayList<>(); @@ -99,13 +102,13 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll // only fetch data for eligible shards final FetchResult shardsState = fetchData(eligibleShards, inEligibleShards, allocation); + Set batchShardRoutingSet = new HashSet<>(shardRoutings); RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); AllocateUnassignedDecision allocationDecision; - if (shardRoutings.contains(unassignedShard)) { - assert unassignedShard.primary(); + if (unassignedShard.primary() && batchShardRoutingSet.contains(unassignedShard)) { if (ineligibleShardAllocationDecisions.containsKey(unassignedShard.shardId())) { allocationDecision = ineligibleShardAllocationDecisions.get(unassignedShard.shardId()); } else { @@ -115,6 +118,7 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll executeDecision(unassignedShard, allocationDecision, allocation, iterator); } } + logger.trace("Finished shard allocation execution for unassigned primary shards: {}", shardRoutings.size()); } /** diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index f2cb3d053440d..7c75f2a5d1a8f 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -28,10 +28,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import 
java.util.function.Supplier; -import java.util.stream.Collectors; /** * Allocates replica shards in a batch mode @@ -117,6 +118,7 @@ public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassigned * @param allocation the allocation state container object */ public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { + logger.trace("Starting shard allocation execution for unassigned replica shards: {}", shardRoutings.size()); List eligibleShards = new ArrayList<>(); List ineligibleShards = new ArrayList<>(); Map ineligibleShardAllocationDecisions = new HashMap<>(); @@ -135,7 +137,11 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll // only fetch data for eligible shards final FetchResult shardsState = fetchData(eligibleShards, ineligibleShards, allocation); - List shardIdsFromBatch = shardRoutings.stream().map(shardRouting -> shardRouting.shardId()).collect(Collectors.toList()); + Set shardIdsFromBatch = new HashSet<>(); + for (ShardRouting shardRouting : shardRoutings) { + ShardId shardId = shardRouting.shardId(); + shardIdsFromBatch.add(shardId); + } RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); @@ -159,6 +165,7 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll executeDecision(unassignedShard, allocateUnassignedDecision, allocation, iterator); } } + logger.trace("Finished shard allocation execution for unassigned replica shards: {}", shardRoutings.size()); } private AllocateUnassignedDecision getUnassignedShardAllocationDecision( diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 2786cd432b002..3a66419b1dc20 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -20,6 +20,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.gateway.remote.ClusterMetadataManifest.Builder; import java.io.IOException; import java.util.ArrayList; @@ -243,7 +244,7 @@ private static void declareParser(ConstructingObjectParser UploadedIndexMetadata.fromXContent(p), + (p, c) -> UploadedIndexMetadata.fromXContent(p, codec_version), INDICES_FIELD ); parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); @@ -277,7 +278,7 @@ private static void declareParser(ConstructingObjectParser UploadedIndexMetadata.fromXContent(p), + (p, c) -> UploadedIndexMetadata.fromXContent(p, codec_version), INDICES_ROUTING_FIELD ); parser.declareNamedObject( @@ -1112,16 +1113,30 @@ private static String componentPrefix(Object[] fields) { return (String) fields[3]; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( + "uploaded_index_metadata", + fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields)) + ); + + private static final ConstructingObjectParser PARSER_V2 = new ConstructingObjectParser<>( "uploaded_index_metadata", fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields), 
componentPrefix(fields)) ); + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; + static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), COMPONENT_PREFIX_FIELD); + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V2, CODEC_V2); + } + + private static void declareParser(ConstructingObjectParser parser, long codec_version) { + parser.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); + if (codec_version >= CODEC_V2) { + parser.declareString(ConstructingObjectParser.constructorArg(), COMPONENT_PREFIX_FIELD); + } } static final String COMPONENT_PREFIX = "index--"; @@ -1130,15 +1145,32 @@ private static String componentPrefix(Object[] fields) { private final String indexUUID; private final String uploadedFilename; + private long codecVersion = CODEC_V2; + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName) { - this(indexName, indexUUID, uploadedFileName, COMPONENT_PREFIX); + this(indexName, indexUUID, uploadedFileName, CODEC_V2); + } + + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName, long codecVersion) { + this(indexName, indexUUID, uploadedFileName, COMPONENT_PREFIX, codecVersion); } public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName, String componentPrefix) { + this(indexName, indexUUID, uploadedFileName, componentPrefix, CODEC_V2); + } + + public UploadedIndexMetadata( + String indexName, + String indexUUID, + String uploadedFileName, + String componentPrefix, + long codecVersion + ) { this.componentPrefix = componentPrefix; this.indexName = indexName; this.indexUUID = indexUUID; this.uploadedFilename = uploadedFileName; + this.codecVersion = codecVersion; } public UploadedIndexMetadata(StreamInput in) throws IOException { @@ -1175,10 +1207,13 @@ public String getComponentPrefix() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) + builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) - .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) - .field(COMPONENT_PREFIX_FIELD.getPreferredName(), getComponentPrefix()); + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()); + if (codecVersion >= CODEC_V2) { + builder.field(COMPONENT_PREFIX_FIELD.getPreferredName(), getComponentPrefix()); + } + return builder; } @Override @@ -1214,9 +1249,13 @@ public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this); } - public static UploadedIndexMetadata fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); + public static UploadedIndexMetadata fromXContent(XContentParser parser, long codecVersion) throws IOException { + if (codecVersion >= CODEC_V2) { + return CURRENT_PARSER.parse(parser, null); + } + return PARSER_V0.parse(parser, null); } + } /** diff --git 
a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java index 65ae2675a95da..aca53c92781e4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -152,17 +153,17 @@ public ClusterStateDiffManifest( this.settingsMetadataUpdated = settingsMetadataUpdated; this.transientSettingsMetadataUpdated = transientSettingsMetadataUpdate; this.templatesMetadataUpdated = templatesMetadataUpdated; - this.customMetadataUpdated = customMetadataUpdated; - this.customMetadataDeleted = customMetadataDeleted; - this.indicesUpdated = indicesUpdated; - this.indicesDeleted = indicesDeleted; + this.customMetadataUpdated = Collections.unmodifiableList(customMetadataUpdated); + this.customMetadataDeleted = Collections.unmodifiableList(customMetadataDeleted); + this.indicesUpdated = Collections.unmodifiableList(indicesUpdated); + this.indicesDeleted = Collections.unmodifiableList(indicesDeleted); this.clusterBlocksUpdated = clusterBlocksUpdated; this.discoveryNodesUpdated = discoveryNodesUpdated; - this.indicesRoutingUpdated = indicesRoutingUpdated; - this.indicesRoutingDeleted = indicesRoutingDeleted; + this.indicesRoutingUpdated = Collections.unmodifiableList(indicesRoutingUpdated); + this.indicesRoutingDeleted = Collections.unmodifiableList(indicesRoutingDeleted); this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; - this.clusterStateCustomUpdated = clusterStateCustomUpdated; - this.clusterStateCustomDeleted = clusterStateCustomDeleted; + this.clusterStateCustomUpdated = Collections.unmodifiableList(clusterStateCustomUpdated); + this.clusterStateCustomDeleted = Collections.unmodifiableList(clusterStateCustomDeleted); } public ClusterStateDiffManifest(StreamInput in) throws IOException { @@ -563,7 +564,16 @@ public static class Builder { private List clusterStateCustomUpdated; private List clusterStateCustomDeleted; - public Builder() {} + public Builder() { + customMetadataUpdated = Collections.emptyList(); + customMetadataDeleted = Collections.emptyList(); + indicesUpdated = Collections.emptyList(); + indicesDeleted = Collections.emptyList(); + indicesRoutingUpdated = Collections.emptyList(); + indicesRoutingDeleted = Collections.emptyList(); + clusterStateCustomUpdated = Collections.emptyList(); + clusterStateCustomDeleted = Collections.emptyList(); + } public Builder fromStateUUID(String fromStateUUID) { this.fromStateUUID = fromStateUUID; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 74abe9cd257b4..3e63f9114ea16 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -976,7 +976,8 @@ public ClusterState getLatestClusterState(String clusterName, String clusterUUID return getClusterStateForManifest(clusterName, clusterMetadataManifest.get(), nodeId, includeEphemeral); } - private ClusterState readClusterStateInParallel( + // package private for testing + ClusterState readClusterStateInParallel( ClusterState 
previousState, ClusterMetadataManifest manifest, String clusterUUID, @@ -1285,7 +1286,7 @@ public ClusterState getClusterStateForManifest( manifest.getCustomMetadataMap(), manifest.getCoordinationMetadata() != null, manifest.getSettingsMetadata() != null, - manifest.getTransientSettingsMetadata() != null, + includeEphemeral && manifest.getTransientSettingsMetadata() != null, manifest.getTemplatesMetadata() != null, includeEphemeral && manifest.getDiscoveryNodesMetadata() != null, includeEphemeral && manifest.getClusterBlocksMetadata() != null, @@ -1321,13 +1322,9 @@ public ClusterState getClusterStateForManifest( } - public ClusterState getClusterStateUsingDiff( - String clusterName, - ClusterMetadataManifest manifest, - ClusterState previousState, - String localNodeId - ) throws IOException { - assert manifest.getDiffManifest() != null; + public ClusterState getClusterStateUsingDiff(ClusterMetadataManifest manifest, ClusterState previousState, String localNodeId) + throws IOException { + assert manifest.getDiffManifest() != null : "Diff manifest null which is required for downloading cluster state"; ClusterStateDiffManifest diff = manifest.getDiffManifest(); List updatedIndices = diff.getIndicesUpdated().stream().map(idx -> { Optional uploadedIndexMetadataOptional = manifest.getIndices() @@ -1586,6 +1583,19 @@ private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { return manifest.isClusterUUIDCommitted(); } + // package private setter which are required for injecting mock managers, these setters are not supposed to be used elsewhere + void setRemoteIndexMetadataManager(RemoteIndexMetadataManager remoteIndexMetadataManager) { + this.remoteIndexMetadataManager = remoteIndexMetadataManager; + } + + void setRemoteGlobalMetadataManager(RemoteGlobalMetadataManager remoteGlobalMetadataManager) { + this.remoteGlobalMetadataManager = remoteGlobalMetadataManager; + } + + void setRemoteClusterStateAttributesManager(RemoteClusterStateAttributesManager remoteClusterStateAttributeManager) { + this.remoteClusterStateAttributesManager = remoteClusterStateAttributeManager; + } + public void writeMetadataFailed() { getStats().stateFailed(); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java index a84161b202a22..c595f19279354 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -26,10 +26,7 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; -import java.util.Objects; /** * A Manager which provides APIs to write and read Index Metadata to remote store @@ -136,24 +133,6 @@ IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata upl } } - /** - * Fetch latest index metadata from remote cluster state - * - * @param clusterMetadataManifest manifest file of cluster - * @param clusterUUID uuid of cluster state to refer to in remote - * @return {@code Map} latest IndexUUID to IndexMetadata map - */ - Map getIndexMetadataMap(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { - assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) - : "Corrupt ClusterMetadataManifest found. 
Cluster UUID mismatch."; - Map remoteIndexMetadata = new HashMap<>(); - for (ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { - IndexMetadata indexMetadata = getIndexMetadata(uploadedIndexMetadata, clusterUUID); - remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); - } - return remoteIndexMetadata; - } - public TimeValue getIndexMetadataUploadTimeout() { return this.indexMetadataUploadTimeout; } diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index 67f38536a0d11..59fafdf1ba74e 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -39,6 +39,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.composite.CompositeCodecFactory; import org.opensearch.index.mapper.MapperService; import java.util.Map; @@ -63,6 +64,7 @@ public class CodecService { * the raw unfiltered lucene default. useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; + private final CompositeCodecFactory compositeCodecFactory = new CompositeCodecFactory(); public CodecService(@Nullable MapperService mapperService, IndexSettings indexSettings, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); @@ -73,10 +75,16 @@ public CodecService(@Nullable MapperService mapperService, IndexSettings indexSe codecs.put(BEST_COMPRESSION_CODEC, new Lucene99Codec(Mode.BEST_COMPRESSION)); codecs.put(ZLIB, new Lucene99Codec(Mode.BEST_COMPRESSION)); } else { - codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); - codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); - codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); - codecs.put(ZLIB, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); + // CompositeCodec still delegates to PerFieldMappingPostingFormatCodec + // We can still support all the compression codecs when composite index is present + if (mapperService.isCompositeIndexPresent()) { + codecs.putAll(compositeCodecFactory.getCompositeIndexCodecs(mapperService, logger)); + } else { + codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); + codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); + codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); + codecs.put(ZLIB, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); + } } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java new file mode 100644 index 0000000000000..de04944e67cd2 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the 
Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.FilterCodec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; +import org.opensearch.index.mapper.MapperService; + +/** + * Extends the Codec to support new file formats for composite indices eg: star tree index + * based on the mappings. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Composite99Codec extends FilterCodec { + public static final String COMPOSITE_INDEX_CODEC_NAME = "Composite99Codec"; + private final MapperService mapperService; + + // needed for SPI - this is used in reader path + public Composite99Codec() { + this(COMPOSITE_INDEX_CODEC_NAME, new Lucene99Codec(), null); + } + + public Composite99Codec(Lucene99Codec.Mode compressionMode, MapperService mapperService, Logger logger) { + this(COMPOSITE_INDEX_CODEC_NAME, new PerFieldMappingPostingFormatCodec(compressionMode, mapperService, logger), mapperService); + } + + /** + * Sole constructor. When subclassing this codec, create a no-arg ctor and pass the delegate codec and a unique name to + * this ctor. + * + * @param name name of the codec + * @param delegate codec delegate + * @param mapperService mapper service instance + */ + protected Composite99Codec(String name, Codec delegate, MapperService mapperService) { + super(name, delegate); + this.mapperService = mapperService; + } + + @Override + public DocValuesFormat docValuesFormat() { + return new Composite99DocValuesFormat(mapperService); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java new file mode 100644 index 0000000000000..216ed4f68f333 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.MapperService; + +import java.io.IOException; + +/** + * DocValues format to handle composite indices + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Composite99DocValuesFormat extends DocValuesFormat { + /** + * Creates a new docvalues format. + * + *

The provided name will be written into the index segment in some configurations (such as + * when using {@code PerFieldDocValuesFormat}): in such configurations, for the segment to be read + * this class should be registered with Java's SPI mechanism (registered in META-INF/ of your jar + * file, etc). + */ + private final DocValuesFormat delegate; + private final MapperService mapperService; + + // needed for SPI + public Composite99DocValuesFormat() { + this(new Lucene90DocValuesFormat(), null); + } + + public Composite99DocValuesFormat(MapperService mapperService) { + this(new Lucene90DocValuesFormat(), mapperService); + } + + public Composite99DocValuesFormat(DocValuesFormat delegate, MapperService mapperService) { + super(delegate.getName()); + this.delegate = delegate; + this.mapperService = mapperService; + } + + @Override + public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + return new Composite99DocValuesWriter(delegate.fieldsConsumer(state), state, mapperService); + } + + @Override + public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException { + return new Composite99DocValuesReader(delegate.fieldsProducer(state), state); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java new file mode 100644 index 0000000000000..82c844088cfd4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.CompositeMappedFieldType; + +import java.io.IOException; +import java.util.List; + +/** + * Reader for star tree index and star tree doc values from the segments + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Composite99DocValuesReader extends DocValuesProducer implements CompositeIndexReader { + private DocValuesProducer delegate; + + public Composite99DocValuesReader(DocValuesProducer producer, SegmentReadState state) throws IOException { + this.delegate = producer; + // TODO : read star tree files + } + + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + return delegate.getNumeric(field); + } + + @Override + public BinaryDocValues getBinary(FieldInfo field) throws IOException { + return delegate.getBinary(field); + } + + @Override + public SortedDocValues getSorted(FieldInfo field) throws IOException { + return delegate.getSorted(field); + } + + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + return delegate.getSortedNumeric(field); + } + + @Override + public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException { + return delegate.getSortedSet(field); + } + + @Override + public void checkIntegrity() throws IOException { + delegate.checkIntegrity(); + // Todo : check integrity of composite index related [star tree] files + } + + @Override + public void close() throws IOException { + delegate.close(); + // Todo: close composite index related files [star tree] files + } + + @Override + public List getCompositeIndexFields() { + // todo : read from file formats and get the field names. + throw new UnsupportedOperationException(); + + } + + @Override + public CompositeIndexValues getCompositeIndexValues(String field, CompositeMappedFieldType.CompositeFieldType fieldType) + throws IOException { + // TODO : read compositeIndexValues [starTreeValues] from star tree files + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java new file mode 100644 index 0000000000000..75bbf78dbdad2 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * This class writes the star tree index and star tree doc values + * based on the doc values structures of the original index + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Composite99DocValuesWriter extends DocValuesConsumer { + private final DocValuesConsumer delegate; + private final SegmentWriteState state; + private final MapperService mapperService; + AtomicReference mergeState = new AtomicReference<>(); + private final Set compositeMappedFieldTypes; + private final Set compositeFieldSet; + + private final Map fieldProducerMap = new HashMap<>(); + + public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentWriteState, MapperService mapperService) { + this.delegate = delegate; + this.state = segmentWriteState; + this.mapperService = mapperService; + this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); + compositeFieldSet = new HashSet<>(); + for (CompositeMappedFieldType type : compositeMappedFieldTypes) { + compositeFieldSet.addAll(type.fields()); + } + } + + @Override + public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + delegate.addNumericField(field, valuesProducer); + } + + @Override + public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + delegate.addBinaryField(field, valuesProducer); + } + + @Override + public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + delegate.addSortedField(field, valuesProducer); + } + + @Override + public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + delegate.addSortedNumericField(field, valuesProducer); + // Perform this only during the flush flow + if (mergeState.get() == null) { + createCompositeIndicesIfPossible(valuesProducer, field); + } + } + + @Override + public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + delegate.addSortedSetField(field, valuesProducer); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, FieldInfo field) throws IOException { + if (compositeFieldSet.isEmpty()) return; + if (compositeFieldSet.contains(field.name)) { + fieldProducerMap.put(field.name, valuesProducer); + compositeFieldSet.remove(field.name); + } + // once the set is empty, we have seen all the required fields to build composite fields + if (compositeFieldSet.isEmpty()) { + for (CompositeMappedFieldType mappedType : compositeMappedFieldTypes) { + if (mappedType instanceof StarTreeMapper.StarTreeFieldType) { + // TODO : Call StarTree builder + } + } + } + } + + @Override + public void merge(MergeState mergeState) throws IOException { +
this.mergeState.compareAndSet(null, mergeState); + super.merge(mergeState); + // TODO : handle merge star tree + // mergeStarTreeFields(mergeState); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java new file mode 100644 index 0000000000000..3acedc6a27d7f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.MapperService; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.index.codec.CodecService.BEST_COMPRESSION_CODEC; +import static org.opensearch.index.codec.CodecService.DEFAULT_CODEC; +import static org.opensearch.index.codec.CodecService.LZ4; +import static org.opensearch.index.codec.CodecService.ZLIB; + +/** + * Factory class to return the latest composite codec for all the modes + * + * @opensearch.experimental + */ +@ExperimentalApi +public class CompositeCodecFactory { + public CompositeCodecFactory() {} + + public Map getCompositeIndexCodecs(MapperService mapperService, Logger logger) { + Map codecs = new HashMap<>(); + codecs.put(DEFAULT_CODEC, new Composite99Codec(Lucene99Codec.Mode.BEST_SPEED, mapperService, logger)); + codecs.put(LZ4, new Composite99Codec(Lucene99Codec.Mode.BEST_SPEED, mapperService, logger)); + codecs.put(BEST_COMPRESSION_CODEC, new Composite99Codec(Lucene99Codec.Mode.BEST_COMPRESSION, mapperService, logger)); + codecs.put(ZLIB, new Composite99Codec(Lucene99Codec.Mode.BEST_COMPRESSION, mapperService, logger)); + return codecs; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java new file mode 100644 index 0000000000000..d02438b75377d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
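Illustrative sketch of how CompositeCodecFactory above could be consumed (the resolve helper and its codecName parameter are assumptions, not code from this change): prefer a composite-aware codec when the index actually maps composite fields, otherwise fall back to the stock default.

import java.util.Map;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.opensearch.index.codec.composite.CompositeCodecFactory;
import org.opensearch.index.mapper.MapperService;

final class CodecResolver {
    // codecName is one of "default", "lz4", "best_compression" or "zlib".
    static Codec resolve(String codecName, MapperService mapperService, Logger logger) {
        if (mapperService != null && mapperService.isCompositeIndexPresent()) {
            Map<String, Codec> codecs = new CompositeCodecFactory().getCompositeIndexCodecs(mapperService, logger);
            return codecs.get(codecName);
        }
        return Codec.getDefault();
    }
}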
+ */ + +package org.opensearch.index.codec.composite; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.CompositeMappedFieldType; + +import java.io.IOException; +import java.util.List; + +/** + * Interface that abstracts the functionality to read composite index structures from the segment + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface CompositeIndexReader { + /** + * Get list of composite index fields from the segment + * + */ + List getCompositeIndexFields(); + + /** + * Get composite index values based on the field name and the field type + */ + CompositeIndexValues getCompositeIndexValues(String field, CompositeMappedFieldType.CompositeFieldType fieldType) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexValues.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexValues.java new file mode 100644 index 0000000000000..f8848aceab343 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexValues.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Interface for composite index values + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface CompositeIndexValues { + CompositeIndexValues getValues(); +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java new file mode 100644 index 0000000000000..2a5b96ce2620a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
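A hedged sketch of how a consumer might reach these structures once the reader-side TODOs land (the instanceof unwrap is an assumption about call sites, and both interface methods still throw UnsupportedOperationException in this change):

import java.io.IOException;

import org.apache.lucene.codecs.DocValuesProducer;
import org.opensearch.index.codec.composite.CompositeIndexReader;
import org.opensearch.index.codec.composite.CompositeIndexValues;
import org.opensearch.index.mapper.CompositeMappedFieldType;

final class CompositeReaderDemo {
    static CompositeIndexValues readStarTree(DocValuesProducer producer, String field) throws IOException {
        if (producer instanceof CompositeIndexReader) {
            // Segments written with the composite codec expose the extra structures.
            CompositeIndexReader reader = (CompositeIndexReader) producer;
            return reader.getCompositeIndexValues(field, CompositeMappedFieldType.CompositeFieldType.STAR_TREE);
        }
        return null; // segment was written without composite fields
    }
}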
+ */ + +package org.opensearch.index.codec.composite.datacube.startree; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexValues; + +import java.util.List; + +/** + * Concrete class that holds the star tree associated values from the segment + * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreeValues implements CompositeIndexValues { + private final List dimensionsOrder; + + // TODO : come up with the full set of values such as dimensions and metrics doc values + star tree + public StarTreeValues(List dimensionsOrder) { + super(); + this.dimensionsOrder = List.copyOf(dimensionsOrder); + } + + @Override + public CompositeIndexValues getValues() { + return this; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/package-info.java new file mode 100644 index 0000000000000..67808ad51289a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * classes responsible for handling all star tree structures and operations as part of the codec + */ +package org.opensearch.index.codec.composite.datacube.startree; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/package-info.java new file mode 100644 index 0000000000000..5d15e99c00975 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * classes responsible for handling all composite index codecs and operations + */ +package org.opensearch.index.codec.composite; diff --git a/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java index f52ce29a86dd2..e067e70621304 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java @@ -45,7 +45,10 @@ public CompositeMappedFieldType(String name, List fields, CompositeField /** * Supported composite field types + * + * @opensearch.experimental */ + @ExperimentalApi public enum CompositeFieldType { STAR_TREE("star_tree"); diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index f276d6ee2e579..c6815ebe8d91a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -54,6 +54,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Locale; import static org.opensearch.index.mapper.FieldMapper.IGNORE_MALFORMED_SETTING; @@ -545,22 +546,32 @@ private static void parseObject(final ParseContext context, ObjectMapper mapper, Tuple parentMapperTuple = getDynamicParentMapper(context, paths, mapper); ObjectMapper parentMapper = parentMapperTuple.v2(); ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); - if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName); - } else if (dynamic == ObjectMapper.Dynamic.TRUE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.OBJECT); - if (builder == null) { - builder = new ObjectMapper.Builder(currentFieldName).enabled(true); - } - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); - objectMapper = builder.build(builderContext); - context.addDynamicMapper(objectMapper); - context.path().add(currentFieldName); - parseObjectOrField(context, objectMapper); - context.path().remove(); - } else { - // not dynamic, read everything up to end object - context.parser().skipChildren(); + switch (dynamic) { + case STRICT: + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), mapper.fullPath(), currentFieldName); + case TRUE: + case STRICT_ALLOW_TEMPLATES: + Mapper.Builder builder = findTemplateBuilder( + context, + currentFieldName, + XContentFieldType.OBJECT, + dynamic, + mapper.fullPath() + ); + + if (builder == null) { + builder = new ObjectMapper.Builder(currentFieldName).enabled(true); + } + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); + objectMapper = builder.build(builderContext); + context.addDynamicMapper(objectMapper); + context.path().add(currentFieldName); + parseObjectOrField(context, objectMapper); + context.path().remove(); + break; + case FALSE: + // not dynamic, read everything up to end object + context.parser().skipChildren(); } for (int i = 0; i < parentMapperTuple.v1(); i++) { context.path().remove(); @@ -591,31 +602,44 @@ private static void parseArray(ParseContext context, ObjectMapper parentMapper, Tuple parentMapperTuple = getDynamicParentMapper(context, 
paths, parentMapper); parentMapper = parentMapperTuple.v2(); ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); - if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); - } else if (dynamic == ObjectMapper.Dynamic.TRUE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, XContentFieldType.OBJECT); - if (builder == null) { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); - } else { - Mapper.BuilderContext builderContext = new Mapper.BuilderContext( - context.indexSettings().getSettings(), - context.path() + switch (dynamic) { + case STRICT: + throw new StrictDynamicMappingException( + dynamic.name().toLowerCase(Locale.ROOT), + parentMapper.fullPath(), + arrayFieldName ); - mapper = builder.build(builderContext); - assert mapper != null; - if (parsesArrayValue(mapper)) { - context.addDynamicMapper(mapper); - context.path().add(arrayFieldName); - parseObjectOrField(context, mapper); - context.path().remove(); - } else { + case TRUE: + case STRICT_ALLOW_TEMPLATES: + Mapper.Builder builder = findTemplateBuilder( + context, + arrayFieldName, + XContentFieldType.OBJECT, + dynamic, + parentMapper.fullPath() + ); + if (builder == null) { parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } else { + Mapper.BuilderContext builderContext = new Mapper.BuilderContext( + context.indexSettings().getSettings(), + context.path() + ); + mapper = builder.build(builderContext); + assert mapper != null; + if (parsesArrayValue(mapper)) { + context.addDynamicMapper(mapper); + context.path().add(arrayFieldName); + parseObjectOrField(context, mapper); + context.path().remove(); + } else { + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } } - } - } else { - // TODO: shouldn't this skip, not parse? - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + break; + case FALSE: + // TODO: shouldn't this skip, not parse? + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } for (int i = 0; i < parentMapperTuple.v1(); i++) { context.path().remove(); @@ -692,11 +716,12 @@ private static void parseNullValue(ParseContext context, ObjectMapper parentMapp throws IOException { // we can only handle null values if we have mappings for them Mapper mapper = getMapper(context, parentMapper, lastFieldName, paths); + ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); if (mapper != null) { // TODO: passing null to an object seems bogus? 
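// Illustrative aside, not part of this hunk: a mapping that exercises the new
// "strict_allow_templates" mode handled above (template and field names here
// are made up). Dynamic string fields match the template and are mapped as
// keyword; any dynamic field with no matching template now throws
// StrictDynamicMappingException instead of being silently mapped.

import java.io.IOException;

import org.opensearch.common.xcontent.json.JsonXContent;
import org.opensearch.core.xcontent.XContentBuilder;

final class StrictAllowTemplatesExample {
    static XContentBuilder mapping() throws IOException {
        return JsonXContent.contentBuilder()
            .startObject()
            .field("dynamic", "strict_allow_templates")
            .startArray("dynamic_templates")
            .startObject()
            .startObject("strings_as_keyword")
            .field("match_mapping_type", "string")
            .startObject("mapping")
            .field("type", "keyword")
            .endObject()
            .endObject()
            .endObject()
            .endArray()
            .endObject();
    }
}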
parseObjectOrField(context, mapper); - } else if (parentMapper.dynamic() == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parentMapper.fullPath(), lastFieldName); + } else if (dynamic == ObjectMapper.Dynamic.STRICT || dynamic == ObjectMapper.Dynamic.STRICT_ALLOW_TEMPLATES) { + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), parentMapper.fullPath(), lastFieldName); } } @@ -711,7 +736,9 @@ private static Mapper.Builder newFloatBuilder(String name, Settings settings) private static Mapper.Builder createBuilderFromDynamicValue( final ParseContext context, XContentParser.Token token, - String currentFieldName + String currentFieldName, + ObjectMapper.Dynamic dynamic, + String fullPath ) throws IOException { if (token == XContentParser.Token.VALUE_STRING) { String text = context.parser().text(); @@ -733,13 +760,13 @@ private static Mapper.Builder createBuilderFromDynamicValue( } if (parseableAsLong && context.root().numericDetection()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG, dynamic, fullPath); if (builder == null) { builder = newLongBuilder(currentFieldName, context.indexSettings().getSettings()); } return builder; } else if (parseableAsDouble && context.root().numericDetection()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE, dynamic, fullPath); if (builder == null) { builder = newFloatBuilder(currentFieldName, context.indexSettings().getSettings()); } @@ -755,7 +782,7 @@ private static Mapper.Builder createBuilderFromDynamicValue( // failure to parse this, continue continue; } - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, dateTimeFormatter); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, dateTimeFormatter, dynamic, fullPath); if (builder == null) { boolean ignoreMalformed = IGNORE_MALFORMED_SETTING.get(context.indexSettings().getSettings()); builder = new DateFieldMapper.Builder( @@ -771,7 +798,7 @@ private static Mapper.Builder createBuilderFromDynamicValue( } } - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.STRING); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.STRING, dynamic, fullPath); if (builder == null) { builder = new TextFieldMapper.Builder(currentFieldName, context.mapperService().getIndexAnalyzers()).addMultiField( new KeywordFieldMapper.Builder("keyword").ignoreAbove(256) @@ -783,7 +810,7 @@ private static Mapper.Builder createBuilderFromDynamicValue( if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG || numberType == XContentParser.NumberType.BIG_INTEGER) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG, dynamic, fullPath); if (builder == null) { builder = newLongBuilder(currentFieldName, context.indexSettings().getSettings()); } @@ -791,7 +818,7 @@ private static Mapper.Builder createBuilderFromDynamicValue( } else if (numberType == XContentParser.NumberType.FLOAT || numberType == 
XContentParser.NumberType.DOUBLE || numberType == XContentParser.NumberType.BIG_DECIMAL) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE, dynamic, fullPath); if (builder == null) { // no templates are defined, we use float by default instead of double // since this is much more space-efficient and should be enough most of @@ -801,19 +828,19 @@ private static Mapper.Builder createBuilderFromDynamicValue( return builder; } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.BOOLEAN); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.BOOLEAN, dynamic, fullPath); if (builder == null) { builder = new BooleanFieldMapper.Builder(currentFieldName); } return builder; } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.BINARY); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.BINARY, dynamic, fullPath); if (builder == null) { builder = new BinaryFieldMapper.Builder(currentFieldName); } return builder; } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.STRING); + Mapper.Builder builder = findTemplateBuilder(context, currentFieldName, XContentFieldType.STRING, dynamic, fullPath); if (builder != null) { return builder; } @@ -832,13 +859,13 @@ private static void parseDynamicValue( ) throws IOException { ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), parentMapper.fullPath(), currentFieldName); } if (dynamic == ObjectMapper.Dynamic.FALSE) { return; } final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); - final Mapper.Builder builder = createBuilderFromDynamicValue(context, token, currentFieldName); + final Mapper.Builder builder = createBuilderFromDynamicValue(context, token, currentFieldName, dynamic, parentMapper.fullPath()); Mapper mapper = builder.build(builderContext); context.addDynamicMapper(mapper); @@ -926,9 +953,16 @@ private static Tuple getDynamicParentMapper( switch (dynamic) { case STRICT: - throw new StrictDynamicMappingException(parent.fullPath(), paths[i]); + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), parent.fullPath(), paths[i]); + case STRICT_ALLOW_TEMPLATES: case TRUE: - Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], XContentFieldType.OBJECT); + Mapper.Builder builder = findTemplateBuilder( + context, + paths[i], + XContentFieldType.OBJECT, + dynamic, + parent.fullPath() + ); if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } @@ -1010,4 +1044,37 @@ private static Mapper getMapper(final ParseContext context, ObjectMapper objectM } return objectMapper.getMapper(subfields[subfields.length - 1]); } + + // Throws exception if no dynamic templates found but `dynamic` is set to strict_allow_templates + @SuppressWarnings("rawtypes") + 
private static Mapper.Builder findTemplateBuilder( + ParseContext context, + String name, + XContentFieldType matchType, + ObjectMapper.Dynamic dynamic, + String fieldFullPath + ) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, name, matchType); + if (builder == null && dynamic == ObjectMapper.Dynamic.STRICT_ALLOW_TEMPLATES) { + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), fieldFullPath, name); + } + + return builder; + } + + // Throws exception if no dynamic templates found but `dynamic` is set to strict_allow_templates + @SuppressWarnings("rawtypes") + private static Mapper.Builder findTemplateBuilder( + ParseContext context, + String name, + DateFormatter dateFormat, + ObjectMapper.Dynamic dynamic, + String fieldFullPath + ) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, name, dateFormat); + if (builder == null && dynamic == ObjectMapper.Dynamic.STRICT_ALLOW_TEMPLATES) { + throw new StrictDynamicMappingException(dynamic.name().toLowerCase(Locale.ROOT), fieldFullPath, name); + } + return builder; + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index c2e7411a3b47a..530a3092a5aa7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -226,6 +226,8 @@ public enum MergeReason { private final BooleanSupplier idFieldDataEnabled; + private volatile Set compositeMappedFieldTypes; + public MapperService( IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, @@ -542,6 +544,9 @@ private synchronized Map internalMerge(DocumentMapper ma } assert results.values().stream().allMatch(this::assertSerialization); + + // initialize composite fields post merge + this.compositeMappedFieldTypes = getCompositeFieldTypesFromMapper(); return results; } @@ -655,6 +660,10 @@ public boolean isCompositeIndexPresent() { } public Set getCompositeFieldTypes() { + return compositeMappedFieldTypes; + } + + private Set getCompositeFieldTypesFromMapper() { Set compositeMappedFieldTypes = new HashSet<>(); if (this.mapper == null) { return Collections.emptySet(); diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index be3adfe8b2c4e..533e6ca73d737 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -92,7 +92,8 @@ public static class Defaults { public enum Dynamic { TRUE, FALSE, - STRICT + STRICT, + STRICT_ALLOW_TEMPLATES } /** @@ -297,6 +298,8 @@ protected static boolean parseObjectOrDocumentTypeProperties( String value = fieldNode.toString(); if (value.equalsIgnoreCase("strict")) { builder.dynamic(Dynamic.STRICT); + } else if (value.equalsIgnoreCase("strict_allow_templates")) { + builder.dynamic(Dynamic.STRICT_ALLOW_TEMPLATES); } else { boolean dynamic = XContentMapValues.nodeBooleanValue(fieldNode, fieldName + ".dynamic"); builder.dynamic(dynamic ? 
Dynamic.TRUE : Dynamic.FALSE); diff --git a/server/src/main/java/org/opensearch/index/mapper/StrictDynamicMappingException.java b/server/src/main/java/org/opensearch/index/mapper/StrictDynamicMappingException.java index 9127641128dad..0524c672011c5 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StrictDynamicMappingException.java +++ b/server/src/main/java/org/opensearch/index/mapper/StrictDynamicMappingException.java @@ -43,8 +43,8 @@ */ public class StrictDynamicMappingException extends MapperParsingException { - public StrictDynamicMappingException(String path, String fieldName) { - super("mapping set to strict, dynamic introduction of [" + fieldName + "] within [" + path + "] is not allowed"); + public StrictDynamicMappingException(String dynamic, String path, String fieldName) { + super("mapping set to " + dynamic + ", dynamic introduction of [" + fieldName + "] within [" + path + "] is not allowed"); } public StrictDynamicMappingException(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index d0e041e68a81d..ba053a3aeee1d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -448,7 +448,6 @@ protected PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType pft.setStoreTermVectorOffsets(true); } PrefixFieldType prefixFieldType = new PrefixFieldType(tft, fullName + "._index_prefix", indexPrefixes.get()); - prefixFieldType.setAnalyzer(analyzers.getIndexAnalyzer()); tft.setPrefixFieldType(prefixFieldType); return new PrefixFieldMapper(pft, prefixFieldType); } @@ -522,12 +521,14 @@ private static class PrefixWrappedAnalyzer extends AnalyzerWrapper { private final int minChars; private final int maxChars; private final Analyzer delegate; + private final int positionIncrementGap; - PrefixWrappedAnalyzer(Analyzer delegate, int minChars, int maxChars) { + PrefixWrappedAnalyzer(Analyzer delegate, int minChars, int maxChars, int positionIncrementGap) { super(delegate.getReuseStrategy()); this.delegate = delegate; this.minChars = minChars; this.maxChars = maxChars; + this.positionIncrementGap = positionIncrementGap; } @Override @@ -535,6 +536,11 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { return delegate; } + @Override + public int getPositionIncrementGap(String fieldName) { + return positionIncrementGap; + } + @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); @@ -588,17 +594,18 @@ static final class PrefixFieldType extends StringFieldType { final int minChars; final int maxChars; - final TextFieldType parentField; + final TextFieldType parent; PrefixFieldType(TextFieldType parentField, String name, PrefixConfig config) { this(parentField, name, config.minChars, config.maxChars); } - PrefixFieldType(TextFieldType parentField, String name, int minChars, int maxChars) { - super(name, true, false, false, parentField.getTextSearchInfo(), Collections.emptyMap()); + PrefixFieldType(TextFieldType parent, String name, int minChars, int maxChars) { + super(name, true, false, false, parent.getTextSearchInfo(), Collections.emptyMap()); this.minChars = minChars; this.maxChars = maxChars; - this.parentField = parentField; + this.parent = parent; + 
setAnalyzer(parent.indexAnalyzer()); } @Override @@ -609,8 +616,13 @@ public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchL } void setAnalyzer(NamedAnalyzer delegate) { + String analyzerName = delegate.name(); setIndexAnalyzer( - new NamedAnalyzer(delegate.name(), AnalyzerScope.INDEX, new PrefixWrappedAnalyzer(delegate.analyzer(), minChars, maxChars)) + new NamedAnalyzer( + analyzerName, + AnalyzerScope.INDEX, + new PrefixWrappedAnalyzer(delegate.analyzer(), minChars, maxChars, delegate.getPositionIncrementGap(analyzerName)) + ) ); } @@ -639,7 +651,7 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool Automaton automaton = Operations.concatenate(automata); AutomatonQuery query = AutomatonQueries.createAutomatonQuery(new Term(name(), value + "*"), automaton, method); return new BooleanQuery.Builder().add(query, BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term(parentField.name(), value)), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(parent.name(), value)), BooleanClause.Occur.SHOULD) .build(); } diff --git a/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java index f3592a3561d3a..f3212b1e2fae1 100644 --- a/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import java.util.Objects; @@ -40,8 +41,9 @@ /** * Describes a system index. Provides the information required to create and maintain the system index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.16.0") public class SystemIndexDescriptor { private final String indexPattern; private final String description; diff --git a/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java b/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java new file mode 100644 index 0000000000000..d9608e220d924 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.regex.Regex; +import org.opensearch.tasks.TaskResultsService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.opensearch.tasks.TaskResultsService.TASK_INDEX; + +/** + * This class holds the {@link SystemIndexDescriptor} objects that represent system indices the + * node knows about. 
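Stepping back to the TextFieldMapper change above (illustrative aside, not part of the diff): the wrapped prefix analyzer now reports the same position increment gap as its delegate, so phrase-prefix queries can no longer match across the boundary between two values of a multi-valued text field. A minimal sketch of the Lucene API involved, with a StandardAnalyzer standing in for the field's delegate analyzer:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public class PositionGapDemo {
    public static void main(String[] args) {
        try (Analyzer delegate = new StandardAnalyzer()) {
            // The gap is inserted between consecutive values of the same field.
            // Analyzer's default is 0; the fix threads the NamedAnalyzer's
            // configured gap through PrefixWrappedAnalyzer instead of relying
            // on the inner analyzer's default.
            System.out.println(delegate.getPositionIncrementGap("my_text_field"));
        }
    }
}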
This class also contains static methods that identify if index expressions match + * registered system index patterns + * + * @opensearch.api + */ +@ExperimentalApi +public class SystemIndexRegistry { + private static final SystemIndexDescriptor TASK_INDEX_DESCRIPTOR = new SystemIndexDescriptor(TASK_INDEX + "*", "Task Result Index"); + private static final Map> SERVER_SYSTEM_INDEX_DESCRIPTORS = singletonMap( + TaskResultsService.class.getName(), + singletonList(TASK_INDEX_DESCRIPTOR) + ); + + private volatile static String[] SYSTEM_INDEX_PATTERNS = new String[0]; + volatile static Collection SYSTEM_INDEX_DESCRIPTORS = Collections.emptyList(); + + static void register(Map> pluginAndModulesDescriptors) { + final Map> descriptorsMap = buildSystemIndexDescriptorMap(pluginAndModulesDescriptors); + checkForOverlappingPatterns(descriptorsMap); + List descriptors = pluginAndModulesDescriptors.values() + .stream() + .flatMap(Collection::stream) + .collect(Collectors.toList()); + descriptors.add(TASK_INDEX_DESCRIPTOR); + + SYSTEM_INDEX_DESCRIPTORS = descriptors.stream().collect(Collectors.toUnmodifiableList()); + SYSTEM_INDEX_PATTERNS = descriptors.stream().map(SystemIndexDescriptor::getIndexPattern).toArray(String[]::new); + } + + public static List matchesSystemIndexPattern(String... indexExpressions) { + return Arrays.stream(indexExpressions) + .filter(pattern -> Regex.simpleMatch(SYSTEM_INDEX_PATTERNS, pattern)) + .collect(Collectors.toList()); + } + + /** + * Given a collection of {@link SystemIndexDescriptor}s and their sources, checks to see if the index patterns of the listed + * descriptors overlap with any of the other patterns. If any do, throws an exception. + * + * @param sourceToDescriptors A map of source (plugin) names to the SystemIndexDescriptors they provide. + * @throws IllegalStateException Thrown if any of the index patterns overlaps with another. + */ + static void checkForOverlappingPatterns(Map> sourceToDescriptors) { + List> sourceDescriptorPair = sourceToDescriptors.entrySet() + .stream() + .flatMap(entry -> entry.getValue().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) + .sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message + .collect(Collectors.toList()); + + // This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the + // automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states + // per pattern should be low as well. If these assumptions change, this might need to be reworked. 
+ sourceDescriptorPair.forEach(descriptorToCheck -> { + List> descriptorsMatchingThisPattern = sourceDescriptorPair.stream() + .filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked + .filter(d -> overlaps(descriptorToCheck.v2(), d.v2())) + .collect(Collectors.toList()); + if (descriptorsMatchingThisPattern.isEmpty() == false) { + throw new IllegalStateException( + "a system index descriptor [" + + descriptorToCheck.v2() + + "] from [" + + descriptorToCheck.v1() + + "] overlaps with other system index descriptors: [" + + descriptorsMatchingThisPattern.stream() + .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") + .collect(Collectors.joining(", ")) + ); + } + }); + } + + private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) { + Automaton a1Automaton = Regex.simpleMatchToAutomaton(a1.getIndexPattern()); + Automaton a2Automaton = Regex.simpleMatchToAutomaton(a2.getIndexPattern()); + return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false; + } + + private static Map> buildSystemIndexDescriptorMap( + Map> pluginAndModulesMap + ) { + final Map> map = new HashMap<>( + pluginAndModulesMap.size() + SERVER_SYSTEM_INDEX_DESCRIPTORS.size() + ); + map.putAll(pluginAndModulesMap); + // put the server items last since we expect fewer of them + SERVER_SYSTEM_INDEX_DESCRIPTORS.forEach((source, descriptors) -> { + if (map.putIfAbsent(source, descriptors) != null) { + throw new IllegalArgumentException( + "plugin or module attempted to define the same source [" + source + "] as a built-in system index" + ); + } + }); + return unmodifiableMap(map); + } +} diff --git a/server/src/main/java/org/opensearch/indices/SystemIndices.java b/server/src/main/java/org/opensearch/indices/SystemIndices.java index a85e938c61b7a..bbf58fe91512f 100644 --- a/server/src/main/java/org/opensearch/indices/SystemIndices.java +++ b/server/src/main/java/org/opensearch/indices/SystemIndices.java @@ -40,25 +40,15 @@ import org.apache.lucene.util.automaton.MinimizationOperations; import org.apache.lucene.util.automaton.Operations; import org.opensearch.common.Nullable; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; import org.opensearch.core.index.Index; -import org.opensearch.tasks.TaskResultsService; import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static java.util.Collections.unmodifiableList; -import static java.util.Collections.unmodifiableMap; -import static org.opensearch.tasks.TaskResultsService.TASK_INDEX; - /** * This class holds the {@link SystemIndexDescriptor} objects that represent system indices the * node knows about.
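A short usage sketch for the new registry (illustrative; the helper class is not part of this change, and it assumes register() has already run, which SystemIndices' constructor now guarantees at node startup):

import java.util.List;

import org.opensearch.indices.SystemIndexRegistry;

final class SystemIndexCheckDemo {
    // Returns only the expressions that match a registered system index pattern,
    // e.g. [".tasks"] when just the built-in task result descriptor is registered.
    static List<String> systemOnly(String... expressions) {
        return SystemIndexRegistry.matchesSystemIndexPattern(expressions);
    }
}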
Methods for determining if an index should be a system index are also provided @@ -69,21 +59,11 @@ public class SystemIndices { private static final Logger logger = LogManager.getLogger(SystemIndices.class); - private static final Map> SERVER_SYSTEM_INDEX_DESCRIPTORS = singletonMap( - TaskResultsService.class.getName(), - singletonList(new SystemIndexDescriptor(TASK_INDEX + "*", "Task Result Index")) - ); - private final CharacterRunAutomaton runAutomaton; - private final Collection systemIndexDescriptors; public SystemIndices(Map> pluginAndModulesDescriptors) { - final Map> descriptorsMap = buildSystemIndexDescriptorMap(pluginAndModulesDescriptors); - checkForOverlappingPatterns(descriptorsMap); - this.systemIndexDescriptors = unmodifiableList( - descriptorsMap.values().stream().flatMap(Collection::stream).collect(Collectors.toList()) - ); - this.runAutomaton = buildCharacterRunAutomaton(systemIndexDescriptors); + SystemIndexRegistry.register(pluginAndModulesDescriptors); + this.runAutomaton = buildCharacterRunAutomaton(SystemIndexRegistry.SYSTEM_INDEX_DESCRIPTORS); } /** @@ -111,7 +91,7 @@ public boolean isSystemIndex(String indexName) { * @throws IllegalStateException if multiple descriptors match the name */ public @Nullable SystemIndexDescriptor findMatchingDescriptor(String name) { - final List matchingDescriptors = systemIndexDescriptors.stream() + final List matchingDescriptors = SystemIndexRegistry.SYSTEM_INDEX_DESCRIPTORS.stream() .filter(descriptor -> descriptor.matchesIndexPattern(name)) .collect(Collectors.toList()); @@ -168,66 +148,4 @@ private static CharacterRunAutomaton buildCharacterRunAutomaton(Collection> sourceToDescriptors) { - List> sourceDescriptorPair = sourceToDescriptors.entrySet() - .stream() - .flatMap(entry -> entry.getValue().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) - .sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message - .collect(Collectors.toList()); - - // This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the - // automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states - // per pattern should be low as well. If these assumptions change, this might need to be reworked. 
- sourceDescriptorPair.forEach(descriptorToCheck -> { - List> descriptorsMatchingThisPattern = sourceDescriptorPair.stream() - - .filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked - .filter(d -> overlaps(descriptorToCheck.v2(), d.v2())) - .collect(Collectors.toList()); - if (descriptorsMatchingThisPattern.isEmpty() == false) { - throw new IllegalStateException( - "a system index descriptor [" - + descriptorToCheck.v2() - + "] from [" - + descriptorToCheck.v1() - + "] overlaps with other system index descriptors: [" - + descriptorsMatchingThisPattern.stream() - .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") - .collect(Collectors.joining(", ")) - ); - } - }); - } - - private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) { - Automaton a1Automaton = Regex.simpleMatchToAutomaton(a1.getIndexPattern()); - Automaton a2Automaton = Regex.simpleMatchToAutomaton(a2.getIndexPattern()); - return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false; - } - - private static Map> buildSystemIndexDescriptorMap( - Map> pluginAndModulesMap - ) { - final Map> map = new HashMap<>( - pluginAndModulesMap.size() + SERVER_SYSTEM_INDEX_DESCRIPTORS.size() - ); - map.putAll(pluginAndModulesMap); - // put the server items last since we expect less of them - SERVER_SYSTEM_INDEX_DESCRIPTORS.forEach((source, descriptors) -> { - if (map.putIfAbsent(source, descriptors) != null) { - throw new IllegalArgumentException( - "plugin or module attempted to define the same source [" + source + "] as a built-in system index" - ); - } - }); - return unmodifiableMap(map); - } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 85ef547e27787..96a716af7f1a1 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -695,6 +695,14 @@ protected Node( repositoriesServiceReference::get, rerouteServiceReference::get ); + final Map> systemIndexDescriptorMap = Collections.unmodifiableMap( + pluginsService.filterPlugins(SystemIndexPlugin.class) + .stream() + .collect( + Collectors.toMap(plugin -> plugin.getClass().getSimpleName(), plugin -> plugin.getSystemIndexDescriptors(settings)) + ) + ); + final SystemIndices systemIndices = new SystemIndices(systemIndexDescriptorMap); final ClusterModule clusterModule = new ClusterModule( settings, clusterService, @@ -819,15 +827,6 @@ protected Node( .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - final Map> systemIndexDescriptorMap = Collections.unmodifiableMap( - pluginsService.filterPlugins(SystemIndexPlugin.class) - .stream() - .collect( - Collectors.toMap(plugin -> plugin.getClass().getSimpleName(), plugin -> plugin.getSystemIndexDescriptors(settings)) - ) - ); - final SystemIndices systemIndices = new SystemIndices(systemIndexDescriptorMap); - final RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index e11012a23fce7..bffb50cc63401 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ 
b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -171,9 +171,9 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("port", "default:false;alias:po;desc:bound transport port"); table.addCell("http_address", "default:false;alias:http;desc:bound http address"); - table.addCell("version", "default:false;alias:v;desc:es version"); - table.addCell("type", "default:false;alias:t;desc:es distribution type"); - table.addCell("build", "default:false;alias:b;desc:es build hash"); + table.addCell("version", "default:false;alias:v;desc:os version"); + table.addCell("type", "default:false;alias:t;desc:os distribution type"); + table.addCell("build", "default:false;alias:b;desc:os build hash"); table.addCell("jdk", "default:false;alias:j;desc:jdk version"); table.addCell("disk.total", "default:false;alias:dt,diskTotal;text-align:right;desc:total disk space"); table.addCell("disk.used", "default:false;alias:du,diskUsed;text-align:right;desc:used disk space"); diff --git a/server/src/main/java/org/opensearch/search/ResourceType.java b/server/src/main/java/org/opensearch/search/ResourceType.java index 5bbcd7de1c2ce..fe5ce4dd2bb50 100644 --- a/server/src/main/java/org/opensearch/search/ResourceType.java +++ b/server/src/main/java/org/opensearch/search/ResourceType.java @@ -8,12 +8,18 @@ package org.opensearch.search; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + /** * Enum to hold the resource type */ +@PublicApi(since = "2.x") public enum ResourceType { CPU("cpu"), - JVM("jvm"); + MEMORY("memory"); private final String name; @@ -35,7 +41,11 @@ public static ResourceType fromName(String s) { throw new IllegalArgumentException("Unknown resource type: [" + s + "]"); } - private String getName() { + public static void writeTo(StreamOutput out, ResourceType resourceType) throws IOException { + out.writeString(resourceType.getName()); + } + + public String getName() { return name; } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 3e8ed3070e4ef..c26c5d63a3573 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -71,7 +71,7 @@ public class SearchBackpressureService extends AbstractLifecycleComponent implem TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, (nodeDuressTrackers) -> nodeDuressTrackers.isResourceInDuress(ResourceType.CPU), TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER, - (nodeDuressTrackers) -> isHeapTrackingSupported() && nodeDuressTrackers.isResourceInDuress(ResourceType.JVM), + (nodeDuressTrackers) -> isHeapTrackingSupported() && nodeDuressTrackers.isResourceInDuress(ResourceType.MEMORY), TaskResourceUsageTrackerType.ELAPSED_TIME_TRACKER, (nodeDuressTrackers) -> true ); @@ -105,7 +105,7 @@ public SearchBackpressureService( ) ); put( - ResourceType.JVM, + ResourceType.MEMORY, new NodeDuressTracker( () -> JvmStats.jvmStats().getMem().getHeapUsedPercent() / 100.0 >= settings.getNodeDuressSettings() .getHeapThreshold(), diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java index 6955a5927ca23..5a4a25ec832bd 100644 --- 
a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java @@ -92,7 +92,7 @@ void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitF Collection childrenNodes = taskManager.startBanOnChildrenNodes(task.getId(), () -> { logger.trace("child tasks of parent [{}] are completed", taskId); groupedListener.onResponse(null); - }); + }, reason); taskManager.cancel(task, reason, () -> { logger.trace("task [{}] is cancelled", taskId); groupedListener.onResponse(null); diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index a49968ab85e89..6ad06da9d2fa2 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -510,17 +510,22 @@ public Set getBannedTaskIds() { return Collections.unmodifiableSet(banedParents.keySet()); } + public Collection startBanOnChildrenNodes(long taskId, Runnable onChildTasksCompleted) { + return startBanOnChildrenNodes(taskId, onChildTasksCompleted, "unknown"); + } + /** * Start rejecting new child requests as the parent task was cancelled. * * @param taskId the parent task id * @param onChildTasksCompleted called when all child tasks are completed or failed + * @param reason the ban reason * @return the set of current nodes that have outstanding child tasks */ - public Collection startBanOnChildrenNodes(long taskId, Runnable onChildTasksCompleted) { + public Collection startBanOnChildrenNodes(long taskId, Runnable onChildTasksCompleted, String reason) { final CancellableTaskHolder holder = cancellableTasks.get(taskId); if (holder != null) { - return holder.startBan(onChildTasksCompleted); + return holder.startBan(onChildTasksCompleted, reason); } else { onChildTasksCompleted.run(); return Collections.emptySet(); @@ -585,6 +590,7 @@ private static class CancellableTaskHolder { private List cancellationListeners = null; private Map childTasksPerNode = null; private boolean banChildren = false; + private String banReason; private List childTaskCompletedListeners = null; CancellableTaskHolder(CancellableTask task) { @@ -662,7 +668,7 @@ public CancellableTask getTask() { synchronized void registerChildNode(DiscoveryNode node) { if (banChildren) { - throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks"); + throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks, " + banReason); } if (childTasksPerNode == null) { childTasksPerNode = new HashMap<>(); @@ -686,11 +692,13 @@ void unregisterChildNode(DiscoveryNode node) { notifyListeners(listeners); } - Set startBan(Runnable onChildTasksCompleted) { + Set startBan(Runnable onChildTasksCompleted, String reason) { final Set pendingChildNodes; final Runnable toRun; synchronized (this) { banChildren = true; + assert reason != null; + banReason = reason; if (childTasksPerNode == null) { pendingChildNodes = Collections.emptySet(); } else { diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec new file mode 100644 index 0000000000000..e030a813373c1 --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -0,0 +1 @@ +org.opensearch.index.codec.composite.Composite99Codec diff --git 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 7d706411b6f0d..5b3b08377f19b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -428,7 +428,7 @@ public void testRegisterAndExecuteChildTaskWhileParentTaskIsBeingCanceled() thro ); assertThat(cancelledException.getMessage(), startsWith("Task cancelled before it started:")); CountDownLatch latch = new CountDownLatch(1); - taskManager.startBanOnChildrenNodes(parentTaskId.getId(), latch::countDown); + taskManager.startBanOnChildrenNodes(parentTaskId.getId(), latch::countDown, cancelledException.getMessage()); assertTrue("onChildTasksCompleted() is not invoked", latch.await(1, TimeUnit.SECONDS)); } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index f2d99a51f1c9a..97706927ba857 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -33,6 +33,7 @@ package org.opensearch.cluster; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroupMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; @@ -69,6 +70,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.plugins.ClusterPlugin; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; @@ -327,6 +329,14 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { ); } + public void testQueryGroupMetadataRegister() { + List customEntries = ClusterModule.getNamedWriteables(); + assertTrue( + customEntries.stream() + .anyMatch(entry -> entry.categoryClass == Metadata.Custom.class && entry.name.equals(QueryGroupMetadata.TYPE)) + ); + } + private static ClusterPlugin existingShardsAllocatorPlugin(final String allocatorName) { return new ClusterPlugin() { @Override diff --git a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java new file mode 100644 index 0000000000000..d70a9ce5e10cd --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.Diff; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.ResourceType; +import org.opensearch.test.AbstractDiffableSerializationTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.opensearch.cluster.metadata.QueryGroupTests.createRandomQueryGroup; + +public class QueryGroupMetadataTests extends AbstractDiffableSerializationTestCase { + + public void testToXContent() throws IOException { + long updatedAt = 1720047207; + QueryGroupMetadata queryGroupMetadata = new QueryGroupMetadata( + Map.of( + "ajakgakg983r92_4242", + new QueryGroup( + "test", + "ajakgakg983r92_4242", + QueryGroup.ResiliencyMode.ENFORCED, + Map.of(ResourceType.MEMORY, 0.5), + updatedAt + ) + ) + ); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + queryGroupMetadata.toXContent(builder, null); + builder.endObject(); + assertEquals( + "{\"ajakgakg983r92_4242\":{\"_id\":\"ajakgakg983r92_4242\",\"name\":\"test\",\"resiliency_mode\":\"enforced\",\"updatedAt\":1720047207,\"resourceLimits\":{\"memory\":0.5}}}", + builder.toString() + ); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Collections.singletonList( + new NamedWriteableRegistry.Entry(QueryGroupMetadata.class, QueryGroupMetadata.TYPE, QueryGroupMetadata::new) + ) + ); + } + + @Override + protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + final QueryGroup queryGroup = createRandomQueryGroup("asdfakgjwrir23r25"); + final QueryGroupMetadata queryGroupMetadata = new QueryGroupMetadata(Map.of(queryGroup.get_id(), queryGroup)); + return queryGroupMetadata; + } + + @Override + protected Writeable.Reader> diffReader() { + return QueryGroupMetadata::readDiffFrom; + } + + @Override + protected Metadata.Custom doParseInstance(XContentParser parser) throws IOException { + return QueryGroupMetadata.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return QueryGroupMetadata::new; + } + + @Override + protected QueryGroupMetadata createTestInstance() { + return new QueryGroupMetadata(getRandomQueryGroups()); + } + + private Map getRandomQueryGroups() { + QueryGroup qg1 = createRandomQueryGroup("1243gsgsdgs"); + QueryGroup qg2 = createRandomQueryGroup("lkajga8080"); + return Map.of(qg1.get_id(), qg1, qg2.get_id(), qg2); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java b/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java new file mode 100644 index 0000000000000..c564f0778e6f0 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java @@ -0,0 +1,158 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
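For orientation before the tests below, an illustrative construction of a QueryGroup using the schema these tests exercise (the id value is made up; production code would generate one, for example via UUIDs.randomBase64UUID()):

import java.util.Map;

import org.opensearch.cluster.metadata.QueryGroup;
import org.opensearch.search.ResourceType;

final class QueryGroupExample {
    static QueryGroup analyticsGroup() {
        return new QueryGroup(
            "analytics",                        // name
            "wfbdJoDaTfWR1PL2ZNyGbw",           // _id, a made-up base64 UUID
            QueryGroup.ResiliencyMode.ENFORCED, // resiliency mode: soft, enforced or monitor
            Map.of(ResourceType.MEMORY, 0.4),   // resource limit, as a fraction
            System.currentTimeMillis()          // updatedAt, in millis
        );
    }
}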
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.ResourceType; +import org.opensearch.test.AbstractSerializingTestCase; +import org.joda.time.Instant; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class QueryGroupTests extends AbstractSerializingTestCase<QueryGroup> { + + private static final List<QueryGroup.ResiliencyMode> allowedModes = List.of( + QueryGroup.ResiliencyMode.SOFT, + QueryGroup.ResiliencyMode.ENFORCED, + QueryGroup.ResiliencyMode.MONITOR + ); + + static QueryGroup createRandomQueryGroup(String _id) { + String name = randomAlphaOfLength(10); + Map<ResourceType, Object> resourceLimit = new HashMap<>(); + resourceLimit.put(ResourceType.MEMORY, randomDoubleBetween(0.0, 0.80, false)); + return new QueryGroup(name, _id, randomMode(), resourceLimit, Instant.now().getMillis()); + } + + private static QueryGroup.ResiliencyMode randomMode() { + return allowedModes.get(randomIntBetween(0, allowedModes.size() - 1)); + } + + /** + * Parses to a new instance using the provided {@link XContentParser} + * + * @param parser + */ + @Override + protected QueryGroup doParseInstance(XContentParser parser) throws IOException { + return QueryGroup.fromXContent(parser); + } + + /** + * Returns a {@link Writeable.Reader} that can be used to de-serialize the instance + */ + @Override + protected Writeable.Reader<QueryGroup> instanceReader() { + return QueryGroup::new; + } + + /** + * Creates a random test instance to use in the tests. This method will be + * called multiple times during test execution and should return a different + * random instance each time it is called.
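+ * Here it delegates to {@link #createRandomQueryGroup} with a fixed id, so repeated calls differ only in the randomized name, mode, and memory limit.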
+ */ + @Override + protected QueryGroup createTestInstance() { + return createRandomQueryGroup("1232sfraeradf_"); + } + + public void testNullName() { + assertThrows( + NullPointerException.class, + () -> new QueryGroup(null, "_id", randomMode(), Collections.emptyMap(), Instant.now().getMillis()) + ); + } + + public void testNullId() { + assertThrows( + NullPointerException.class, + () -> new QueryGroup("Dummy", null, randomMode(), Collections.emptyMap(), Instant.now().getMillis()) + ); + } + + public void testNullResourceLimits() { + assertThrows(NullPointerException.class, () -> new QueryGroup("analytics", "_id", randomMode(), null, Instant.now().getMillis())); + } + + public void testEmptyResourceLimits() { + assertThrows( + IllegalArgumentException.class, + () -> new QueryGroup("analytics", "_id", randomMode(), Collections.emptyMap(), Instant.now().getMillis()) + ); + } + + public void testIllegalQueryGroupMode() { + assertThrows( + NullPointerException.class, + () -> new QueryGroup("analytics", "_id", null, Map.of(ResourceType.MEMORY, (Object) 0.4), Instant.now().getMillis()) + ); + } + + public void testInvalidResourceLimitWhenInvalidSystemResourceValueIsGiven() { + assertThrows( + IllegalArgumentException.class, + () -> new QueryGroup( + "analytics", + "_id", + randomMode(), + Map.of(ResourceType.MEMORY, (Object) randomDoubleBetween(1.1, 1.8, false)), + Instant.now().getMillis() + ) + ); + } + + public void testValidQueryGroup() { + QueryGroup queryGroup = new QueryGroup( + "analytics", + "_id", + randomMode(), + Map.of(ResourceType.MEMORY, randomDoubleBetween(0.01, 0.8, false)), + Instant.ofEpochMilli(1717187289).getMillis() + ); + + assertNotNull(queryGroup.getName()); + assertEquals("analytics", queryGroup.getName()); + assertNotNull(queryGroup.getResourceLimits()); + assertFalse(queryGroup.getResourceLimits().isEmpty()); + assertEquals(1, queryGroup.getResourceLimits().size()); + assertTrue(allowedModes.contains(queryGroup.getResiliencyMode())); + assertEquals(1717187289, queryGroup.getUpdatedAtInMillis()); + } + + public void testToXContent() throws IOException { + long currentTimeInMillis = Instant.now().getMillis(); + String queryGroupId = UUIDs.randomBase64UUID(); + QueryGroup queryGroup = new QueryGroup( + "TestQueryGroup", + queryGroupId, + QueryGroup.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.30, ResourceType.MEMORY, 0.40), + currentTimeInMillis + ); + XContentBuilder builder = JsonXContent.contentBuilder(); + queryGroup.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals( + "{\"_id\":\"" + + queryGroupId + + "\",\"name\":\"TestQueryGroup\",\"resiliency_mode\":\"enforced\",\"updatedAt\":" + + currentTimeInMillis + + ",\"resourceLimits\":{\"cpu\":0.3,\"memory\":0.4}}", + builder.toString() + ); + } +} diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java index e90850de3fe33..8ad8bcda95f40 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java @@ -85,7 +85,10 @@ private void allocateAllUnassignedBatch(final RoutingAllocation allocation) { final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); List<ShardRouting> shardsToBatch = new ArrayList<>(); while (iterator.hasNext()) { - shardsToBatch.add(iterator.next()); + ShardRouting unassignedShardRouting =
iterator.next(); + if (unassignedShardRouting.primary()) { + shardsToBatch.add(unassignedShardRouting); + } } batchAllocator.allocateUnassignedBatch(shardsToBatch, allocation); } @@ -180,6 +183,35 @@ public void testInitializePrimaryShards() { assertEquals(2, routingAllocation.routingNodes().getInitialPrimariesIncomingRecoveries(node1.getId())); } + public void testInitializeOnlyPrimaryUnassignedShardsIgnoreReplicaShards() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); + setUpShards(1); + final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); + + for (ShardId shardId : shardsInBatch) { + batchAllocator.addShardData( + node1, + "allocId-0", + shardId, + true, + new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName()), + null + ); + } + + allocateAllUnassignedBatch(routingAllocation); + + List<ShardRouting> initializingShards = routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(1, initializingShards.size()); + assertTrue(shardsInBatch.contains(initializingShards.get(0).shardId())); + assertTrue(initializingShards.get(0).primary()); + assertEquals(1, routingAllocation.routingNodes().getInitialPrimariesIncomingRecoveries(node1.getId())); + List<ShardRouting> unassignedShards = routingAllocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + assertEquals(1, unassignedShards.size()); + assertTrue(!unassignedShards.get(0).primary()); + } + public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AllocationDeciders allocationDeciders = randomAllocationDeciders( @@ -258,7 +290,7 @@ private RoutingAllocation routingAllocationWithOnePrimary( .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) .build(); - return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, null, System.nanoTime()); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, null, System.nanoTime()); } private RoutingAllocation routingAllocationWithMultiplePrimaries( diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 02471c9cdbbbe..152a6dba6c032 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -48,7 +48,7 @@ public class ClusterMetadataManifestTests extends OpenSearchTestCase { public void testClusterMetadataManifestXContentV0() throws IOException { - UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path", CODEC_V0); ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() .clusterTerm(1L) .stateVersion(1L) @@ -74,7 +74,7 @@ public void testClusterMetadataManifestXContentV0() throws IOException { } public void testClusterMetadataManifestXContentV1() throws IOException { -
UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path", CODEC_V1); ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() .clusterTerm(1L) .stateVersion(1L) @@ -619,6 +619,24 @@ public void testUploadedIndexMetadataSerializationEqualsHashCode() { ); } + public void testUploadedIndexMetadataWithoutComponentPrefix() throws IOException { + final UploadedIndexMetadata originalUploadedIndexMetadata = new UploadedIndexMetadata( + "test-index", + "test-index-uuid", + "test_file_name", + CODEC_V1 + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalUploadedIndexMetadata.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final UploadedIndexMetadata fromXContentUploadedIndexMetadata = UploadedIndexMetadata.fromXContent(parser, 1L); + assertEquals(originalUploadedIndexMetadata, fromXContentUploadedIndexMetadata); + } + } + private UploadedIndexMetadata randomlyChangingUploadedIndexMetadata(UploadedIndexMetadata uploadedIndexMetadata) { switch (randomInt(2)) { case 0: @@ -642,4 +660,5 @@ private UploadedIndexMetadata randomlyChangingUploadedIndexMetadata(UploadedInde } return uploadedIndexMetadata; } + } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java index fe9ed57fa77b8..3f2edd1a6c5a5 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java @@ -8,9 +8,7 @@ package org.opensearch.gateway.remote; -import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.AbstractNamedDiffable; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; @@ -23,11 +21,8 @@ import org.opensearch.common.util.TestCapturingListener; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.compress.Compressor; import org.opensearch.core.compress.NoneCompressor; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.gateway.remote.model.RemoteClusterBlocks; import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; @@ -51,6 +46,10 @@ import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom2; +import static 
org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom3; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom4; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; @@ -338,22 +337,22 @@ public void testGetAsyncMetadataReadAction_Exception() throws IOException, Inter public void testGetUpdatedCustoms() { Map<String, ClusterState.Custom> previousCustoms = Map.of( - TestCustom1.TYPE, - new TestCustom1("data1"), - TestCustom2.TYPE, - new TestCustom2("data2"), - TestCustom3.TYPE, - new TestCustom3("data3") + TestClusterStateCustom1.TYPE, + new TestClusterStateCustom1("data1"), + TestClusterStateCustom2.TYPE, + new TestClusterStateCustom2("data2"), + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3") ); ClusterState previousState = ClusterState.builder(new ClusterName("test-cluster")).customs(previousCustoms).build(); Map<String, ClusterState.Custom> currentCustoms = Map.of( - TestCustom2.TYPE, - new TestCustom2("data2"), - TestCustom3.TYPE, - new TestCustom3("data3-changed"), - TestCustom4.TYPE, - new TestCustom4("data4") + TestClusterStateCustom2.TYPE, + new TestClusterStateCustom2("data2"), + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3-changed"), + TestClusterStateCustom4.TYPE, + new TestClusterStateCustom4("data4") ); ClusterState currentState = ClusterState.builder(new ClusterName("test-cluster")).customs(currentCustoms).build(); @@ -368,136 +367,14 @@ public void testGetUpdatedCustoms() { assertThat(customsDiff.getDeletes(), is(Collections.emptyList())); Map<String, ClusterState.Custom> expectedCustoms = Map.of( - TestCustom3.TYPE, - new TestCustom3("data3-changed"), - TestCustom4.TYPE, - new TestCustom4("data4") + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3-changed"), + TestClusterStateCustom4.TYPE, + new TestClusterStateCustom4("data4") ); customsDiff = remoteClusterStateAttributesManager.getUpdatedCustoms(currentState, previousState, true, false); assertThat(customsDiff.getUpserts(), is(expectedCustoms)); - assertThat(customsDiff.getDeletes(), is(List.of(TestCustom1.TYPE))); - } - - private static abstract class AbstractTestCustom extends AbstractNamedDiffable<Custom> implements ClusterState.Custom { - - private final String value; - - AbstractTestCustom(String value) { - this.value = value; - } - - AbstractTestCustom(StreamInput in) throws IOException { - this.value = in.readString(); - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder; - } - - @Override - public boolean isPrivate() { - return true; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - AbstractTestCustom that = (AbstractTestCustom) o; - - if (!value.equals(that.value)) return false; - - return true; - } - - @Override - public int hashCode() { - return value.hashCode(); - } - } - - private static class TestCustom1 extends AbstractTestCustom { - - private static final String TYPE = "custom_1"; - - TestCustom1(String value) { - super(value); - } - - TestCustom1(StreamInput in) throws
IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - - private static class TestCustom2 extends AbstractTestCustom { - - private static final String TYPE = "custom_2"; - - TestCustom2(String value) { - super(value); - } - - TestCustom2(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - - private static class TestCustom3 extends AbstractTestCustom { - - private static final String TYPE = "custom_3"; - - TestCustom3(String value) { - super(value); - } - - TestCustom3(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - - private static class TestCustom4 extends AbstractTestCustom { - - private static final String TYPE = "custom_4"; - - TestCustom4(String value) { - super(value); - } - - TestCustom4(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } + assertThat(customsDiff.getDeletes(), is(List.of(TestClusterStateCustom1.TYPE))); } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index d983a4d8c4027..6cd9cbbf13848 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -9,12 +9,14 @@ package org.opensearch.gateway.remote; import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.RepositoryCleanupInProgress; -import org.opensearch.cluster.RepositoryCleanupInProgress.Entry; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; @@ -22,10 +24,12 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.routing.remote.NoopRemoteRoutingTableService; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.CheckedRunnable; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -38,6 +42,7 @@ import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; @@ -45,13 +50,17 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import 
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; -import org.opensearch.gateway.remote.model.RemoteIndexMetadata; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; @@ -61,7 +70,6 @@ import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.TestCustomMetadata; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -75,7 +83,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -92,23 +99,51 @@ import java.util.stream.Stream; import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatcher; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata3; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom3; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.FORMAT_PARAMS; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getFormattedIndexFileName; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocksTests.randomClusterBlocks; import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; +import static 
org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.readFrom; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodesTests.getDiscoveryNodes; import static org.opensearch.gateway.remote.model.RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettingsTests.getHashesOfConsistentSettings; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_METADATA_FORMAT; import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTINGS_METADATA_FORMAT; import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadataTests.getTemplatesMetadata; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -120,11 +155,19 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class RemoteClusterStateServiceTests extends OpenSearchTestCase { @@ -135,11 +178,22 @@ public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private Supplier<RepositoriesService> repositoriesServiceSupplier; private RepositoriesService repositoriesService; private
BlobStoreRepository blobStoreRepository; + private Compressor compressor; private BlobStore blobStore; private Settings settings; private boolean publicationEnabled; + private NamedWriteableRegistry namedWriteableRegistry; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + private static final String NODE_ID = "test-node"; + private static final String COORDINATION_METADATA_FILENAME = "coordination-metadata-file__1"; + private static final String PERSISTENT_SETTINGS_FILENAME = "persistent-settings-file__1"; + private static final String TRANSIENT_SETTINGS_FILENAME = "transient-settings-file__1"; + private static final String TEMPLATES_METADATA_FILENAME = "templates-metadata-file__1"; + private static final String DISCOVERY_NODES_FILENAME = "discovery-nodes-file__1"; + private static final String CLUSTER_BLOCKS_FILENAME = "cluster-blocks-file__1"; + private static final String HASHES_OF_CONSISTENT_SETTINGS_FILENAME = "consistent-settings-hashes-file__1"; + @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); @@ -164,6 +218,11 @@ public void setup() { .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .build(); + List<NamedWriteableRegistry.Entry> writeableEntries = ClusterModule.getNamedWriteables(); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata1.TYPE, CustomMetadata1::new)); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata2.TYPE, CustomMetadata2::new)); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata3.TYPE, CustomMetadata3::new)); + namedWriteableRegistry = new NamedWriteableRegistry(writeableEntries); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = mock(ClusterService.class); @@ -176,6 +235,7 @@ public void setup() { ).flatMap(Function.identity()).collect(toList()) ); + compressor = new DeflateCompressor(); blobStoreRepository = mock(BlobStoreRepository.class); blobStore = mock(BlobStore.class); when(blobStoreRepository.blobStore()).thenReturn(blobStore); @@ -191,7 +251,7 @@ public void setup() { () -> 0L, threadPool, List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), - writableRegistry() + namedWriteableRegistry ); } @@ -275,6 +335,7 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getSettingsMetadata(), notNullValue()); assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getCustomMetadataMap().containsKey(CustomMetadata1.TYPE), is(true)); assertThat(manifest.getClusterBlocksMetadata(), nullValue()); assertThat(manifest.getDiscoveryNodesMetadata(), nullValue()); assertThat(manifest.getTransientSettingsMetadata(), nullValue()); @@ -298,7 +359,12 @@ public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException writableRegistry() ); final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()) - .customs(Map.of(RepositoryCleanupInProgress.TYPE, new RepositoryCleanupInProgress(List.of(new Entry("test-repo", 10L))))) + .customs( + Map.of( + RepositoryCleanupInProgress.TYPE, + new RepositoryCleanupInProgress(List.of(new RepositoryCleanupInProgress.Entry("test-repo", 10L))) + ) + ) + .build();
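+ // RepositoryCleanupInProgress is a cluster state custom, so with publication enabled it should appear in the manifest's cluster state custom map (asserted below)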
mockBlobStoreObjects(); remoteClusterStateService.start(); @@ -330,6 +396,7 @@ public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException assertThat(manifest.getSettingsMetadata(), notNullValue()); assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getCustomMetadataMap().containsKey(CustomMetadata1.TYPE), is(true)); assertThat(manifest.getClusterStateCustomMap().size(), is(1)); assertThat(manifest.getClusterStateCustomMap().containsKey(RepositoryCleanupInProgress.TYPE), is(true)); } @@ -388,7 +455,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { .provideStream(0) .getInputStream() .readAllBytes(); - IndexMetadata writtenIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.deserialize( + IndexMetadata writtenIndexMetadata = INDEX_METADATA_FORMAT.deserialize( capturedWriteContext.get("metadata").getFileName(), blobStoreRepository.getNamedXContentRegistry(), new BytesArray(writtenBytes) @@ -445,24 +512,35 @@ public void testTimeoutWhileWritingManifestFile() throws IOException { ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer((i) -> { // For Global Metadata - actionListenerArgumentCaptor.getValue().onResponse(null); - return null; - }).doAnswer((i) -> { // For Index Metadata - actionListenerArgumentCaptor.getValue().onResponse(null); - return null; - }).doAnswer((i) -> { + doAnswer((i) -> { // For Manifest file perform No Op, so latch in code will timeout return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); - try { - remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); - } catch (Exception e) { - assertTrue(e instanceof RemoteStateTransferException); - assertTrue(e.getMessage().contains("Timed out waiting for transfer of following metadata to complete")); - } + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + when( + spiedService.writeMetadataInParallel( + any(), + anyList(), + anyMap(), + anyMap(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyMap(), + anyBoolean(), + anyList() + ) + ).thenReturn(new RemoteClusterStateUtils.UploadedMetadataResults()); + RemoteStateTransferException ex = expectThrows( + RemoteStateTransferException.class, + () -> spiedService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + assertTrue(ex.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); } public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { @@ -658,6 +736,991 @@ public void testWriteIncrementalMetadataSuccessWhenPublicationEnabled() throws I assertThat(manifest.getIndicesRouting().size(), is(1)); } + public void testTimeoutWhileWritingMetadata() throws IOException { + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + doNothing().when(container).asyncBlobUpload(any(), any()); + int writeTimeout = 2; + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", writeTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + remoteClusterStateService.start(); + RemoteStateTransferException exception = assertThrows( + RemoteStateTransferException.class, + () ->
remoteClusterStateService.writeMetadataInParallel( + ClusterState.EMPTY_STATE, + emptyList(), + emptyMap(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyMap(), + true, + emptyList() + ) + ); + assertTrue(exception.getMessage().startsWith("Timed out waiting for transfer of following metadata to complete")); + } + + public void testGetClusterStateForManifest_IncludeEphemeral() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + RemoteReadResult mockedResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager); + ArgumentCaptor<LatchedActionListener<RemoteReadResult>> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), listenerArgumentCaptor.capture())) + .thenReturn(() -> listenerArgumentCaptor.getValue().onResponse(mockedResult)); + when(mockedResult.getComponent()).thenReturn(COORDINATION_METADATA); + RemoteClusterStateService mockService = spy(remoteClusterStateService); + mockService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, true); + verify(mockService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(manifest.getCustomMetadataMap()), + eq(true), + eq(true), + eq(true), + eq(true), + eq(true), + eq(true), + eq(manifest.getIndicesRouting()), + eq(true), + eq(manifest.getClusterStateCustomMap()), + eq(true) + ); + } + + public void testGetClusterStateForManifest_ExcludeEphemeral() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + RemoteReadResult mockedResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + ArgumentCaptor<LatchedActionListener<RemoteReadResult>> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); +
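// each stubbed read action completes its captured listener inline with the mocked result, so no blob store round-trip occurs +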
when(mockedGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), listenerArgumentCaptor.capture())) + .thenReturn(() -> listenerArgumentCaptor.getValue().onResponse(mockedResult)); + when(mockedResult.getComponent()).thenReturn(COORDINATION_METADATA); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager); + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + spiedService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, false); + verify(spiedService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(manifest.getCustomMetadataMap()), + eq(true), + eq(true), + eq(false), + eq(true), + eq(false), + eq(false), + eq(emptyList()), + eq(false), + eq(emptyMap()), + eq(false) + ); + } + + public void testGetClusterStateFromManifest_CodecV1() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().codecVersion(CODEC_V1).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + ArgumentCaptor<LatchedActionListener<RemoteReadResult>> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(new RemoteReadResult(indexMetadata, INDEX, INDEX)) + ); + when(mockedGlobalMetadataManager.getGlobalMetadata(anyString(), eq(manifest))).thenReturn(Metadata.EMPTY_METADATA); + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + spiedService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, true); + verify(spiedService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(emptyMap()), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(emptyList()), + eq(false), + eq(emptyMap()), + eq(false) + ); + verify(mockedGlobalMetadataManager, times(1)).getGlobalMetadata(eq(manifest.getClusterUUID()), eq(manifest)); + } + + public void testGetClusterStateUsingDiffFailWhenDiffManifestAbsent() { + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().build(); + ClusterState previousState =
ClusterState.EMPTY_STATE; + AssertionError error = assertThrows( + AssertionError.class, + () -> remoteClusterStateService.getClusterStateUsingDiff(manifest, previousState, "test-node") + ); + assertEquals("Diff manifest null which is required for downloading cluster state", error.getMessage()); + } + + public void testGetClusterStateUsingDiff_NoDiff() throws IOException { + ClusterStateDiffManifest diffManifest = ClusterStateDiffManifest.builder().build(); + ClusterState clusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .diffManifest(diffManifest) + .stateUUID(clusterState.stateUUID()) + .stateVersion(clusterState.version()) + .metadataVersion(clusterState.metadata().version()) + .clusterUUID(clusterState.getMetadata().clusterUUID()) + .routingTableVersion(clusterState.routingTable().version()) + .build(); + ClusterState updatedClusterState = remoteClusterStateService.getClusterStateUsingDiff(manifest, clusterState, "test-node"); + assertEquals(clusterState.getClusterName(), updatedClusterState.getClusterName()); + assertEquals(clusterState.metadata().clusterUUID(), updatedClusterState.metadata().clusterUUID()); + assertEquals(clusterState.metadata().version(), updatedClusterState.metadata().version()); + assertEquals(clusterState.metadata().coordinationMetadata(), updatedClusterState.metadata().coordinationMetadata()); + assertEquals(clusterState.metadata().getIndices(), updatedClusterState.metadata().getIndices()); + assertEquals(clusterState.metadata().templates(), updatedClusterState.metadata().templates()); + assertEquals(clusterState.metadata().persistentSettings(), updatedClusterState.metadata().persistentSettings()); + assertEquals(clusterState.metadata().transientSettings(), updatedClusterState.metadata().transientSettings()); + assertEquals(clusterState.metadata().getCustoms(), updatedClusterState.metadata().getCustoms()); + assertEquals(clusterState.metadata().hashesOfConsistentSettings(), updatedClusterState.metadata().hashesOfConsistentSettings()); + assertEquals(clusterState.getCustoms(), updatedClusterState.getCustoms()); + assertEquals(clusterState.stateUUID(), updatedClusterState.stateUUID()); + assertEquals(clusterState.version(), updatedClusterState.version()); + assertEquals(clusterState.getRoutingTable().version(), updatedClusterState.getRoutingTable().version()); + assertEquals(clusterState.getRoutingTable().getIndicesRouting(), updatedClusterState.getRoutingTable().getIndicesRouting()); + assertEquals(clusterState.getNodes(), updatedClusterState.getNodes()); + assertEquals(clusterState.getBlocks(), updatedClusterState.getBlocks()); + } + + public void testGetClusterStateUsingDiff() throws IOException { + ClusterState clusterState = generateClusterStateWithAllAttributes().build(); + ClusterState.Builder expectedClusterStateBuilder = ClusterState.builder(clusterState); + Metadata.Builder mb = Metadata.builder(clusterState.metadata()); + ClusterStateDiffManifest.Builder diffManifestBuilder = ClusterStateDiffManifest.builder(); + ClusterMetadataManifest.Builder manifestBuilder = ClusterMetadataManifest.builder(); + BlobContainer blobContainer = mockBlobStoreObjects(); + if (randomBoolean()) { + // updated coordination metadata + CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder() + .term(clusterState.metadata().coordinationMetadata().term() + 1) + .build(); + mb.coordinationMetadata(coordinationMetadata); + diffManifestBuilder.coordinationMetadataUpdated(true); 
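+ // the mocked blob container serves the serialized coordination metadata back when getClusterStateUsingDiff downloads the diff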
+ manifestBuilder.coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME)); + when(blobContainer.readBlob(COORDINATION_METADATA_FILENAME)).thenAnswer(i -> { + BytesReference bytes = COORDINATION_METADATA_FORMAT.serialize( + coordinationMetadata, + COORDINATION_METADATA_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated templates + TemplatesMetadata templatesMetadata = TemplatesMetadata.builder() + .put( + IndexTemplateMetadata.builder("template" + randomAlphaOfLength(3)) + .patterns(Arrays.asList("bar-*", "foo-*")) + .settings(Settings.builder().put("random_index_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)).build()) + .build() + ) + .build(); + mb.templates(templatesMetadata); + diffManifestBuilder.templatesMetadataUpdated(true); + manifestBuilder.templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME)); + when(blobContainer.readBlob(TEMPLATES_METADATA_FILENAME)).thenAnswer(i -> { + BytesReference bytes = TEMPLATES_METADATA_FORMAT.serialize( + templatesMetadata, + TEMPLATES_METADATA_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated persistent settings + Settings persistentSettings = Settings.builder() + .put("random_persistent_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + mb.persistentSettings(persistentSettings); + diffManifestBuilder.settingsMetadataUpdated(true); + manifestBuilder.settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME)); + when(blobContainer.readBlob(PERSISTENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = RemotePersistentSettingsMetadata.SETTINGS_METADATA_FORMAT.serialize( + persistentSettings, + PERSISTENT_SETTINGS_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated transient settings + Settings transientSettings = Settings.builder() + .put("random_transient_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + mb.transientSettings(transientSettings); + diffManifestBuilder.transientSettingsMetadataUpdate(true); + manifestBuilder.transientSettingsMetadata( + new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME) + ); + when(blobContainer.readBlob(TRANSIENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = RemoteTransientSettingsMetadata.SETTINGS_METADATA_FORMAT.serialize( + transientSettings, + TRANSIENT_SETTINGS_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated customs + CustomMetadata2 addedCustom = new CustomMetadata2(randomAlphaOfLength(10)); + mb.putCustom(addedCustom.getWriteableName(), addedCustom); + diffManifestBuilder.customMetadataUpdated(Collections.singletonList(addedCustom.getWriteableName())); + manifestBuilder.customMetadataMap( + Map.of(addedCustom.getWriteableName(), new UploadedMetadataAttribute(addedCustom.getWriteableName(), "custom-md2-file__1")) + ); + when(blobContainer.readBlob("custom-md2-file__1")).thenAnswer(i -> { + ChecksumWritableBlobStoreFormat<Metadata.Custom> customMetadataFormat = new ChecksumWritableBlobStoreFormat<>( + "custom", + is -> readFrom(is,
namedWriteableRegistry, addedCustom.getWriteableName()) + ); + BytesReference bytes = customMetadataFormat.serialize(addedCustom, "custom-md2-file__1", compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + Set<String> customsToRemove = clusterState.metadata().customs().keySet(); + customsToRemove.forEach(mb::removeCustom); + diffManifestBuilder.customMetadataDeleted(new ArrayList<>(customsToRemove)); + } + if (randomBoolean()) { + // updated hashes of consistent settings + DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(Map.of("secure_setting_key", "secure_setting_value")); + mb.hashesOfConsistentSettings(hashesOfConsistentSettings); + diffManifestBuilder.hashesOfConsistentSettingsUpdated(true); + manifestBuilder.hashesOfConsistentSettings( + new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME) + ); + when(blobContainer.readBlob(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = HASHES_OF_CONSISTENT_SETTINGS_FORMAT.serialize( + hashesOfConsistentSettings, + HASHES_OF_CONSISTENT_SETTINGS_FILENAME, + compressor + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated index metadata + IndexMetadata indexMetadata = new IndexMetadata.Builder("add-test-index").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "add-test-index-uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + mb.put(indexMetadata, true); + diffManifestBuilder.indicesUpdated(Collections.singletonList(indexMetadata.getIndex().getName())); + manifestBuilder.indices( + List.of( + new UploadedIndexMetadata(indexMetadata.getIndex().getName(), indexMetadata.getIndexUUID(), "add-test-index-file__2") + ) + ); + when(blobContainer.readBlob("add-test-index-file__2")).thenAnswer(i -> { + BytesReference bytes = INDEX_METADATA_FORMAT.serialize(indexMetadata, "add-test-index-file__2", compressor, FORMAT_PARAMS); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // remove index metadata + Set<String> indicesToDelete = clusterState.metadata().getIndices().keySet(); + indicesToDelete.forEach(mb::remove); + diffManifestBuilder.indicesDeleted(new ArrayList<>(indicesToDelete)); + } + if (randomBoolean()) { + // update nodes + DiscoveryNode node = new DiscoveryNode("node_id", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(node); + expectedClusterStateBuilder.nodes(nodesBuilder.build()); + diffManifestBuilder.discoveryNodesUpdated(true); + manifestBuilder.discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME)); + when(blobContainer.readBlob(DISCOVERY_NODES_FILENAME)).thenAnswer(invocationOnMock -> { + BytesReference bytes = DISCOVERY_NODES_FORMAT.serialize(nodesBuilder.build(), DISCOVERY_NODES_FILENAME, compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // update blocks + ClusterBlocks newClusterBlock = randomClusterBlocks(); + expectedClusterStateBuilder.blocks(newClusterBlock); + diffManifestBuilder.clusterBlocksUpdated(true); + manifestBuilder.clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME)); +
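// the serialized blocks below are returned by the mocked container when the updated blocks are read back +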
when(blobContainer.readBlob(CLUSTER_BLOCKS_FILENAME)).thenAnswer(invocationOnMock -> { + BytesReference bytes = CLUSTER_BLOCKS_FORMAT.serialize(newClusterBlock, CLUSTER_BLOCKS_FILENAME, compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + + } + ClusterState expectedClusterState = expectedClusterStateBuilder.metadata(mb).build(); + ClusterStateDiffManifest diffManifest = diffManifestBuilder.build(); + manifestBuilder.diffManifest(diffManifest) + .stateUUID(clusterState.stateUUID()) + .stateVersion(clusterState.version()) + .metadataVersion(clusterState.metadata().version()) + .clusterUUID(clusterState.getMetadata().clusterUUID()) + .routingTableVersion(clusterState.getRoutingTable().version()); + + remoteClusterStateService.start(); + ClusterState updatedClusterState = remoteClusterStateService.getClusterStateUsingDiff( + manifestBuilder.build(), + clusterState, + NODE_ID + ); + + assertEquals(expectedClusterState.getClusterName(), updatedClusterState.getClusterName()); + assertEquals(expectedClusterState.stateUUID(), updatedClusterState.stateUUID()); + assertEquals(expectedClusterState.version(), updatedClusterState.version()); + assertEquals(expectedClusterState.metadata().clusterUUID(), updatedClusterState.metadata().clusterUUID()); + assertEquals(expectedClusterState.getRoutingTable().version(), updatedClusterState.getRoutingTable().version()); + assertNotEquals(diffManifest.isClusterBlocksUpdated(), updatedClusterState.getBlocks().equals(clusterState.getBlocks())); + assertNotEquals(diffManifest.isDiscoveryNodesUpdated(), updatedClusterState.getNodes().equals(clusterState.getNodes())); + assertNotEquals( + diffManifest.isCoordinationMetadataUpdated(), + updatedClusterState.getMetadata().coordinationMetadata().equals(clusterState.getMetadata().coordinationMetadata()) + ); + assertNotEquals( + diffManifest.isTemplatesMetadataUpdated(), + updatedClusterState.getMetadata().templates().equals(clusterState.getMetadata().getTemplates()) + ); + assertNotEquals( + diffManifest.isSettingsMetadataUpdated(), + updatedClusterState.getMetadata().persistentSettings().equals(clusterState.getMetadata().persistentSettings()) + ); + assertNotEquals( + diffManifest.isTransientSettingsMetadataUpdated(), + updatedClusterState.getMetadata().transientSettings().equals(clusterState.getMetadata().transientSettings()) + ); + diffManifest.getIndicesUpdated().forEach(indexName -> { + IndexMetadata updatedIndexMetadata = updatedClusterState.metadata().index(indexName); + IndexMetadata originalIndexMetadata = clusterState.metadata().index(indexName); + assertNotEquals(originalIndexMetadata, updatedIndexMetadata); + }); + diffManifest.getCustomMetadataUpdated().forEach(customMetadataName -> { + Metadata.Custom updatedCustomMetadata = updatedClusterState.metadata().custom(customMetadataName); + Metadata.Custom originalCustomMetadata = clusterState.metadata().custom(customMetadataName); + assertNotEquals(originalCustomMetadata, updatedCustomMetadata); + }); + diffManifest.getClusterStateCustomUpdated().forEach(clusterStateCustomName -> { + ClusterState.Custom updateClusterStateCustom = updatedClusterState.customs().get(clusterStateCustomName); + ClusterState.Custom originalClusterStateCustom = clusterState.customs().get(clusterStateCustomName); + assertNotEquals(originalClusterStateCustom, updateClusterStateCustom); + }); + diffManifest.getIndicesRoutingUpdated().forEach(indexName -> { + IndexRoutingTable updatedIndexRoutingTable = 
updatedClusterState.getRoutingTable().getIndicesRouting().get(indexName); + IndexRoutingTable originalIndexingRoutingTable = clusterState.getRoutingTable().getIndicesRouting().get(indexName); + assertNotEquals(originalIndexingRoutingTable, updatedIndexRoutingTable); + }); + diffManifest.getIndicesDeleted() + .forEach(indexName -> { assertFalse(updatedClusterState.metadata().getIndices().containsKey(indexName)); }); + diffManifest.getCustomMetadataDeleted().forEach(customMetadataName -> { + assertFalse(updatedClusterState.metadata().customs().containsKey(customMetadataName)); + }); + diffManifest.getClusterStateCustomDeleted().forEach(clusterStateCustomName -> { + assertFalse(updatedClusterState.customs().containsKey(clusterStateCustomName)); + }); + diffManifest.getIndicesRoutingDeleted().forEach(indexName -> { + assertFalse(updatedClusterState.getRoutingTable().getIndicesRouting().containsKey(indexName)); + }); + } + + public void testReadClusterStateInParallel_TimedOut() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + BlobContainer container = mockBlobStoreObjects(); + int readTimeOut = 2; + Settings newSettings = Settings.builder().put("cluster.remote_store.state.read_timeout", readTimeOut + "s").build(); + clusterSettings.applySettings(newSettings); + when(container.readBlob(anyString())).thenAnswer(invocationOnMock -> { + Thread.sleep(readTimeOut * 1000 + 100); + return null; + }); + remoteClusterStateService.start(); + RemoteStateTransferException exception = expectThrows( + RemoteStateTransferException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + emptyList(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyList(), + true, + emptyMap(), + true + ) + ); + assertEquals("Timed out waiting to read cluster state from remote within timeout " + readTimeOut + "s", exception.getMessage()); + } + + public void testReadClusterStateInParallel_ExceptionDuringRead() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + BlobContainer container = mockBlobStoreObjects(); + Exception mockException = new IOException("mock exception"); + when(container.readBlob(anyString())).thenThrow(mockException); + remoteClusterStateService.start(); + RemoteStateTransferException exception = expectThrows( + RemoteStateTransferException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + emptyList(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyList(), + true, + emptyMap(), + true + ) + ); + assertEquals("Exception during reading cluster state from remote", exception.getMessage()); + assertTrue(exception.getSuppressed().length > 0); + assertEquals(mockException, exception.getSuppressed()[0]); + } + + public void testReadClusterStateInParallel_UnexpectedResult() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + // index already present in previous state + List<UploadedIndexMetadata> uploadedIndexMetadataList = new ArrayList<>( + List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2")) + ); + // new index
to be added + List<UploadedIndexMetadata> newIndicesToRead = List.of( + new UploadedIndexMetadata("test-index-1", "test-index-1-uuid", "test-index-1-file__2") + ); + uploadedIndexMetadataList.addAll(newIndicesToRead); + // existing custom metadata + Map<String, UploadedMetadataAttribute> uploadedCustomMetadataMap = new HashMap<>( + Map.of( + "custom_md_1", + new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"), + "custom_md_2", + new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1") + ) + ); + // new custom metadata to be added + Map<String, UploadedMetadataAttribute> newCustomMetadataMap = Map.of( + "custom_md_3", + new UploadedMetadataAttribute("custom_md_3", "test-custom3-file__1") + ); + uploadedCustomMetadataMap.putAll(newCustomMetadataMap); + // already existing cluster state customs + Map<String, UploadedMetadataAttribute> uploadedClusterStateCustomMap = new HashMap<>( + Map.of( + "custom_1", + new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"), + "custom_2", + new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1") + ) + ); + // new customs uploaded + Map<String, UploadedMetadataAttribute> newClusterStateCustoms = Map.of( + "custom_3", + new UploadedMetadataAttribute("custom_3", "test-cluster-state-custom3-file__1") + ); + uploadedClusterStateCustomMap.putAll(newClusterStateCustoms); + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .clusterUUID(previousClusterState.getMetadata().clusterUUID()) + .indices(uploadedIndexMetadataList) + .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME)) + .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME)) + .transientSettingsMetadata(new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME)) + .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME)) + .hashesOfConsistentSettings( + new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME) + ) + .customMetadataMap(uploadedCustomMetadataMap) + .discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME)) + .clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME)) + .clusterStateCustomMetadataMap(uploadedClusterStateCustomMap) + .build(); + + RemoteReadResult mockResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockIndexMetadataManager = mock(RemoteIndexMetadataManager.class); + CheckedRunnable<IOException> mockRunnable = mock(CheckedRunnable.class); + ArgumentCaptor<LatchedActionListener<RemoteReadResult>> latchCapture = ArgumentCaptor.forClass(LatchedActionListener.class); + when(mockIndexMetadataManager.getAsyncIndexMetadataReadAction(anyString(), anyString(), latchCapture.capture())).thenReturn( + mockRunnable + ); + RemoteGlobalMetadataManager mockGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + when(mockGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), latchCapture.capture())).thenReturn(mockRunnable); + RemoteClusterStateAttributesManager mockClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + when(mockClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), latchCapture.capture())).thenReturn( + mockRunnable + ); + doAnswer(invocationOnMock -> { + latchCapture.getValue().onResponse(mockResult); + return null; + }).when(mockRunnable).run(); + when(mockResult.getComponent()).thenReturn("mock-result"); + remoteClusterStateService.start();
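+ // The stubs above make every async read action complete immediately with the same mocked RemoteReadResult; its component name ("mock-result") matches no known cluster-state component, so the parallel read below is expected to fail with an IllegalStateException rather than assemble a state. + 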
remoteClusterStateService.setRemoteIndexMetadataManager(mockIndexMetadataManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockClusterStateAttributeManager); + IllegalStateException exception = expectThrows( + IllegalStateException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + newIndicesToRead, + newCustomMetadataMap, + true, + true, + true, + true, + true, + true, + emptyList(), + true, + newClusterStateCustoms, + true + ) + ); + assertEquals("Unknown component: mock-result", exception.getMessage()); + newIndicesToRead.forEach( + uploadedIndexMetadata -> verify(mockIndexMetadataManager, times(1)).getAsyncIndexMetadataReadAction( + eq(previousClusterState.getMetadata().clusterUUID()), + eq(uploadedIndexMetadata.getUploadedFilename()), + any() + ) + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(COORDINATION_METADATA_FILENAME)), + eq(COORDINATION_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)), + eq(SETTING_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)), + eq(TRANSIENT_SETTING_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)), + eq(TEMPLATES_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)), + eq(HASHES_OF_CONSISTENT_SETTINGS), + any() + ); + newCustomMetadataMap.keySet().forEach(uploadedCustomMetadataKey -> { + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(newCustomMetadataMap.get(uploadedCustomMetadataKey).getUploadedFilename())), + eq(uploadedCustomMetadataKey), + any() + ); + }); + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(DISCOVERY_NODES), + argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)), + any() + ); + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(CLUSTER_BLOCKS), + argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)), + any() + ); + newClusterStateCustoms.keySet().forEach(uploadedClusterStateCustomMetadataKey -> { + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, uploadedClusterStateCustomMetadataKey)), + argThat(new BlobNameMatcher(newClusterStateCustoms.get(uploadedClusterStateCustomMetadataKey).getUploadedFilename())), + any() + ); + }); + } + + public void testReadClusterStateInParallel_Success() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + String indexFilename = "test-index-1-file__2"; + String customMetadataFilename = "test-custom3-file__1"; + String clusterStateCustomFilename = "test-cluster-state-custom3-file__1"; + // index already present in previous state + List uploadedIndexMetadataList = new ArrayList<>( + List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2")) + ); + // new index to be added + List newIndicesToRead = List.of( + new 
UploadedIndexMetadata("test-index-1", "test-index-1-uuid", indexFilename) + ); + uploadedIndexMetadataList.addAll(newIndicesToRead); + // existing custom metadata + Map uploadedCustomMetadataMap = new HashMap<>( + Map.of( + "custom_md_1", + new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"), + "custom_md_2", + new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1") + ) + ); + // new custom metadata to be added + Map newCustomMetadataMap = Map.of( + "custom_md_3", + new UploadedMetadataAttribute("custom_md_3", customMetadataFilename) + ); + uploadedCustomMetadataMap.putAll(newCustomMetadataMap); + // already existing cluster state customs + Map uploadedClusterStateCustomMap = new HashMap<>( + Map.of( + "custom_1", + new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"), + "custom_2", + new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1") + ) + ); + // new customs uploaded + Map newClusterStateCustoms = Map.of( + "custom_3", + new UploadedMetadataAttribute("custom_3", clusterStateCustomFilename) + ); + uploadedClusterStateCustomMap.putAll(newClusterStateCustoms); + + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().indices(uploadedIndexMetadataList) + .customMetadataMap(uploadedCustomMetadataMap) + .clusterStateCustomMetadataMap(uploadedClusterStateCustomMap) + .build(); + + IndexMetadata newIndexMetadata = new IndexMetadata.Builder("test-index-1").state(IndexMetadata.State.OPEN) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build()) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + CustomMetadata3 customMetadata3 = new CustomMetadata3("custom_md_3"); + CoordinationMetadata updatedCoordinationMetadata = CoordinationMetadata.builder() + .term(previousClusterState.metadata().coordinationMetadata().term() + 1) + .build(); + Settings updatedPersistentSettings = Settings.builder() + .put("random_persistent_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + Settings updatedTransientSettings = Settings.builder() + .put("random_transient_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + TemplatesMetadata updatedTemplateMetadata = getTemplatesMetadata(); + DiffableStringMap updatedHashesOfConsistentSettings = getHashesOfConsistentSettings(); + DiscoveryNodes updatedDiscoveryNodes = getDiscoveryNodes(); + ClusterBlocks updatedClusterBlocks = randomClusterBlocks(); + TestClusterStateCustom3 updatedClusterStateCustom3 = new TestClusterStateCustom3("custom_3"); + + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + + when( + mockedIndexManager.getAsyncIndexMetadataReadAction( + eq(manifest.getClusterUUID()), + eq(indexFilename), + any(LatchedActionListener.class) + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(newIndexMetadata, INDEX, "test-index-1") + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(customMetadataFilename)), + eq("custom_md_3"), + any() + ) + 
).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(customMetadata3, CUSTOM_METADATA, "custom_md_3") + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(COORDINATION_METADATA_FILENAME)), + eq(COORDINATION_METADATA), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedCoordinationMetadata, COORDINATION_METADATA, COORDINATION_METADATA) + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)), + eq(SETTING_METADATA), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedPersistentSettings, SETTING_METADATA, SETTING_METADATA) + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)), + eq(TRANSIENT_SETTING_METADATA), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedTransientSettings, TRANSIENT_SETTING_METADATA, TRANSIENT_SETTING_METADATA) + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)), + eq(TEMPLATES_METADATA), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedTemplateMetadata, TEMPLATES_METADATA, TEMPLATES_METADATA) + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)), + eq(HASHES_OF_CONSISTENT_SETTINGS), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedHashesOfConsistentSettings, HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS) + ); + }); + when( + mockedClusterStateAttributeManager.getAsyncMetadataReadAction( + eq(DISCOVERY_NODES), + argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedDiscoveryNodes, CLUSTER_STATE_ATTRIBUTE, DISCOVERY_NODES) + ); + }); + when( + mockedClusterStateAttributeManager.getAsyncMetadataReadAction( + eq(CLUSTER_BLOCKS), + argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, 
LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(updatedClusterBlocks, CLUSTER_STATE_ATTRIBUTE, CLUSTER_BLOCKS) + ); + }); + when( + mockedClusterStateAttributeManager.getAsyncMetadataReadAction( + eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, updatedClusterStateCustom3.getWriteableName())), + argThat(new BlobNameMatcher(clusterStateCustomFilename)), + any() + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult( + updatedClusterStateCustom3, + CLUSTER_STATE_ATTRIBUTE, + String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, updatedClusterStateCustom3.getWriteableName()) + ) + ); + }); + + remoteClusterStateService.start(); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager); + + ClusterState updatedClusterState = remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + newIndicesToRead, + newCustomMetadataMap, + true, + true, + true, + true, + true, + true, + emptyList(), + true, + newClusterStateCustoms, + true + ); + + assertEquals(uploadedIndexMetadataList.size(), updatedClusterState.metadata().indices().size()); + assertTrue(updatedClusterState.metadata().indices().containsKey("test-index-1")); + assertEquals(newIndexMetadata, updatedClusterState.metadata().index(newIndexMetadata.getIndex())); + uploadedCustomMetadataMap.keySet().forEach(key -> assertTrue(updatedClusterState.metadata().customs().containsKey(key))); + assertEquals(customMetadata3, updatedClusterState.metadata().custom(customMetadata3.getWriteableName())); + assertEquals( + previousClusterState.metadata().coordinationMetadata().term() + 1, + updatedClusterState.metadata().coordinationMetadata().term() + ); + assertEquals(updatedPersistentSettings, updatedClusterState.metadata().persistentSettings()); + assertEquals(updatedTransientSettings, updatedClusterState.metadata().transientSettings()); + assertEquals(updatedTemplateMetadata.getTemplates(), updatedClusterState.metadata().templates()); + assertEquals(updatedHashesOfConsistentSettings, updatedClusterState.metadata().hashesOfConsistentSettings()); + assertEquals(updatedDiscoveryNodes.getSize(), updatedClusterState.getNodes().getSize()); + updatedDiscoveryNodes.getNodes().forEach((nodeId, node) -> assertEquals(updatedClusterState.getNodes().get(nodeId), node)); + assertEquals(updatedDiscoveryNodes.getClusterManagerNodeId(), updatedClusterState.getNodes().getClusterManagerNodeId()); + assertEquals(updatedClusterBlocks, updatedClusterState.blocks()); + uploadedClusterStateCustomMap.keySet().forEach(key -> assertTrue(updatedClusterState.customs().containsKey(key))); + assertEquals(updatedClusterStateCustom3, updatedClusterState.custom("custom_3")); + newIndicesToRead.forEach( + uploadedIndexMetadata -> verify(mockedIndexManager, times(1)).getAsyncIndexMetadataReadAction( + eq(previousClusterState.getMetadata().clusterUUID()), + eq(uploadedIndexMetadata.getUploadedFilename()), + any() + ) + ); + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new 
BlobNameMatcher(COORDINATION_METADATA_FILENAME)), + eq(COORDINATION_METADATA), + any() + ); + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)), + eq(SETTING_METADATA), + any() + ); + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)), + eq(TRANSIENT_SETTING_METADATA), + any() + ); + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)), + eq(TEMPLATES_METADATA), + any() + ); + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)), + eq(HASHES_OF_CONSISTENT_SETTINGS), + any() + ); + newCustomMetadataMap.keySet().forEach(uploadedCustomMetadataKey -> { + verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(newCustomMetadataMap.get(uploadedCustomMetadataKey).getUploadedFilename())), + eq(uploadedCustomMetadataKey), + any() + ); + }); + verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(DISCOVERY_NODES), + argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)), + any() + ); + verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(CLUSTER_BLOCKS), + argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)), + any() + ); + newClusterStateCustoms.keySet().forEach(uploadedClusterStateCustomMetadataKey -> { + verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, uploadedClusterStateCustomMetadataKey)), + argThat(new BlobNameMatcher(newClusterStateCustoms.get(uploadedClusterStateCustomMetadataKey).getUploadedFilename())), + any() + ); + }); + } + /* * Here we will verify the migration of manifest file from codec V0. 
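* (Later manifest codecs split global metadata: a CODEC_V2 manifest tracks coordination metadata, settings, templates and customs as separately uploaded files, which appears to be why the manifests below pin codecVersion(CODEC_V2) explicitly.) 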
* @@ -1191,13 +2254,14 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .codecVersion(CODEC_V2) .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") .build(); BlobContainer blobContainer = mockBlobStoreObjects(); - mockBlobContainer(blobContainer, expectedManifest, Map.of()); + mockBlobContainer(blobContainer, expectedManifest, Map.of(), CODEC_V2); when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename())).thenThrow(FileNotFoundException.class); remoteClusterStateService.start(); @@ -1225,11 +2289,11 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) - .codecVersion(ClusterMetadataManifest.CODEC_V0) + .codecVersion(CODEC_V2) .previousClusterUUID("prev-cluster-uuid") .build(); - mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>()); + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>(), CODEC_V2); remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( clusterState.getClusterName().value(), @@ -1353,10 +2417,10 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID("prev-cluster-uuid") - .codecVersion(ClusterMetadataManifest.CODEC_V0) + .codecVersion(CODEC_V2) .build(); - mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata), CODEC_V2); Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( clusterState.getClusterName().value(), @@ -1601,6 +2665,7 @@ public void testWriteFullMetadataInParallelSuccessWithRoutingTable() throws IOEx .clusterUUID("cluster-uuid") .previousClusterUUID("prev-cluster-uuid") .routingTableVersion(1) + .codecVersion(CODEC_V2) .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) .build(); @@ -1857,7 +2922,7 @@ private void mockObjectsForGettingPreviousClusterUUID( BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()]; mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray); when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray); - when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + when(blobStoreRepository.getCompressor()).thenReturn(compressor); } private ClusterMetadataManifest generateV1ClusterMetadataManifest( @@ -1986,7 +3051,7 @@ private void mockBlobContainer( } String fileName = uploadedIndexMetadata.getUploadedFilename(); when(blobContainer.readBlob(getFormattedIndexFileName(fileName))).thenAnswer((invocationOnMock) -> { - BytesReference bytesIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.serialize( + BytesReference bytesIndexMetadata = INDEX_METADATA_FORMAT.serialize( indexMetadata, fileName, blobStoreRepository.getCompressor(), @@ -2018,7 +3083,7 @@ private void mockBlobContainerForGlobalMetadata( FORMAT_PARAMS ); when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); - if (codecVersion >= 
ClusterMetadataManifest.CODEC_V2) { + if (codecVersion >= CODEC_V2) { String coordinationFileName = getFileNameFromPath(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()); when(blobContainer.readBlob(COORDINATION_METADATA_FORMAT.blobName(coordinationFileName))).thenAnswer((invocationOnMock) -> { BytesReference bytesReference = COORDINATION_METADATA_FORMAT.serialize( @@ -2057,12 +3122,6 @@ private void mockBlobContainerForGlobalMetadata( .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> getFileNameFromPath(entry.getValue().getUploadedFilename()))); - // ChecksumBlobStoreFormat customMetadataFormat = new ChecksumBlobStoreFormat<>( - // "custom", - // METADATA_NAME_PLAIN_FORMAT, - // null - // ); - ChecksumWritableBlobStoreFormat customMetadataFormat = new ChecksumWritableBlobStoreFormat<>("custom", null); for (Map.Entry entry : customFileMap.entrySet()) { String custom = entry.getKey(); @@ -2147,32 +3206,105 @@ static ClusterState.Builder generateClusterStateWithOneIndex() { .routingTable(RoutingTable.builder().addAsNew(indexMetadata).version(1L).build()); } + static ClusterState.Builder generateClusterStateWithAllAttributes() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final Settings settings = Settings.builder().put("mock-settings", true).build(); + final Settings transientSettings = Settings.builder().put("mock-transient-settings", true).build(); + final DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(emptyMap()); + final TemplatesMetadata templatesMetadata = TemplatesMetadata.builder() + .put(IndexTemplateMetadata.builder("template-1").patterns(List.of("test-index* ")).build()) + .build(); + final CustomMetadata1 customMetadata1 = new CustomMetadata1("custom-metadata-1"); + final CustomMetadata2 customMetadata2 = new CustomMetadata2("custom-metadata-2"); + final DiscoveryNodes nodes = nodesWithLocalNodeClusterManager(); + final ClusterBlocks clusterBlocks = randomClusterBlocks(); + final TestClusterStateCustom1 custom1 = new RemoteClusterStateTestUtils.TestClusterStateCustom1("custom-1"); + final TestClusterStateCustom2 custom2 = new RemoteClusterStateTestUtils.TestClusterStateCustom2("custom-2"); + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .persistentSettings(settings) + .transientSettings(transientSettings) + .hashesOfConsistentSettings(hashesOfConsistentSettings) + .templates(templatesMetadata) + .putCustom(customMetadata1.getWriteableName(), customMetadata1) + .putCustom(customMetadata2.getWriteableName(), customMetadata2) + .build() + ) + .routingTable(RoutingTable.builder().addAsNew(indexMetadata).version(1L).build()) + .nodes(nodes) + .blocks(clusterBlocks) + .putCustom(custom1.getWriteableName(), custom1) + .putCustom(custom2.getWriteableName(), custom2); + } + + static ClusterMetadataManifest.Builder 
generateClusterMetadataManifestWithAllAttributes() { + return ClusterMetadataManifest.builder() + .codecVersion(CODEC_V2) + .clusterUUID("cluster-uuid") + .indices(List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2"))) + .customMetadataMap( + Map.of( + "custom_md_1", + new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"), + "custom_md_2", + new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1") + ) + ) + .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME)) + .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME)) + .transientSettingsMetadata(new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME)) + .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME)) + .hashesOfConsistentSettings( + new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME) + ) + .discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME)) + .clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME)) + .clusterStateCustomMetadataMap( + Map.of( + "custom_1", + new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"), + "custom_2", + new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1") + ) + ); + } + static DiscoveryNodes nodesWithLocalNodeClusterManager() { final DiscoveryNode localNode = new DiscoveryNode("cluster-manager-id", buildNewFakeTransportAddress(), Version.CURRENT); return DiscoveryNodes.builder().clusterManagerNodeId("cluster-manager-id").localNodeId("cluster-manager-id").add(localNode).build(); } - private static class CustomMetadata1 extends TestCustomMetadata { - public static final String TYPE = "custom_md_1"; + private class BlobNameMatcher implements ArgumentMatcher { + private final String expectedBlobName; - CustomMetadata1(String data) { - super(data); + BlobNameMatcher(String expectedBlobName) { + this.expectedBlobName = expectedBlobName; } @Override - public String getWriteableName() { - return TYPE; + public boolean matches(AbstractRemoteWritableBlobEntity argument) { + return argument != null && expectedBlobName.equals(argument.getFullBlobName()); } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY); + public String toString() { + return "BlobNameMatcher[Expected blobName: " + expectedBlobName + "]"; } } - } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java new file mode 100644 index 0000000000000..b17ffcbaac344 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.TestClusterStateCustom; +import org.opensearch.test.TestCustomMetadata; + +import java.io.IOException; +import java.util.EnumSet; + +public class RemoteClusterStateTestUtils { + public static class CustomMetadata1 extends TestCustomMetadata { + public static final String TYPE = "custom_md_1"; + + public CustomMetadata1(String data) { + super(data); + } + + public CustomMetadata1(StreamInput in) throws IOException { + super(in.readString()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + public static class CustomMetadata2 extends TestCustomMetadata { + public static final String TYPE = "custom_md_2"; + + public CustomMetadata2(String data) { + super(data); + } + + public CustomMetadata2(StreamInput in) throws IOException { + super(in.readString()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + public static class CustomMetadata3 extends TestCustomMetadata { + public static final String TYPE = "custom_md_3"; + + public CustomMetadata3(String data) { + super(data); + } + + public CustomMetadata3(StreamInput in) throws IOException { + super(in.readString()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + public static class CustomMetadata4 extends TestCustomMetadata { + public static final String TYPE = "custom_md_4"; + + public CustomMetadata4(String data) { + super(data); + } + + public CustomMetadata4(StreamInput in) throws IOException { + super(in.readString()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + + public static class CustomMetadata5 extends TestCustomMetadata { + public static final String TYPE = "custom_md_5"; + + public CustomMetadata5(String data) { + super(data); + } + + public CustomMetadata5(StreamInput in) throws IOException { + super(in.readString()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return EnumSet.of(Metadata.XContentContext.API); + } + } + + public static class TestClusterStateCustom1 extends TestClusterStateCustom { + + public static final String TYPE = "custom_1"; + + public TestClusterStateCustom1(String value) { + super(value); + } + + public TestClusterStateCustom1(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + public static class TestClusterStateCustom2 extends TestClusterStateCustom { + + 
public static final String TYPE = "custom_2"; + + public TestClusterStateCustom2(String value) { + super(value); + } + + public TestClusterStateCustom2(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + public static class TestClusterStateCustom3 extends TestClusterStateCustom { + + public static final String TYPE = "custom_3"; + + public TestClusterStateCustom3(String value) { + super(value); + } + + public TestClusterStateCustom3(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } + + public static class TestClusterStateCustom4 extends TestClusterStateCustom { + + public static final String TYPE = "custom_4"; + + public TestClusterStateCustom4(String value) { + super(value); + } + + public TestClusterStateCustom4(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java index c543f986b3e86..917794ec03c3a 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java @@ -8,7 +8,6 @@ package org.opensearch.gateway.remote; -import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; @@ -18,7 +17,6 @@ import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.network.NetworkModule; @@ -43,7 +41,6 @@ import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.TestCustomMetadata; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -51,7 +48,6 @@ import java.io.IOException; import java.io.InputStream; -import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -61,6 +57,11 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.cluster.metadata.Metadata.isGlobalStateEquals; import static org.opensearch.common.blobstore.stream.write.WritePriority.URGENT; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata3; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata4; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata5; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; @@ -699,121 
+700,5 @@ public void testGetUpdatedCustoms() { ); assertThat(customsDiff.getUpserts(), is(expectedUpserts)); assertThat(customsDiff.getDeletes(), is(List.of(CustomMetadata1.TYPE))); - - } - - private static class CustomMetadata1 extends TestCustomMetadata { - public static final String TYPE = "custom_md_1"; - - CustomMetadata1(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY); - } - } - - private static class CustomMetadata2 extends TestCustomMetadata { - public static final String TYPE = "custom_md_2"; - - CustomMetadata2(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY); - } - } - - private static class CustomMetadata3 extends TestCustomMetadata { - public static final String TYPE = "custom_md_3"; - - CustomMetadata3(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY); - } - } - - private static class CustomMetadata4 extends TestCustomMetadata { - public static final String TYPE = "custom_md_4"; - - CustomMetadata4(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY); - } - } - - private static class CustomMetadata5 extends TestCustomMetadata { - public static final String TYPE = "custom_md_5"; - - CustomMetadata5(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(XContentContext.API); - } } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java new file mode 100644 index 0000000000000..817fc7b55d09a --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.TestCapturingListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_METADATA_FORMAT; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyIterable; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteIndexMetadataManagerTests extends OpenSearchTestCase { + + private RemoteIndexMetadataManager remoteIndexMetadataManager; + private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; + private Compressor compressor; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + blobStoreRepository = mock(BlobStoreRepository.class); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + blobStoreTransferService = mock(BlobStoreTransferService.class); + compressor = new NoneCompressor(); + when(blobStoreRepository.getCompressor()).thenReturn(compressor); + remoteIndexMetadataManager = new RemoteIndexMetadataManager( + clusterSettings, + "test-cluster", + blobStoreRepository, + blobStoreTransferService, + threadPool + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testGetAsyncIndexMetadataWriteAction_Success() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), 
randomAlphaOfLength(10)); + BlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + BlobStore blobStore = mock(BlobStore.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + String expectedFilePrefix = String.join(DELIMITER, "metadata", RemoteStoreUtils.invertLong(indexMetadata.getVersion())); + + doAnswer((invocationOnMock -> { + invocationOnMock.getArgument(4, ActionListener.class).onResponse(null); + return null; + })).when(blobStoreTransferService).uploadBlob(any(), any(), any(), eq(WritePriority.URGENT), any(ActionListener.class)); + + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction( + indexMetadata, + "cluster-uuid", + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); + + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + ClusterMetadataManifest.UploadedMetadata uploadedMetadata = listener.getResult(); + assertEquals(INDEX + "--" + indexMetadata.getIndex().getName(), uploadedMetadata.getComponent()); + String uploadedFileName = uploadedMetadata.getUploadedFilename(); + String[] pathTokens = uploadedFileName.split(PATH_DELIMITER); + assertEquals(7, pathTokens.length); + assertEquals(INDEX, pathTokens[4]); + assertEquals(indexMetadata.getIndex().getUUID(), pathTokens[5]); + assertTrue(pathTokens[6].startsWith(expectedFilePrefix)); + } + + public void testGetAsyncIndexMetadataWriteAction_IOFailure() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), randomAlphaOfLength(10)); + BlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + BlobStore blobStore = mock(BlobStore.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + doAnswer((invocationOnMock -> { + invocationOnMock.getArgument(4, ActionListener.class).onFailure(new IOException("failure")); + return null; + })).when(blobStoreTransferService).uploadBlob(any(), any(), any(), eq(WritePriority.URGENT), any(ActionListener.class)); + + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction( + indexMetadata, + "cluster-uuid", + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); + assertNull(listener.getResult()); + assertNotNull(listener.getFailure()); + assertTrue(listener.getFailure() instanceof RemoteStateTransferException); + } + + public void testGetAsyncIndexMetadataReadAction_Success() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), randomAlphaOfLength(10)); + String fileName = randomAlphaOfLength(10); + fileName = fileName + DELIMITER + '2'; + when(blobStoreTransferService.downloadBlob(anyIterable(), anyString())).thenReturn( + INDEX_METADATA_FORMAT.serialize(indexMetadata, fileName, compressor, FORMAT_PARAMS).streamInput() + ); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction("cluster-uuid", fileName, new LatchedActionListener<>(listener, latch)) + .run(); + latch.await(); + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + assertEquals(indexMetadata, listener.getResult().getObj()); + } + + public void testGetAsyncIndexMetadataReadAction_IOFailure() throws Exception { 
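+ // The transfer service is stubbed to throw on download, so the read action is expected to deliver that same IOException to the latched listener rather than swallowing it.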
+ String fileName = randomAlphaOfLength(10); + fileName = fileName + DELIMITER + '2'; + Exception exception = new IOException("testing failure"); + doThrow(exception).when(blobStoreTransferService).downloadBlob(anyIterable(), anyString()); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction("cluster-uuid", fileName, new LatchedActionListener<>(listener, latch)) + .run(); + latch.await(); + assertNull(listener.getResult()); + assertNotNull(listener.getFailure()); + assertEquals(exception, listener.getFailure()); + } + + private IndexMetadata getIndexMetadata(String name, @Nullable Boolean writeIndex, String... aliases) { + IndexMetadata.Builder builder = IndexMetadata.builder(name) + .settings( + Settings.builder() + .put("index.version.created", Version.CURRENT.id) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + ); + for (String alias : aliases) { + builder.putAlias(AliasMetadata.builder(alias).writeIndex(writeIndex).build()); + } + return builder.build(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index b31edd79411d0..7146b7dc51753 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,6 +48,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.codec.composite.Composite99Codec; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; @@ -59,6 +60,8 @@ import java.io.IOException; import java.util.Collections; +import org.mockito.Mockito; + import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; import static org.hamcrest.Matchers.instanceOf; @@ -76,23 +79,52 @@ public void testDefault() throws Exception { assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); } + public void testDefaultWithCompositeIndex() throws Exception { + Codec codec = createCodecService(false, true).codec("default"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); + assert codec instanceof Composite99Codec; + } + public void testBestCompression() throws Exception { Codec codec = createCodecService(false).codec("best_compression"); assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); } + public void testBestCompressionWithCompositeIndex() throws Exception { + Codec codec = createCodecService(false, true).codec("best_compression"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); + assert codec instanceof Composite99Codec; + } + public void testLZ4() throws Exception { Codec codec = createCodecService(false).codec("lz4"); assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); assert codec instanceof PerFieldMappingPostingFormatCodec; } + public void testLZ4WithCompositeIndex() throws Exception { + Codec codec = createCodecService(false, true).codec("lz4"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); + assert codec instanceof Composite99Codec; + } + public void testZlib() throws Exception { Codec codec = createCodecService(false).codec("zlib"); 
assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); assert codec instanceof PerFieldMappingPostingFormatCodec; } + public void testZlibWithCompositeIndex() throws Exception { + Codec codec = createCodecService(false, true).codec("zlib"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); + assert codec instanceof Composite99Codec; + } + + public void testResolveDefaultCodecsWithCompositeIndex() throws Exception { + CodecService codecService = createCodecService(false, true); + assertThat(codecService.codec("default"), instanceOf(Composite99Codec.class)); + } + public void testBestCompressionWithCompressionLevel() { final Settings settings = Settings.builder() .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) @@ -150,10 +182,17 @@ private void assertStoredFieldsCompressionEquals(Lucene99Codec.Mode expected, Co } private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { + return createCodecService(isMapperServiceNull, false); + } + + private CodecService createCodecService(boolean isMapperServiceNull, boolean isCompositeIndexPresent) throws IOException { Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); if (isMapperServiceNull) { return new CodecService(null, IndexSettingsModule.newIndexSettings("_na", nodeSettings), LogManager.getLogger("test")); } + if (isCompositeIndexPresent) { + return buildCodecServiceWithCompositeIndex(nodeSettings); + } return buildCodecService(nodeSettings); } @@ -176,6 +215,14 @@ private CodecService buildCodecService(Settings nodeSettings) throws IOException return new CodecService(service, indexSettings, LogManager.getLogger("test")); } + private CodecService buildCodecServiceWithCompositeIndex(Settings nodeSettings) throws IOException { + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); + MapperService service = Mockito.mock(MapperService.class); + Mockito.when(service.isCompositeIndexPresent()).thenReturn(true); + return new CodecService(service, indexSettings, LogManager.getLogger("test")); + } + private SegmentReader getSegmentReader(Codec codec) throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java new file mode 100644 index 0000000000000..6c6d26656e4de --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite.datacube.startree; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.common.Rounding; +import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import org.mockito.Mockito; + +/** + * Star tree doc values Lucene tests + */ +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public class StarTreeDocValuesFormatTests extends BaseDocValuesFormatTestCase { + @Override + protected Codec getCodec() { + MapperService service = Mockito.mock(MapperService.class); + Mockito.when(service.getCompositeFieldTypes()).thenReturn(Set.of(getStarTreeFieldType())); + final Logger testLogger = LogManager.getLogger(StarTreeDocValuesFormatTests.class); + return new Composite99Codec(Lucene99Codec.Mode.BEST_SPEED, service, testLogger); + } + + private StarTreeMapper.StarTreeFieldType getStarTreeFieldType() { + List<MetricStat> m1 = new ArrayList<>(); + m1.add(MetricStat.MAX); + Metric metric = new Metric("sndv", m1); + List<Rounding.DateTimeUnit> d1CalendarIntervals = new ArrayList<>(); + d1CalendarIntervals.add(Rounding.DateTimeUnit.HOUR_OF_DAY); + StarTreeField starTreeField = getStarTreeField(d1CalendarIntervals, metric); + + return new StarTreeMapper.StarTreeFieldType("star_tree", starTreeField); + } + + private static StarTreeField getStarTreeField(List<Rounding.DateTimeUnit> d1CalendarIntervals, Metric metric1) { + DateDimension d1 = new DateDimension("field", d1CalendarIntervals); + NumericDimension d2 = new NumericDimension("dv"); + + List<Metric> metrics = List.of(metric1); + List<Dimension> dims = List.of(d1, d2); + StarTreeFieldConfiguration config = new StarTreeFieldConfiguration( + 100, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP + ); + + return new StarTreeField("starTree", dims, metrics, config); + } + + public void testStarTreeDocValues() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedNumericDocValuesField("dv", 1)); + doc.add(new SortedNumericDocValuesField("field", 1)); + iw.addDocument(doc); + doc.add(new 
SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedNumericDocValuesField("dv", 1)); + doc.add(new SortedNumericDocValuesField("field", 1)); + iw.addDocument(doc); + iw.forceMerge(1); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedNumericDocValuesField("dv", 2)); + doc.add(new SortedNumericDocValuesField("field", 2)); + iw.addDocument(doc); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedNumericDocValuesField("dv", 2)); + doc.add(new SortedNumericDocValuesField("field", 2)); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + // TODO : validate star tree structures that got created + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java index b274cf28429e8..7a8c4ffe35021 100644 --- a/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java @@ -247,6 +247,46 @@ public void testCopyToStrictDynamicInnerObjectParsing() throws Exception { assertThat(e.getMessage(), startsWith("mapping set to strict, dynamic introduction of [very] within [_doc] is not allowed")); } + public void testCopyToStrictAllowTemplatesDynamicInnerObjectParsing() throws Exception { + DocumentMapper docMapper = createDocumentMapper(topMapping(b -> { + b.field("dynamic", "strict_allow_templates"); + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "test"); + b.startObject("mapping").field("type", "object").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + b.startObject("properties"); + { + b.startObject("copy_test"); + { + b.field("type", "text"); + b.field("copy_to", "very.inner.field"); + } + b.endObject(); + } + b.endObject(); + })); + + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> docMapper.parse(source(b -> b.field("copy_test", "foo"))) + ); + + assertThat( + e.getMessage(), + startsWith("mapping set to strict_allow_templates, dynamic introduction of [very] within [_doc] is not allowed") + ); + } + public void testCopyToInnerStrictDynamicInnerObjectParsing() throws Exception { DocumentMapper docMapper = createDocumentMapper(mapping(b -> { diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java index ecab9da8c6b6c..15e2b6649b0be 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java @@ -878,6 +878,340 @@ public void testDynamicStrictDottedFieldNameLong() throws Exception { assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } + public void testDynamicStrictAllowTemplatesDottedFieldNameLong() throws Exception { + DocumentMapper documentMapper = createDocumentMapper(topMapping(b -> b.field("dynamic", "strict_allow_templates"))); + StrictDynamicMappingException exception = expectThrows( + StrictDynamicMappingException.class, + () -> documentMapper.parse(source(b -> b.field("foo.bar.baz", 0))) + ); + assertEquals( + "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed", + exception.getMessage() + ); + + DocumentMapper documentMapperWithDynamicTemplates = 
+        DocumentMapper documentMapperWithDynamicTemplates = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("path_match", "foo.bar.baz");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+        exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> documentMapperWithDynamicTemplates.parse(source(b -> b.field("foo.bar.baz", 0)))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
+        DocumentMapper mapper = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "foo");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test1");
+                    {
+                        b.field("match", "bar");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test2");
+                    {
+                        b.field("path_match", "foo.bar.baz");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+
+        ParsedDocument doc = mapper.parse(source(b -> b.field("foo.bar.baz", 0)));
+        assertEquals(2, doc.rootDoc().getFields("foo.bar.baz").length);
+    }
+
+    public void testDynamicAllowTemplatesStrictLongArray() throws Exception {
+        DocumentMapper documentMapper = createDocumentMapper(topMapping(b -> b.field("dynamic", "strict_allow_templates")));
+        StrictDynamicMappingException exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> documentMapper.parse(source(b -> b.startArray("foo").value(0).value(1).endArray()))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
+        DocumentMapper documentMapperWithDynamicTemplates = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "test");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+        exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> documentMapperWithDynamicTemplates.parse(source(b -> b.startArray("foo").value(0).value(1).endArray()))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
+        DocumentMapper mapper = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "foo");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+
+        ParsedDocument doc = mapper.parse(source(b -> b.startArray("foo").value(0).value(1).endArray()));
+        assertEquals(4, doc.rootDoc().getFields("foo").length);
+    }
+
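+    // The object form ("foo.bar.baz": {...}) below goes through the same per-segment template checks
+    // as the dotted scalar and array forms above.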
+    public void testDynamicStrictAllowTemplatesDottedFieldNameObject() throws Exception {
+        DocumentMapper documentMapper = createDocumentMapper(topMapping(b -> b.field("dynamic", "strict_allow_templates")));
+        StrictDynamicMappingException exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> documentMapper.parse(source(b -> b.startObject("foo.bar.baz").field("a", 0).endObject()))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
+        DocumentMapper documentMapperWithDynamicTemplates = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "test");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+        exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> documentMapperWithDynamicTemplates.parse(source(b -> b.startObject("foo.bar.baz").field("a", 0).endObject()))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
+        DocumentMapper mapper = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "foo");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test1");
+                    {
+                        b.field("match", "bar");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test2");
+                    {
+                        b.field("match", "baz");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test3");
+                    {
+                        b.field("path_match", "foo.bar.baz.a");
+                        b.startObject("mapping").field("type", "long").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+
+        ParsedDocument doc = mapper.parse(source(b -> b.startObject("foo.bar.baz").field("a", 0).endObject()));
+        assertEquals(2, doc.rootDoc().getFields("foo.bar.baz.a").length);
+    }
+
+    public void testDynamicStrictAllowTemplatesObject() throws Exception {
+        DocumentMapper mapper = createDocumentMapper(topMapping(b -> {
+            b.field("dynamic", "strict_allow_templates");
+            b.startArray("dynamic_templates");
+            {
+                b.startObject();
+                {
+                    b.startObject("test");
+                    {
+                        b.field("match", "test");
+                        b.field("match_mapping_type", "object");
+                        b.startObject("mapping").field("type", "object").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            {
+                b.startObject();
+                {
+                    b.startObject("test1");
+                    {
+                        b.field("match", "test1");
+                        b.startObject("mapping").field("type", "keyword").endObject();
+                    }
+                    b.endObject();
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+        StrictDynamicMappingException exception = expectThrows(
+            StrictDynamicMappingException.class,
+            () -> mapper.parse(source(b -> b.startObject("foo").field("bar", "baz").endObject()))
+        );
+        assertEquals(
+            "mapping set to strict_allow_templates, dynamic introduction of [foo] within [_doc] is not allowed",
+            exception.getMessage()
+        );
+
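+        // "test" matches the object template and "test1" the keyword template, so this document parses.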
b.startObject("test").field("test1", "baz").endObject())); + assertEquals(2, doc.rootDoc().getFields("test.test1").length); + } + + public void testDynamicStrictAllowTemplatesValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.field("dynamic", "strict_allow_templates"); + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "test*"); + b.field("match_mapping_type", "string"); + b.startObject("mapping").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + } + + )); + StrictDynamicMappingException exception = expectThrows( + StrictDynamicMappingException.class, + () -> mapper.parse(source(b -> b.field("bar", "baz"))) + ); + assertEquals( + "mapping set to strict_allow_templates, dynamic introduction of [bar] within [_doc] is not allowed", + exception.getMessage() + ); + + ParsedDocument doc = mapper.parse(source(b -> b.field("test1", "baz"))); + assertEquals(2, doc.rootDoc().getFields("test1").length); + } + + public void testDynamicStrictAllowTemplatesNull() throws Exception { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> b.field("dynamic", "strict_allow_templates"))); + StrictDynamicMappingException exception = expectThrows( + StrictDynamicMappingException.class, + () -> mapper.parse(source(b -> b.nullField("bar"))) + ); + assertEquals( + "mapping set to strict_allow_templates, dynamic introduction of [bar] within [_doc] is not allowed", + exception.getMessage() + ); + } + public void testDynamicDottedFieldNameObject() throws Exception { DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); ParsedDocument doc = mapper.parse(source(b -> b.startObject("foo.bar.baz").field("a", 0).endObject())); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java index a22bfa5e845b1..0253caea9759d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java @@ -380,6 +380,57 @@ public void testIndexOptions() throws IOException { } } + public void testPositionIncrementGapOnIndexPrefixField() throws IOException { + // test default position_increment_gap + MapperService mapperService = createMapperService( + fieldMapping(b -> b.field("type", "text").field("analyzer", "default").startObject("index_prefixes").endObject()) + ); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b 12" }))); + + withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 2, postings.nextPosition()); + }); + + withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field._index_prefix").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 2, postings.nextPosition()); + }); + + // test custom position_increment_gap 
+        // test custom position_increment_gap
+        final int positionIncrementGap = randomIntBetween(1, 1000);
+        MapperService mapperService2 = createMapperService(
+            fieldMapping(
+                b -> b.field("type", "text")
+                    .field("position_increment_gap", positionIncrementGap)
+                    .field("analyzer", "default")
+                    .startObject("index_prefixes")
+                    .endObject()
+            )
+        );
+        ParsedDocument doc2 = mapperService2.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b 12" })));
+        withLuceneIndex(mapperService2, iw -> iw.addDocument(doc2.rootDoc()), reader -> {
+            TermsEnum terms = getOnlyLeafReader(reader).terms("field").iterator();
+            assertTrue(terms.seekExact(new BytesRef("12")));
+            PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
+            assertEquals(0, postings.nextDoc());
+            assertEquals(positionIncrementGap + 2, postings.nextPosition());
+        });
+        withLuceneIndex(mapperService2, iw -> iw.addDocument(doc2.rootDoc()), reader -> {
+            TermsEnum terms = getOnlyLeafReader(reader).terms("field._index_prefix").iterator();
+            assertTrue(terms.seekExact(new BytesRef("12")));
+            PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
+            assertEquals(0, postings.nextDoc());
+            assertEquals(positionIncrementGap + 2, postings.nextPosition());
+        });
+    }
+
     public void testDefaultPositionIncrementGap() throws IOException {
         MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping));
         ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b" })));
diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java
index 9c177bbec61fd..e672f94819541 100644
--- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java
@@ -167,6 +167,7 @@ public void testFuzzyQuery() {
 
     public void testIndexPrefixes() {
         TextFieldType ft = createFieldType(true);
+        ft.setIndexAnalyzer(Lucene.STANDARD_ANALYZER);
         ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10));
 
         Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, false, randomMockShardContext());
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java
index 280598c516c3c..1ec1e9977a9d5 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java
@@ -571,13 +571,9 @@ public void testStatsObjectCreationViaStream() throws IOException {
         assertEquals((int) deserializedStats.uploadBytesStarted, (int) transferTrackerStats.uploadBytesStarted);
         assertEquals((int) deserializedStats.uploadBytesSucceeded, (int) transferTrackerStats.uploadBytesSucceeded);
         assertEquals((int) deserializedStats.uploadBytesFailed, (int) transferTrackerStats.uploadBytesFailed);
-        assertEquals((int) deserializedStats.uploadBytesMovingAverage, transferTrackerStats.uploadBytesMovingAverage, 0);
-        assertEquals(
-            (int) deserializedStats.uploadBytesPerSecMovingAverage,
-            transferTrackerStats.uploadBytesPerSecMovingAverage,
-            0
-        );
-        assertEquals((int) deserializedStats.uploadTimeMovingAverage, transferTrackerStats.uploadTimeMovingAverage, 0);
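+        // The moving averages are doubles; the removed (int) casts truncated them before comparison.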
+        assertEquals(deserializedStats.uploadBytesMovingAverage, transferTrackerStats.uploadBytesMovingAverage, 0);
+        assertEquals(deserializedStats.uploadBytesPerSecMovingAverage, transferTrackerStats.uploadBytesPerSecMovingAverage, 0);
+        assertEquals(deserializedStats.uploadTimeMovingAverage, transferTrackerStats.uploadTimeMovingAverage, 0);
         assertEquals((int) deserializedStats.totalUploadsStarted, (int) transferTrackerStats.totalUploadsStarted);
         assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) transferTrackerStats.totalUploadsSucceeded);
         assertEquals((int) deserializedStats.totalUploadsFailed, (int) transferTrackerStats.totalUploadsFailed);
diff --git a/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java b/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java
index 2b40c01c61242..8ac457c32d53a 100644
--- a/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java
+++ b/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java
@@ -32,12 +32,17 @@
 
 package org.opensearch.indices;
 
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.SystemIndexPlugin;
 import org.opensearch.tasks.TaskResultsService;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import static java.util.Collections.emptyMap;
@@ -67,7 +72,7 @@ public void testBasicOverlappingPatterns() {
 
         IllegalStateException exception = expectThrows(
             IllegalStateException.class,
-            () -> SystemIndices.checkForOverlappingPatterns(descriptors)
+            () -> SystemIndexRegistry.checkForOverlappingPatterns(descriptors)
         );
         assertThat(
             exception.getMessage(),
@@ -104,7 +109,7 @@ public void testComplexOverlappingPatterns() {
 
         IllegalStateException exception = expectThrows(
             IllegalStateException.class,
-            () -> SystemIndices.checkForOverlappingPatterns(descriptors)
+            () -> SystemIndexRegistry.checkForOverlappingPatterns(descriptors)
         );
         assertThat(
             exception.getMessage(),
@@ -133,4 +138,94 @@ public void testPluginCannotOverrideBuiltInSystemIndex() {
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SystemIndices(pluginMap));
         assertThat(e.getMessage(), containsString("plugin or module attempted to define the same source"));
     }
+
+    public void testSystemIndexMatching() {
+        SystemIndexPlugin plugin1 = new SystemIndexPlugin1();
+        SystemIndexPlugin plugin2 = new SystemIndexPlugin2();
+        SystemIndexPlugin plugin3 = new SystemIndexPatternPlugin();
+        SystemIndices pluginSystemIndices = new SystemIndices(
+            Map.of(
+                SystemIndexPlugin1.class.getCanonicalName(),
+                plugin1.getSystemIndexDescriptors(Settings.EMPTY),
+                SystemIndexPlugin2.class.getCanonicalName(),
+                plugin2.getSystemIndexDescriptors(Settings.EMPTY),
+                SystemIndexPatternPlugin.class.getCanonicalName(),
+                plugin3.getSystemIndexDescriptors(Settings.EMPTY)
+            )
+        );
+
+        assertThat(
+            SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index2"),
+            equalTo(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1, SystemIndexPlugin2.SYSTEM_INDEX_2))
+        );
+        assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index1"), equalTo(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1)));
+        assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index2"), equalTo(List.of(SystemIndexPlugin2.SYSTEM_INDEX_2)));
+        assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern1"), equalTo(List.of(".system-index-pattern1")));
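+        // Expressions that fall under the registered wildcard descriptor are echoed back verbatim,
+        // while names matching no descriptor are filtered out of the result.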
SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern-sub*"), + equalTo(List.of(".system-index-pattern-sub*")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern1", ".system-index-pattern2"), + equalTo(List.of(".system-index-pattern1", ".system-index-pattern2")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index-pattern1"), + equalTo(List.of(".system-index1", ".system-index-pattern1")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index-pattern1", ".not-system"), + equalTo(List.of(".system-index1", ".system-index-pattern1")) + ); + assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".not-system"), equalTo(Collections.emptyList())); + } + + public void testRegisteredSystemIndexExpansion() { + SystemIndexPlugin plugin1 = new SystemIndexPlugin1(); + SystemIndexPlugin plugin2 = new SystemIndexPlugin2(); + SystemIndices pluginSystemIndices = new SystemIndices( + Map.of( + SystemIndexPlugin1.class.getCanonicalName(), + plugin1.getSystemIndexDescriptors(Settings.EMPTY), + SystemIndexPlugin2.class.getCanonicalName(), + plugin2.getSystemIndexDescriptors(Settings.EMPTY) + ) + ); + List systemIndices = SystemIndexRegistry.matchesSystemIndexPattern( + SystemIndexPlugin1.SYSTEM_INDEX_1, + SystemIndexPlugin2.SYSTEM_INDEX_2 + ); + assertEquals(2, systemIndices.size()); + assertTrue(systemIndices.containsAll(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1, SystemIndexPlugin2.SYSTEM_INDEX_2))); + } + + static final class SystemIndexPlugin1 extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_1 = ".system-index1"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_1, "System index 1"); + return Collections.singletonList(systemIndexDescriptor); + } + } + + static final class SystemIndexPlugin2 extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_2 = ".system-index2"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_2, "System index 2"); + return Collections.singletonList(systemIndexDescriptor); + } + } + + static final class SystemIndexPatternPlugin extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_PATTERN = ".system-index-pattern*"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_PATTERN, "System index pattern"); + return Collections.singletonList(systemIndexDescriptor); + } + } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index 43df482fcc2ae..15d0fcd10d701 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -57,7 +57,7 @@ import java.util.function.LongSupplier; import static org.opensearch.search.ResourceType.CPU; -import static org.opensearch.search.ResourceType.JVM; +import static org.opensearch.search.ResourceType.MEMORY; import static 
+    static final class SystemIndexPlugin1 extends Plugin implements SystemIndexPlugin {
+        public static final String SYSTEM_INDEX_1 = ".system-index1";
+
+        @Override
+        public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+            final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_1, "System index 1");
+            return Collections.singletonList(systemIndexDescriptor);
+        }
+    }
+
+    static final class SystemIndexPlugin2 extends Plugin implements SystemIndexPlugin {
+        public static final String SYSTEM_INDEX_2 = ".system-index2";
+
+        @Override
+        public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+            final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_2, "System index 2");
+            return Collections.singletonList(systemIndexDescriptor);
+        }
+    }
+
+    static final class SystemIndexPatternPlugin extends Plugin implements SystemIndexPlugin {
+        public static final String SYSTEM_INDEX_PATTERN = ".system-index-pattern*";
+
+        @Override
+        public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+            final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_PATTERN, "System index pattern");
+            return Collections.singletonList(systemIndexDescriptor);
+        }
+    }
 }
diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
index 43df482fcc2ae..15d0fcd10d701 100644
--- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
+++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java
@@ -57,7 +57,7 @@
 import java.util.function.LongSupplier;
 
 import static org.opensearch.search.ResourceType.CPU;
-import static org.opensearch.search.ResourceType.JVM;
+import static org.opensearch.search.ResourceType.MEMORY;
 import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -102,7 +102,7 @@ public void testIsNodeInDuress() {
 
         EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class) {
             {
-                put(ResourceType.JVM, heapUsageTracker);
+                put(ResourceType.MEMORY, heapUsageTracker);
                 put(ResourceType.CPU, cpuUsageTracker);
             }
         };
@@ -233,7 +233,7 @@ public void testSearchTaskInFlightCancellation() {
 
         EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class) {
             {
-                put(JVM, heapUsageTracker);
+                put(MEMORY, heapUsageTracker);
                 put(CPU, mockNodeDuressTracker);
             }
         };
@@ -308,7 +308,7 @@ public void testSearchShardTaskInFlightCancellation() {
 
         EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class) {
             {
-                put(JVM, new NodeDuressTracker(() -> false, () -> 3));
+                put(MEMORY, new NodeDuressTracker(() -> false, () -> 3));
                 put(CPU, mockNodeDuressTracker);
             }
         };
@@ -401,7 +401,7 @@ public void testNonCancellationOfHeapBasedTasksWhenHeapNotInDuress() {
 
         EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class) {
             {
-                put(JVM, new NodeDuressTracker(() -> false, () -> 3));
+                put(MEMORY, new NodeDuressTracker(() -> false, () -> 3));
                 put(CPU, new NodeDuressTracker(() -> true, () -> 3));
             }
         };
@@ -495,7 +495,7 @@ public void testNonCancellationWhenSearchTrafficIsNotQualifyingForCancellation()
 
         EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class) {
             {
-                put(JVM, new NodeDuressTracker(() -> false, () -> 3));
+                put(MEMORY, new NodeDuressTracker(() -> false, () -> 3));
                 put(CPU, new NodeDuressTracker(() -> true, () -> 3));
             }
         };
diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java
index 2db251ee461db..801576bdf89d4 100644
--- a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java
+++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java
@@ -19,7 +19,7 @@ public class NodeDuressTrackersTests extends OpenSearchTestCase {
     public void testNodeNotInDuress() {
         EnumMap<ResourceType, NodeDuressTracker> map = new EnumMap<>(ResourceType.class) {
             {
-                put(ResourceType.JVM, new NodeDuressTracker(() -> false, () -> 2));
+                put(ResourceType.MEMORY, new NodeDuressTracker(() -> false, () -> 2));
                 put(ResourceType.CPU, new NodeDuressTracker(() -> false, () -> 2));
             }
         };
@@ -34,7 +34,7 @@ public void testNodeNotInDuress() {
     public void testNodeInDuressWhenHeapInDuress() {
         EnumMap<ResourceType, NodeDuressTracker> map = new EnumMap<>(ResourceType.class) {
             {
-                put(ResourceType.JVM, new NodeDuressTracker(() -> true, () -> 3));
+                put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 3));
                 put(ResourceType.CPU, new NodeDuressTracker(() -> false, () -> 1));
             }
         };
@@ -51,7 +51,7 @@ public void testNodeInDuressWhenHeapInDuress() {
     public void testNodeInDuressWhenCPUInDuress() {
         EnumMap<ResourceType, NodeDuressTracker> map = new EnumMap<>(ResourceType.class) {
             {
-                put(ResourceType.JVM, new NodeDuressTracker(() -> false, () -> 1));
+                put(ResourceType.MEMORY, new NodeDuressTracker(() -> false, () -> 1));
                 put(ResourceType.CPU, new NodeDuressTracker(() -> true, () -> 3));
             }
         };
@@ -68,7 +68,7 @@ public void testNodeInDuressWhenCPUInDuress() {
     public void testNodeInDuressWhenCPUAndHeapInDuress() {
         EnumMap<ResourceType, NodeDuressTracker> map = new EnumMap<>(ResourceType.class) {
             {
-                put(ResourceType.JVM, new NodeDuressTracker(() -> true, () -> 3));
+                put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 3));
                 put(ResourceType.CPU, new NodeDuressTracker(() -> false, () -> 3));
             }
         };
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index a532bf0c6287b..a3c2932be64c4 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -33,7 +33,7 @@ apply plugin: 'opensearch.java'
 group = 'hdfs'
 
 versions << [
-  'jetty': '9.4.53.v20231009'
+  'jetty': '9.4.55.v20240627'
 ]
 
 dependencies {
@@ -52,6 +52,8 @@ dependencies {
     exclude module: "logback-classic"
     exclude module: "avro"
     exclude group: 'org.apache.kerby'
+    exclude group: 'com.nimbusds'
+    exclude module: "commons-configuration2"
   }
   api "org.codehaus.jettison:jettison:${versions.jettison}"
   api "org.apache.commons:commons-compress:${versions.commonscompress}"
@@ -73,7 +75,14 @@ dependencies {
   api "commons-net:commons-net:3.11.1"
   api "ch.qos.logback:logback-core:1.5.6"
   api "ch.qos.logback:logback-classic:1.2.13"
-  api 'org.apache.kerby:kerb-admin:2.0.3'
+  api "org.jboss.xnio:xnio-nio:3.8.16.Final"
+  api 'org.jline:jline:3.26.2'
+  api 'org.apache.commons:commons-configuration2:2.11.0'
+  api 'com.nimbusds:nimbus-jose-jwt:9.40'
+  api ('org.apache.kerby:kerb-admin:2.0.3') {
+    exclude group: "org.jboss.xnio"
+    exclude group: "org.jline"
+  }
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") {
     exclude group: "com.squareup.okio"
diff --git a/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java b/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java
new file mode 100644
index 0000000000000..ac32b8d227eda
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.AbstractNamedDiffable;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
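+/**
+ * Minimal {@link ClusterState.Custom} for tests: it carries a single string value and reports itself
+ * as private, i.e. it should not be handed out where private customs are filtered.
+ */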
+public abstract class TestClusterStateCustom extends AbstractNamedDiffable<ClusterState.Custom> implements ClusterState.Custom {
+
+    private final String value;
+
+    protected TestClusterStateCustom(String value) {
+        this.value = value;
+    }
+
+    protected TestClusterStateCustom(StreamInput in) throws IOException {
+        this.value = in.readString();
+    }
+
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.CURRENT;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(value);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return builder;
+    }
+
+    @Override
+    public boolean isPrivate() {
+        return true;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        TestClusterStateCustom that = (TestClusterStateCustom) o;
+
+        if (!value.equals(that.value)) return false;
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return value.hashCode();
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index b7c31685bafa6..8c612d258f183 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -708,11 +708,15 @@ protected void refreshAllIndices() throws IOException {
         requestOptions.setWarningsHandler(warnings -> {
             if (warnings.isEmpty()) {
                 return false;
-            } else if (warnings.size() > 1) {
-                return true;
-            } else {
-                return warnings.get(0).startsWith("this request accesses system indices:") == false;
             }
+            boolean allSystemIndexWarnings = true;
+            for (String warning : warnings) {
+                if (!warning.startsWith("this request accesses system indices:")) {
+                    allSystemIndexWarnings = false;
+                    break;
+                }
+            }
+            return !allSystemIndexWarnings;
         });
         refreshRequest.setOptions(requestOptions);
         client().performRequest(refreshRequest);