diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 077064e33187c..a8b97a110d19a 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -115,7 +115,7 @@ dependencies { api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' api 'org.apache.maven:maven-model:3.6.2' - api 'com.networknt:json-schema-validator:1.0.68' + api 'com.networknt:json-schema-validator:1.0.69' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" testFixturesApi "junit:junit:${props.getProperty('junit')}" diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index d164b54c7506c..2a0521b17d55e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -125,18 +125,21 @@ public String call() throws Exception { // Add git origin info to generated POM files publication.getPom().withXml(PublishPlugin::addScmInfo); - // have to defer this until archivesBaseName is set - project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); + if (!publication.getName().toLowerCase().contains("zip")) { - // publish sources and javadoc for Java projects. - if (project.getPluginManager().hasPlugin("opensearch.java")) { - publication.artifact(project.getTasks().getByName("sourcesJar")); - publication.artifact(project.getTasks().getByName("javadocJar")); - } + // have to defer this until archivesBaseName is set + project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); + + // publish sources and javadoc for Java projects. + if (project.getPluginManager().hasPlugin("opensearch.java")) { + publication.artifact(project.getTasks().getByName("sourcesJar")); + publication.artifact(project.getTasks().getByName("javadocJar")); + } - generatePomTask.configure( - t -> t.dependsOn(String.format("generatePomFileFor%sPublication", Util.capitalize(publication.getName()))) - ); + generatePomTask.configure( + t -> t.dependsOn(String.format("generatePomFileFor%sPublication", Util.capitalize(publication.getName()))) + ); + } }); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java new file mode 100644 index 0000000000000..e8b4ecec7a56d --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+package org.opensearch.gradle.pluginzip;
+
+import java.util.*;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.publish.PublishingExtension;
+import org.gradle.api.publish.maven.MavenPublication;
+import org.gradle.api.publish.maven.plugins.MavenPublishPlugin;
+import java.nio.file.Path;
+
+public class Publish implements Plugin<Project> {
+    private Project project;
+
+    public final static String EXTENSION_NAME = "zipmavensettings";
+    public final static String PUBLICATION_NAME = "pluginZip";
+    public final static String STAGING_REPO = "zipStaging";
+    public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication";
+    public final static String LOCALMAVEN = "publishToMavenLocal";
+    public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo";
+    public String zipDistributionLocation = "/build/distributions/";
+
+    public static void configMaven(Project project) {
+        final Path buildDirectory = project.getRootDir().toPath();
+        project.getPluginManager().apply(MavenPublishPlugin.class);
+        project.getExtensions().configure(PublishingExtension.class, publishing -> {
+            publishing.repositories(repositories -> {
+                repositories.maven(maven -> {
+                    maven.setName(STAGING_REPO);
+                    maven.setUrl(buildDirectory.toString() + LOCAL_STAGING_REPO_PATH);
+                });
+            });
+            publishing.publications(publications -> {
+                publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> {
+                    String zipGroup = "org.opensearch.plugin";
+                    String zipArtifact = project.getName();
+                    String zipVersion = getProperty("version", project);
+                    mavenZip.artifact(project.getTasks().named("bundlePlugin"));
+                    mavenZip.setGroupId(zipGroup);
+                    mavenZip.setArtifactId(zipArtifact);
+                    mavenZip.setVersion(zipVersion);
+                });
+            });
+        });
+    }
+
+    static String getProperty(String name, Project project) {
+        if (project.hasProperty(name)) {
+            Object property = project.property(name);
+            if (property != null) {
+                return property.toString();
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public void apply(Project project) {
+        this.project = project;
+        project.afterEvaluate(evaluatedProject -> { configMaven(project); });
+        project.getGradle().getTaskGraph().whenReady(graph -> {
+            if (graph.hasTask(LOCALMAVEN)) {
+                project.getTasks().getByName(PLUGIN_ZIP_PUBLISH_POM_TASK).setEnabled(false);
+            }
+
+        });
+    }
+}
diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties
new file mode 100644
index 0000000000000..600218ff76835
--- /dev/null
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties
@@ -0,0 +1 @@
+implementation-class=org.opensearch.gradle.pluginzip.Publish
diff --git a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt
index aeb5e25decf62..03dead38bd8b4 100644
--- a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt
@@ -26,4 +26,4 @@ com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly te

 org.junit.Test @defaultMessage Just name your test method testFooBar

-java.lang.Math#random() @ Use one of the various randomization methods from LuceneTestCase or ESTestCase for reproducibility
+java.lang.Math#random() @ Use one of the various randomization methods from LuceneTestCase or OpenSearchTestCase for reproducibility
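For orientation, the `pluginZip` publication above stages the plugin zip under a Maven-layout path derived from the group `org.opensearch.plugin`, the project name, and the `version` property. A minimal sketch of where a staged artifact should land; the helper class and method names are hypothetical and not part of this change:

```java
import java.nio.file.Path;
import org.gradle.api.Project;

// Hypothetical helper, not part of this change: resolves the path where the
// pluginZip publication should appear, mirroring configMaven(...) above.
public final class StagingLayout {
    public static Path expectedStagedZip(Project project) {
        String name = project.getName();                          // becomes the artifactId
        String version = project.property("version").toString();  // supplied via -Pversion=...
        return project.getRootDir().toPath()
            .resolve("build/local-staging-repo")                  // the zipStaging repository root
            .resolve("org/opensearch/plugin")                     // groupId as a directory path
            .resolve(name)
            .resolve(version)
            .resolve(name + "-" + version + ".zip");
    }
}
```

The functional test further below asserts this layout (under its own test project's repository root) for `sample-plugin` version `2.0.0.0`.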
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
new file mode 100644
index 0000000000000..ae94ace55e637
--- /dev/null
+++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
@@ -0,0 +1,104 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gradle.pluginzip;
+
+import org.gradle.testkit.runner.BuildResult;
+import org.gradle.testkit.runner.GradleRunner;
+import org.gradle.testfixtures.ProjectBuilder;
+import org.gradle.api.Project;
+import org.opensearch.gradle.test.GradleUnitTestCase;
+import org.junit.Test;
+import java.io.IOException;
+import org.gradle.api.publish.maven.tasks.PublishToMavenRepository;
+import java.io.File;
+import org.gradle.testkit.runner.BuildResult;
+import java.io.FileWriter;
+import java.io.Writer;
+import static org.gradle.testkit.runner.TaskOutcome.SUCCESS;
+import static org.junit.Assert.assertEquals;
+import java.nio.file.Files;
+import org.apache.maven.model.Model;
+import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
+import org.codehaus.plexus.util.xml.pull.XmlPullParserException;
+import java.io.FileReader;
+import org.gradle.api.tasks.bundling.Zip;
+
+public class PublishTests extends GradleUnitTestCase {
+
+    @Test
+    public void testZipPublish() throws IOException, XmlPullParserException {
+        Project project = ProjectBuilder.builder().build();
+        String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository";
+        // Apply the opensearch.pluginzip plugin
+        project.getPluginManager().apply("opensearch.pluginzip");
+        // Check if the plugin has been applied to the project
+        assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip"));
+        // Check if the project has the task from class PublishToMavenRepository after plugin apply
+        assertNotNull(project.getTasks().withType(PublishToMavenRepository.class));
+        // Create a mock bundlePlugin task
+        Zip task = project.getTasks().create("bundlePlugin", Zip.class);
+        Publish.configMaven(project);
+        // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply
+        assertTrue(project.getTasks().getNames().contains(zipPublishTask));
+        assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask));
+        // Run Gradle functional tests by calling a build.gradle file that resembles the plugin publish behavior
+        File projectDir = new File("build/functionalTest");
+        // Create a sample plugin zip file
+        File sampleZip = new File("build/functionalTest/sample-plugin.zip");
+        Files.createDirectories(projectDir.toPath());
+        Files.createFile(sampleZip.toPath());
+        writeString(new File(projectDir, "settings.gradle"), "");
+        // Generate the build.gradle file
+        String buildFileContent = "apply plugin: 'maven-publish' \n"
+            + "publishing {\n"
+            + "  repositories {\n"
+            + "    maven {\n"
+            + "      url = 'local-staging-repo/'\n"
+            + "      name = 'zipStaging'\n"
+            + "    }\n"
+            + "  }\n"
+            + "  publications {\n"
+            + "    pluginZip(MavenPublication) {\n"
+            + "      groupId = 'org.opensearch.plugin' \n"
+            + "      artifactId = 'sample-plugin' \n"
+            + "      version = '2.0.0.0' \n"
+            + "      artifact('sample-plugin.zip') \n"
+            + "    }\n"
+            + "  }\n"
+            + "}";
+        writeString(new File(projectDir, "build.gradle"), buildFileContent);
+        // Execute the task publishPluginZipPublicationToZipStagingRepository
+        GradleRunner runner = GradleRunner.create();
+        runner.forwardOutput();
+        runner.withPluginClasspath();
+        runner.withArguments(zipPublishTask);
+        runner.withProjectDir(projectDir);
+        BuildResult result = runner.build();
+        // Check if the task publishPluginZipPublicationToZipStagingRepository ran successfully
+        assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome());
+        // Check if the zip has been published to the local staging repo
+        assertTrue(
+            new File("build/functionalTest/local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip")
+                .exists()
+        );
+        // Parse the generated POM and validate that the groupId is org.opensearch.plugin
+        MavenXpp3Reader reader = new MavenXpp3Reader();
+        Model model = reader.read(
+            new FileReader("build/functionalTest/local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom")
+        );
+        assertEquals(model.getGroupId(), "org.opensearch.plugin");
+    }
+
+    private void writeString(File file, String string) throws IOException {
+        try (Writer writer = new FileWriter(file)) {
+            writer.write(string);
+        }
+    }
+
+}
diff --git a/client/benchmark/README.md b/client/benchmark/README.md
index ee99a1384d27e..2732586b9e575 100644
--- a/client/benchmark/README.md
+++ b/client/benchmark/README.md
@@ -29,7 +29,7 @@ Example invocation:
 wget http://benchmarks.elasticsearch.org.s3.amazonaws.com/corpora/geonames/documents-2.json.bz2
 bzip2 -d documents-2.json.bz2
 mv documents-2.json client/benchmark/build
-gradlew -p client/benchmark run --args ' rest bulk localhost build/documents-2.json geonames type 8647880 5000'
+gradlew -p client/benchmark run --args ' rest bulk localhost build/documents-2.json geonames 8647880 5000'
 ```

 The parameters are all in the `'`s and are in order:
@@ -39,7 +39,6 @@ The parameters are all in the `'`s and are in order:
 * Benchmark target host IP (the host where OpenSearch is running)
 * full path to the file that should be bulk indexed
 * name of the index
-* name of the (sole) type in the index
 * number of documents in the file
 * bulk size
diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java
index de9d075cb9a16..ab0a0d6b8a19c 100644
--- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java
+++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java
@@ -49,7 +49,7 @@ public abstract class AbstractBenchmark<T extends Closeable> {

     protected abstract T client(String benchmarkTargetHost) throws Exception;

-    protected abstract BulkRequestExecutor bulkRequestExecutor(T client, String indexName, String typeName);
+    protected abstract BulkRequestExecutor bulkRequestExecutor(T client, String indexName);

     protected abstract SearchRequestExecutor searchRequestExecutor(T client, String indexName);

@@ -76,16 +76,15 @@ public final void run(String[] args) throws Exception {

     @SuppressForbidden(reason = "system out is ok for a command line tool")
     private void runBulkIndexBenchmark(String[] args) throws Exception {
-        if (args.length != 7) {
-            System.err.println("usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize");
+        if (args.length != 6) {
+            System.err.println("usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName numberOfDocuments bulkSize");
             System.exit(1);
         }
         String benchmarkTargetHost = args[1];
         String indexFilePath = args[2];
         String indexName = args[3];
-        String typeName = args[4];
-        int totalDocs = Integer.valueOf(args[5]);
-        int bulkSize = Integer.valueOf(args[6]);
+        int totalDocs = Integer.valueOf(args[4]);
+        int bulkSize = Integer.valueOf(args[5]);

         int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
         // consider 40% of all iterations as warmup iterations
@@ -97,7 +96,7 @@ private void runBulkIndexBenchmark(String[] args) throws Exception {
         BenchmarkRunner benchmark = new BenchmarkRunner(
             warmupIterations,
             iterations,
-            new BulkBenchmarkTask(bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize)
+            new BulkBenchmarkTask(bulkRequestExecutor(client, indexName), indexFilePath, warmupIterations, iterations, bulkSize)
         );

         try {
diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java
index 073fd5eab5c46..d2d7163b8dee2 100644
--- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java
+++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java
@@ -65,8 +65,8 @@ protected RestClient client(String benchmarkTargetHost) {
     }

     @Override
-    protected BulkRequestExecutor bulkRequestExecutor(RestClient client, String indexName, String typeName) {
-        return new RestBulkRequestExecutor(client, indexName, typeName);
+    protected BulkRequestExecutor bulkRequestExecutor(RestClient client, String indexName) {
+        return new RestBulkRequestExecutor(client, indexName);
     }

     @Override
@@ -78,9 +78,9 @@ private static final class RestBulkRequestExecutor implements BulkRequestExecutor {
         private final RestClient client;
         private final String actionMetadata;

-        RestBulkRequestExecutor(RestClient client, String index, String type) {
+        RestBulkRequestExecutor(RestClient client, String index) {
             this.client = client;
-            this.actionMetadata = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\", \"_type\" : \"%s\" } }%n", index, type);
+            this.actionMetadata = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\" } }%n", index);
         }

         @Override
@@ -91,7 +91,7 @@ public boolean bulkIndex(List<String> bulkData) {
             bulkRequestBody.append(bulkItem);
             bulkRequestBody.append("\n");
         }
-        Request request = new Request("POST", "/geonames/type/_noop_bulk");
+        Request request = new Request("POST", "/geonames/_noop_bulk");
         request.setJsonEntity(bulkRequestBody.toString());
         try {
             Response response = client.performRequest(request);
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
index 7a6227a7c2ec2..afecdc3eea1a3 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
@@ -895,8 +895,8 @@ Params withFields(String[] fields) {
          * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #withClusterManagerTimeout(TimeValue)}
          */
        @Deprecated
-        Params withMasterTimeout(TimeValue masterTimeout) {
-            return putParam("master_timeout", masterTimeout);
+        Params withMasterTimeout(TimeValue clusterManagerTimeout) {
+            return putParam("master_timeout", clusterManagerTimeout);
         }

         Params withClusterManagerTimeout(TimeValue clusterManagerTimeout) {
diff --git 
a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index e69ca149d697d..d293b979debb5 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -1917,6 +1917,10 @@ private Cancellable internalPerformRequestAsync( ActionListener listener, Set ignores ) { + if (listener == null) { + throw new IllegalArgumentException("The listener is required and cannot be null"); + } + Request req; try { req = requestConverter.apply(request); @@ -2069,7 +2073,7 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); if (xContentType == null) { throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index 3310425df4662..b5e7209a5212b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -47,7 +47,7 @@ public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); private TimeValue timeout = DEFAULT_ACK_TIMEOUT; - private TimeValue masterTimeout = DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerTimeout = DEFAULT_MASTER_NODE_TIMEOUT; /** * Sets the timeout to wait for the all the nodes to acknowledge @@ -58,11 +58,11 @@ public void setTimeout(TimeValue timeout) { } /** - * Sets the timeout to connect to the master node - * @param masterTimeout timeout as a {@link TimeValue} + * Sets the timeout to connect to the cluster-manager node + * @param clusterManagerTimeout timeout as a {@link TimeValue} */ - public void setMasterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; + public void setMasterTimeout(TimeValue clusterManagerTimeout) { + this.clusterManagerTimeout = clusterManagerTimeout; } /** @@ -73,9 +73,9 @@ public TimeValue timeout() { } /** - * Returns the timeout for the request to be completed on the master node + * Returns the timeout for the request to be completed on the cluster-manager node */ public TimeValue masterNodeTimeout() { - return masterTimeout; + return clusterManagerTimeout; } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java index f70682fee3763..ba9702fd6f2f2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java @@ -44,7 +44,7 @@ public class GetComponentTemplatesRequest implements Validatable { private final String name; - private TimeValue masterNodeTimeout = 
TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -65,23 +65,23 @@ public String name() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public boolean isLocal() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java index 572a5eeec2d23..cc8e820d5929f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java @@ -44,7 +44,7 @@ public class GetComposableIndexTemplateRequest implements Validatable { private final String name; - private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -65,23 +65,23 @@ public String name() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public 
boolean isLocal() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java index 5e5ab6aeae305..c5ef5cb9c1795 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java @@ -82,9 +82,9 @@ public final GetIndexRequest local(boolean local) { } /** - * Return local information, do not retrieve the state from master node (default: false). + * Return local information, do not retrieve the state from cluster-manager node (default: false). * @return true if local information is to be returned; - * false if information is to be retrieved from master node (default). + * false if information is to be retrieved from cluster-manager node (default). */ public final boolean local() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java index 071bcc7a75a71..f46af130cc9b0 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java @@ -51,7 +51,7 @@ public class GetIndexTemplatesRequest implements Validatable { private final List names; - private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -84,23 +84,23 @@ public List names() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public boolean isLocal() { return local; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index ed0a973081b62..ec6847630dc92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -89,9 +89,9 @@ public void testClusterHealth() { 
ClusterHealthRequest healthRequest = new ClusterHealthRequest(); Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomLocal(healthRequest::local, expectedParams); - String timeoutType = OpenSearchTestCase.randomFrom("timeout", "masterTimeout", "both", "none"); + String timeoutType = OpenSearchTestCase.randomFrom("timeout", "clusterManagerTimeout", "both", "none"); String timeout = OpenSearchTestCase.randomTimeValue(); - String masterTimeout = OpenSearchTestCase.randomTimeValue(); + String clusterManagerTimeout = OpenSearchTestCase.randomTimeValue(); switch (timeoutType) { case "timeout": healthRequest.timeout(timeout); @@ -99,10 +99,10 @@ public void testClusterHealth() { // If Cluster Manager Timeout wasn't set it uses the same value as Timeout expectedParams.put("cluster_manager_timeout", timeout); break; - case "masterTimeout": + case "clusterManagerTimeout": expectedParams.put("timeout", "30s"); - healthRequest.masterNodeTimeout(masterTimeout); - expectedParams.put("cluster_manager_timeout", masterTimeout); + healthRequest.masterNodeTimeout(clusterManagerTimeout); + expectedParams.put("cluster_manager_timeout", clusterManagerTimeout); break; case "both": healthRequest.timeout(timeout); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 66581fdc42c2b..0415b864ba35e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -806,7 +806,7 @@ public void testUpdate() throws IOException { UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 7766fa76d5cfe..efcc13921c398 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -284,6 +284,20 @@ public ActionRequestValidationException validate() { } } + public void testNullableActionListener() { + ActionRequest request = new ActionRequest() { + @Override + public ActionRequestValidationException validate() { + return null; + } + }; + + assertThrows( + IllegalArgumentException.class, + () -> restHighLevelClient.performRequestAsync(request, null, RequestOptions.DEFAULT, null, null, null) + ); + } + public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(null, null)); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java index 3026472bb8e53..659238debccad 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java @@ -48,10 
+48,10 @@ public void testNonDefaults() {
         TimedRequest timedRequest = new TimedRequest() {
         };
         TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
-        TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
+        TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
         timedRequest.setTimeout(timeout);
-        timedRequest.setMasterTimeout(masterTimeout);
+        timedRequest.setMasterTimeout(clusterManagerTimeout);
         assertEquals(timedRequest.timeout(), timeout);
-        assertEquals(timedRequest.masterNodeTimeout(), masterTimeout);
+        assertEquals(timedRequest.masterNodeTimeout(), clusterManagerTimeout);
     }
 }
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java
index c96e296891fb9..5bfb0abab9f37 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java
@@ -80,10 +80,10 @@ public void testTimeout() {
         final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
         request.setTimeout(timeout);

-        final TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
-        request.setMasterTimeout(masterTimeout);
+        final TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000));
+        request.setMasterTimeout(clusterManagerTimeout);

         assertEquals(request.timeout(), timeout);
-        assertEquals(request.masterNodeTimeout(), masterTimeout);
+        assertEquals(request.masterNodeTimeout(), clusterManagerTimeout);
     }
 }
diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java
index c982ae8eb931f..952823cf29d6c 100644
--- a/client/rest/src/main/java/org/opensearch/client/Node.java
+++ b/client/rest/src/main/java/org/opensearch/client/Node.java
@@ -210,7 +210,7 @@ public Roles(final Set<String> roles) {
         }

         /**
-         * Returns whether or not the node could be elected master.
+         * Returns whether or not the node could be elected cluster-manager.
          */
         public boolean isMasterEligible() {
             return roles.contains("master") || roles.contains("cluster_manager");
diff --git a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java
index 09d5a2c1fe576..1d1c09f33fef7 100644
--- a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java
+++ b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java
@@ -36,7 +36,7 @@
 /**
  * Selects nodes that can receive requests. Used to keep requests away
- * from master nodes or to send them to nodes with a particular attribute.
+ * from cluster-manager nodes or to send them to nodes with a particular attribute.
  * Use with {@link RestClientBuilder#setNodeSelector(NodeSelector)}.
  */
 public interface NodeSelector {
@@ -80,10 +80,10 @@ public String toString() {

     /**
      * Selector that matches any node that has metadata and doesn't
-     * have the {@code master} role OR it has the data {@code data}
+     * have the {@code cluster_manager} role or that has the {@code data}
      * role.
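+     * For example, a node that advertises only the {@code cluster_manager}
+     * (or legacy {@code master}) role is skipped, while a node that also
+     * carries the {@code data} role is kept.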
      */
-    NodeSelector SKIP_DEDICATED_MASTERS = new NodeSelector() {
+    NodeSelector SKIP_DEDICATED_CLUSTER_MANAGERS = new NodeSelector() {
         @Override
         public void select(Iterable<Node> nodes) {
             for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java
index f7cb0733bb8c5..65a831e59bfb0 100644
--- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java
+++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java
@@ -55,33 +55,33 @@ public void testAny() {
         assertEquals(expected, nodes);
     }

-    public void testNotMasterOnly() {
-        Node masterOnly = dummyNode(true, false, false);
+    public void testNotClusterManagerOnly() {
+        Node clusterManagerOnly = dummyNode(true, false, false);
         Node all = dummyNode(true, true, true);
-        Node masterAndData = dummyNode(true, true, false);
-        Node masterAndIngest = dummyNode(true, false, true);
+        Node clusterManagerAndData = dummyNode(true, true, false);
+        Node clusterManagerAndIngest = dummyNode(true, false, true);
         Node coordinatingOnly = dummyNode(false, false, false);
         Node ingestOnly = dummyNode(false, false, true);
         Node data = dummyNode(false, true, randomBoolean());
         List<Node> nodes = new ArrayList<>();
-        nodes.add(masterOnly);
+        nodes.add(clusterManagerOnly);
         nodes.add(all);
-        nodes.add(masterAndData);
-        nodes.add(masterAndIngest);
+        nodes.add(clusterManagerAndData);
+        nodes.add(clusterManagerAndIngest);
         nodes.add(coordinatingOnly);
         nodes.add(ingestOnly);
         nodes.add(data);
         Collections.shuffle(nodes, getRandom());
         List<Node> expected = new ArrayList<>(nodes);
-        expected.remove(masterOnly);
-        NodeSelector.SKIP_DEDICATED_MASTERS.select(nodes);
+        expected.remove(clusterManagerOnly);
+        NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS.select(nodes);
         assertEquals(expected, nodes);
     }

-    private static Node dummyNode(boolean master, boolean data, boolean ingest) {
+    private static Node dummyNode(boolean clusterManager, boolean data, boolean ingest) {
         final Set<String> roles = new TreeSet<>();
-        if (master) {
-            roles.add("master");
+        if (clusterManager) {
+            roles.add("cluster_manager");
         }
         if (data) {
             roles.add("data");
@@ -98,4 +98,33 @@ private static Node dummyNode(boolean master, boolean data, boolean ingest) {
             Collections.<String, List<String>>emptyMap()
         );
     }
+
+    /*
+     * Validate SKIP_DEDICATED_CLUSTER_MANAGERS can filter both the deprecated "master" role and the new "cluster_manager" role.
+     * The test is a modified copy of the above testNotClusterManagerOnly().
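+     * A node that advertises only the legacy "master" role must be removed
+     * from the selection exactly like a dedicated "cluster_manager" node.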
+     */
+    public void testDeprecatedNotMasterOnly() {
+        Node clusterManagerOnly = dummyNode(true, false, false);
+        Node all = dummyNode(true, true, true);
+        Node data = dummyNode(false, true, randomBoolean());
+        Node deprecatedMasterOnly = new Node(
+            new HttpHost("dummy"),
+            Collections.emptySet(),
+            randomAsciiAlphanumOfLength(5),
+            randomAsciiAlphanumOfLength(5),
+            new Roles(Collections.singleton("master")),
+            Collections.emptyMap()
+        );
+        List<Node> nodes = new ArrayList<>();
+        nodes.add(clusterManagerOnly);
+        nodes.add(all);
+        nodes.add(data);
+        nodes.add(deprecatedMasterOnly);
+        Collections.shuffle(nodes, getRandom());
+        List<Node> expected = new ArrayList<>(nodes);
+        expected.remove(clusterManagerOnly);
+        expected.remove(deprecatedMasterOnly);
+        NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS.select(nodes);
+        assertEquals(expected, nodes);
+    }
 }
diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java
index 0b7d2881ccb54..d88d4f4afd9b1 100644
--- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java
+++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java
@@ -297,7 +297,7 @@ public void testNodeSelector() throws Exception {
     }

     public void testSetNodes() throws Exception {
-        RestClient restClient = createRestClient(NodeSelector.SKIP_DEDICATED_MASTERS);
+        RestClient restClient = createRestClient(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS);
         List<Node> newNodes = new ArrayList<>(nodes.size());
         for (int i = 0; i < nodes.size(); i++) {
             Node.Roles roles = i == 0
diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
index 82c4fc2896213..066419844f048 100644
--- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
+++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
@@ -133,7 +133,7 @@ public void usage() throws IOException, InterruptedException {
             //tag::rest-client-init-node-selector
             RestClientBuilder builder = RestClient.builder(
                 new HttpHost("localhost", 9200, "http"));
-            builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); // <1>
+            builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1>
             //end::rest-client-init-node-selector
         }
         {
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 93a82ff324835..cd0cf6b9db64d 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -355,7 +355,7 @@ tasks.register('buildDeb', Deb) {
 }

 tasks.register('buildNoJdkDeb', Deb) {
-  configure(commonDebConfig(true, 'x64'))
+  configure(commonDebConfig(false, 'x64'))
 }

 Closure commonRpmConfig(boolean jdk, String architecture) {
diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle
index 61719282c1ca2..7f8af147e236c 100644
--- a/gradle/code-coverage.gradle
+++ b/gradle/code-coverage.gradle
@@ -11,6 +11,10 @@ apply plugin: 'jacoco'
 repositories {
   mavenCentral()
   gradlePluginPortal()
+  // TODO: Find the way to use the repositories from RepositoriesSetupPlugin
+  maven {
+    url = "https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/"
+  }
 }

 allprojects {
diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java
new file mode 100644
index 0000000000000..5cfc52b20bfc5
--- /dev/null
+++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.common.xcontent;
+
+/**
+ * Abstracts a Media Type and a format parameter.
+ * Media types are used as values on Content-Type and Accept headers;
+ * format is a URL parameter that specifies the response media type.
+ */
+public interface MediaType {
+    /**
+     * Returns the type part of a MediaType,
+     * i.e. application for application/json
+     */
+    String type();
+
+    /**
+     * Returns the subtype part of a MediaType,
+     * i.e. json for application/json
+     */
+    String subtype();
+
+    /**
+     * Returns the corresponding format for a MediaType, i.e. json for the application/json media type.
+     * Can differ from the MediaType's subtype, i.e. text/plain has a subtype of plain but its format is txt
+     */
+    String format();
+
+    /**
+     * Returns the type and subtype as a single string, i.e. application/json for a Content-Type or Accept header.
+     */
+    default String typeWithSubtype() {
+        return type() + "/" + subtype();
+    }
+}
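As an illustration of the contract, an implementation only has to supply the three accessors; `typeWithSubtype()` comes for free. The type below is hypothetical and not part of this change:

```java
import org.opensearch.common.xcontent.MediaType;

// Hypothetical example, not part of this change: a plain-text media type
// showing how format() can differ from subtype(), as the Javadoc notes.
public class TextMediaType implements MediaType {
    @Override
    public String type() {
        return "text";   // typeWithSubtype() -> "text/plain"
    }

    @Override
    public String subtype() {
        return "plain";
    }

    @Override
    public String format() {
        return "txt";    // the ?format= value differs from the subtype here
    }
}
```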
diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java
new file mode 100644
index 0000000000000..cbd3589f5b500
--- /dev/null
+++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java
@@ -0,0 +1,135 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.common.xcontent;
+
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+public class MediaTypeParser<T extends MediaType> {
+    private final Map<String, T> formatToMediaType;
+    private final Map<String, T> typeWithSubtypeToMediaType;
+
+    public MediaTypeParser(T[] acceptedMediaTypes) {
+        this(acceptedMediaTypes, Map.of());
+    }
+
+    public MediaTypeParser(T[] acceptedMediaTypes, Map<String, T> additionalMediaTypes) {
+        final int size = acceptedMediaTypes.length + additionalMediaTypes.size();
+        Map<String, T> formatMap = new HashMap<>(size);
+        Map<String, T> typeMap = new HashMap<>(size);
+        for (T mediaType : acceptedMediaTypes) {
+            typeMap.put(mediaType.typeWithSubtype(), mediaType);
+            formatMap.put(mediaType.format(), mediaType);
+        }
+        for (Map.Entry<String, T> entry : additionalMediaTypes.entrySet()) {
+            String typeWithSubtype = entry.getKey();
+            T mediaType = entry.getValue();
+
+            typeMap.put(typeWithSubtype.toLowerCase(Locale.ROOT), mediaType);
+            formatMap.put(mediaType.format(), mediaType);
+        }
+
+        this.formatToMediaType = Map.copyOf(formatMap);
+        this.typeWithSubtypeToMediaType = Map.copyOf(typeMap);
+    }
+
+    public T fromMediaType(String mediaType) {
+        ParsedMediaType parsedMediaType = parseMediaType(mediaType);
+        return parsedMediaType != null ? parsedMediaType.getMediaType() : null;
+    }
+
+    public T fromFormat(String format) {
+        if (format == null) {
+            return null;
+        }
+        return formatToMediaType.get(format.toLowerCase(Locale.ROOT));
+    }
+
+    /**
+     * Parses a media type that follows https://tools.ietf.org/html/rfc7231#section-3.1.1.1
+     * @param headerValue a header value from Accept or Content-Type
+     * @return a parsed media-type, or null if the value cannot be parsed
+     */
+    public ParsedMediaType parseMediaType(String headerValue) {
+        if (headerValue != null) {
+            String[] split = headerValue.toLowerCase(Locale.ROOT).split(";");
+
+            String[] typeSubtype = split[0].trim().split("/");
+            if (typeSubtype.length == 2) {
+                String type = typeSubtype[0];
+                String subtype = typeSubtype[1];
+                T xContentType = typeWithSubtypeToMediaType.get(type + "/" + subtype);
+                if (xContentType != null) {
+                    Map<String, String> parameters = new HashMap<>();
+                    for (int i = 1; i < split.length; i++) {
+                        // spaces are allowed between parameters, but not around the '=' sign
+                        String[] keyValueParam = split[i].trim().split("=");
+                        if (keyValueParam.length != 2 || hasSpaces(keyValueParam[0]) || hasSpaces(keyValueParam[1])) {
+                            return null;
+                        }
+                        parameters.put(keyValueParam[0], keyValueParam[1]);
+                    }
+                    return new ParsedMediaType(xContentType, parameters);
+                }
+            }
+
+        }
+        return null;
+    }
+
+    private boolean hasSpaces(String s) {
+        return s.trim().equals(s) == false;
+    }
+
+    /**
+     * A media type object that contains all the information provided on a Content-Type or Accept header
+     */
+    public class ParsedMediaType {
+        private final Map<String, String> parameters;
+        private final T mediaType;
+
+        public ParsedMediaType(T mediaType, Map<String, String> parameters) {
+            this.parameters = parameters;
+            this.mediaType = mediaType;
+        }
+
+        public T getMediaType() {
+            return mediaType;
+        }
+
+        public Map<String, String> getParameters() {
+            return parameters;
+        }
+    }
+}
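A short sketch of how this parser is driven; the accepted types and extra mappings mirror what `XContentType` registers in the next file, and the return values follow from the code above:

```java
import java.util.Map;
import org.opensearch.common.xcontent.MediaTypeParser;
import org.opensearch.common.xcontent.XContentType;

public class MediaTypeParserExample {
    public static void main(String[] args) {
        MediaTypeParser<XContentType> parser = new MediaTypeParser<>(
            XContentType.values(),
            Map.of("application/*", XContentType.JSON, "application/x-ndjson", XContentType.JSON)
        );

        parser.fromFormat("yaml");                                // -> XContentType.YAML
        parser.fromMediaType("application/json; charset=UTF-8");  // -> XContentType.JSON

        // Parameters are kept; malformed headers yield null instead of throwing.
        MediaTypeParser<XContentType>.ParsedMediaType parsed =
            parser.parseMediaType("application/json; charset=UTF-8");
        parsed.getParameters();                                   // -> {charset=utf-8}
        parser.parseMediaType("application/json; key = value");   // -> null (spaces around '=')
    }
}
```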
diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
index b0986d603ef23..1c745f591e38b 100644
--- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
+++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
@@ -38,12 +38,12 @@
 import org.opensearch.common.xcontent.yaml.YamlXContent;

 import java.util.Locale;
-import java.util.Objects;
+import java.util.Map;

 /**
  * The content type of {@link org.opensearch.common.xcontent.XContent}.
  */
-public enum XContentType {
+public enum XContentType implements MediaType {

     /**
      * A JSON based content type.
@@ -60,7 +60,7 @@ public String mediaType() {
         }

         @Override
-        public String shortName() {
+        public String subtype() {
             return "json";
         }

@@ -79,7 +79,7 @@ public String mediaTypeWithoutParameters() {
         }

         @Override
-        public String shortName() {
+        public String subtype() {
             return "smile";
         }

@@ -98,7 +98,7 @@ public String mediaTypeWithoutParameters() {
         }

         @Override
-        public String shortName() {
+        public String subtype() {
             return "yaml";
         }

@@ -117,7 +117,7 @@ public String mediaTypeWithoutParameters() {
         }

         @Override
-        public String shortName() {
+        public String subtype() {
             return "cbor";
         }

@@ -127,34 +127,42 @@ public XContent xContent() {
         }
     };

+    /** a parser of media types */
+    private static final MediaTypeParser<XContentType> MEDIA_TYPE_PARSER = new MediaTypeParser<>(
+        XContentType.values(),
+        Map.of("application/*", JSON, "application/x-ndjson", JSON)
+    );
+
+    /** gets the {@link MediaTypeParser} singleton for use outside this class */
+    @SuppressWarnings("rawtypes")
+    public static MediaTypeParser getMediaTypeParser() {
+        return MEDIA_TYPE_PARSER;
+    }
+
     /**
-     * Accepts either a format string, which is equivalent to {@link XContentType#shortName()} or a media type that optionally has
-     * parameters and attempts to match the value to an {@link XContentType}. The comparisons are done in lower case format and this method
-     * also supports a wildcard accept for {@code application/*}. This method can be used to parse the {@code Accept} HTTP header or a
-     * format query string parameter. This method will return {@code null} if no match is found
+     * Accepts a format string, which is most of the time equivalent to {@link XContentType#subtype()},
+     * and attempts to match the value to an {@link XContentType}.
+     * The comparisons are done in lower case format.
+     * This method will return {@code null} if no match is found
      */
-    public static XContentType fromMediaTypeOrFormat(String mediaType) {
-        if (mediaType == null) {
-            return null;
-        }
-
-        mediaType = removeVersionInMediaType(mediaType);
-        for (XContentType type : values()) {
-            if (isSameMediaTypeOrFormatAs(mediaType, type)) {
-                return type;
-            }
-        }
-        final String lowercaseMediaType = mediaType.toLowerCase(Locale.ROOT);
-        if (lowercaseMediaType.startsWith("application/*")) {
-            return JSON;
-        }
+    public static XContentType fromFormat(String mediaType) {
+        return MEDIA_TYPE_PARSER.fromFormat(mediaType);
+    }

-        return null;
+    /**
+     * Attempts to match the given media type with the known {@link XContentType} values. This match is done in a case-insensitive manner.
+     * The provided media type can optionally have parameters.
+     * This method is suitable for parsing of the {@code Content-Type} and {@code Accept} HTTP headers.
+     * This method will return {@code null} if no match is found
+     */
+    public static XContentType fromMediaType(String mediaTypeHeaderValue) {
+        mediaTypeHeaderValue = removeVersionInMediaType(mediaTypeHeaderValue);
+        return MEDIA_TYPE_PARSER.fromMediaType(mediaTypeHeaderValue);
     }

     /**
      * Clients compatible with ES 7.x might start sending media types with versioned media type
-     * in a form of application/vnd.opensearch+json;compatible-with=7.
+     * in a form of application/vnd.elasticsearch+json;compatible-with=7.
      * This has to be removed in order to be used in 7.x server.
      * The same client connecting using that media type will be able to communicate with ES 8 thanks to compatible API.
      * @param mediaType - a media type used on Content-Type header, might contain versioned media type.
@@ -162,38 +170,12 @@ public static XContentType fromMediaTypeOrFormat(String mediaType) {
      * @return a media type string without
      */
     private static String removeVersionInMediaType(String mediaType) {
-        if (mediaType.contains("vnd.opensearch")) {
+        if (mediaType != null && (mediaType = mediaType.toLowerCase(Locale.ROOT)).contains("vnd.opensearch")) {
             return mediaType.replaceAll("vnd.opensearch\\+", "").replaceAll("\\s*;\\s*compatible-with=\\d+", "");
         }
         return mediaType;
     }

-    /**
-     * Attempts to match the given media type with the known {@link XContentType} values. This match is done in a case-insensitive manner.
-     * The provided media type should not include any parameters. This method is suitable for parsing part of the {@code Content-Type}
-     * HTTP header. This method will return {@code null} if no match is found
-     */
-    public static XContentType fromMediaType(String mediaType) {
-        final String lowercaseMediaType = Objects.requireNonNull(mediaType, "mediaType cannot be null").toLowerCase(Locale.ROOT);
-        for (XContentType type : values()) {
-            if (type.mediaTypeWithoutParameters().equals(lowercaseMediaType)) {
-                return type;
-            }
-        }
-        // we also support newline delimited JSON: http://specs.okfnlabs.org/ndjson/
-        if (lowercaseMediaType.toLowerCase(Locale.ROOT).equals("application/x-ndjson")) {
-            return XContentType.JSON;
-        }
-
-        return null;
-    }
-
-    private static boolean isSameMediaTypeOrFormatAs(String stringType, XContentType type) {
-        return type.mediaTypeWithoutParameters().equalsIgnoreCase(stringType)
-            || stringType.toLowerCase(Locale.ROOT).startsWith(type.mediaTypeWithoutParameters().toLowerCase(Locale.ROOT) + ";")
-            || type.shortName().equalsIgnoreCase(stringType);
-    }
-
     private int index;

     XContentType(int index) {
@@ -208,10 +190,17 @@ public String mediaType() {
         return mediaTypeWithoutParameters();
     }

-    public abstract String shortName();
-
     public abstract XContent xContent();

     public abstract String mediaTypeWithoutParameters();

+    @Override
+    public String type() {
+        return "application";
+    }
+
+    @Override
+    public String format() {
+        return subtype();
+    }
 }
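Taken together, callers of the removed `fromMediaTypeOrFormat` now pick one of the two narrower entry points. A quick sketch of the resulting behavior, with return values inferred from the parser wiring above:

```java
import org.opensearch.common.xcontent.XContentType;

public class XContentTypeLookupExample {
    public static void main(String[] args) {
        // Header-style lookups go through fromMediaType(...)
        XContentType json = XContentType.fromMediaType("application/json; charset=UTF-8"); // JSON
        XContentType ndjson = XContentType.fromMediaType("application/x-ndjson");          // JSON
        XContentType wildcard = XContentType.fromMediaType("application/*");               // JSON

        // ?format= style lookups go through fromFormat(...)
        XContentType yaml = XContentType.fromFormat("yaml");                               // YAML

        // Versioned 7.x-compatible media types are normalized before matching
        XContentType compat = XContentType.fromMediaType("application/vnd.opensearch+json; compatible-with=7"); // JSON
    }
}
```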
diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java
new file mode 100644
index 0000000000000..06dbd4ebd24dc
--- /dev/null
+++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.common.xcontent;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Collections;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MediaTypeParserTests extends OpenSearchTestCase {
+
+    @SuppressWarnings("unchecked")
+    MediaTypeParser<XContentType> mediaTypeParser = XContentType.getMediaTypeParser();
+
+    public void testJsonWithParameters() throws Exception {
+        String mediaType = "application/json";
+        assertThat(mediaTypeParser.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap()));
+        assertThat(mediaTypeParser.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap()));
+        assertThat(mediaTypeParser.parseMediaType(mediaType + "; charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8")));
+        assertThat(
+            mediaTypeParser.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(),
+            equalTo(Map.of("charset", "utf-8", "custom", "123"))
+        );
+    }
+
+    public void testWhiteSpaceInTypeSubtype() {
+        String mediaType = " application/json ";
+        assertThat(mediaTypeParser.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON));
+
+        assertThat(
+            mediaTypeParser.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(),
+            equalTo(Map.of("charset", "utf-8", "custom", "123"))
+        );
+        assertThat(
+            mediaTypeParser.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(),
+            equalTo(Map.of("charset", "utf-8", "custom", "123"))
+        );
+
+        mediaType = " application / json ";
+        assertThat(mediaTypeParser.parseMediaType(mediaType), is(nullValue()));
+    }
+
+    public void testInvalidParameters() {
+        String mediaType = "application/json";
+        assertThat(mediaTypeParser.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue()));
+
+        assertThat(mediaTypeParser.parseMediaType(mediaType + "; key = value"), is(nullValue()));
+        assertThat(mediaTypeParser.parseMediaType(mediaType + "; key="), is(nullValue()));
+    }
+}
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index a18f18cea185e..d5bbd23325cd0 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -59,7 +59,7 @@ dependencies {
   api 'io.projectreactor:reactor-core:3.4.17'
   api 'io.projectreactor.netty:reactor-netty:1.0.17'
   api 'io.projectreactor.netty:reactor-netty-core:1.0.16'
-  api 'io.projectreactor.netty:reactor-netty-http:1.0.16'
+  api 'io.projectreactor.netty:reactor-netty-http:1.0.18'
   api "org.slf4j:slf4j-api:${versions.slf4j}"
   api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
   api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1
deleted file mode 100644
index 
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a18f18cea185e..d5bbd23325cd0 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -59,7 +59,7 @@ dependencies { api 'io.projectreactor:reactor-core:3.4.17' api 'io.projectreactor.netty:reactor-netty:1.0.17' api 'io.projectreactor.netty:reactor-netty-core:1.0.16' - api 'io.projectreactor.netty:reactor-netty-http:1.0.16' + api 'io.projectreactor.netty:reactor-netty-http:1.0.18' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 deleted file mode 100644 index d737315b06b62..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93edb9a1dc774d843551a616e0f316e11ffa81ed \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 new file mode 100644 index 0000000000000..43599c0b6c691 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 @@ -0,0 +1 @@ +a34930cbd46b53ffdb19d2089605f39589eb2b99 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 02ac822f94995..0a1e0bde3af2f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.20.0' + api 'com.google.protobuf:protobuf-java:3.20.1' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 deleted file mode 100644 index c5b0169ce0dba..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c72ddaaab7ffafe789e4f732c1fd614eb798bf4 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 new file mode 100644 index 0000000000000..1ebc9838b7bea --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 @@ -0,0 +1 @@ +5472700cd39a46060efbd35e29cb36b3fb89517b \ No newline at end of file
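A note on the `*.jar.sha1` files swapped in these dependency bumps: each holds the hex-encoded SHA-1 digest of the corresponding jar, which the dependency-license precommit check verifies (the build's `updateShas` task regenerates them after a version bump). A rough, hand-rolled equivalent, with an illustrative jar path:

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class JarSha1 {
    public static void main(String[] args) throws Exception {
        // Illustrative path; any bumped dependency jar is handled the same way.
        Path jar = Path.of("reactor-netty-http-1.0.18.jar");
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                sha1.update(buf, 0, n); // stream the jar through the digest
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : sha1.digest()) {
            hex.append(String.format("%02x", b));
        }
        // One line of lowercase hex, as stored in licenses/<artifact>-<version>.jar.sha1
        System.out.println(hex);
    }
}
```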
diff --git a/release-notes/opensearch.release-notes-2.0.0-rc1.md b/release-notes/opensearch.release-notes-2.0.0-rc1.md new file mode 100644 index 0000000000000..5171424203c62 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.0.0-rc1.md @@ -0,0 +1,628 @@ +## 2022-04-26 Version 2.0.0-rc1 Release Notes + + +### Breaking Changes in 2.0 + +#### Remove Mapping types + +* Remove type mapping from document index API ([#2026](https://github.com/opensearch-project/OpenSearch/pull/2026)) +* [Remove] Type mapping parameter from document update API ([#2204](https://github.com/opensearch-project/OpenSearch/pull/2204)) +* [Remove] Types from DocWrite Request and Response ([#2239](https://github.com/opensearch-project/OpenSearch/pull/2239)) +* [Remove] Types from GET/MGET ([#2168](https://github.com/opensearch-project/OpenSearch/pull/2168)) +* [Remove] types from SearchHit and Explain API ([#2205](https://github.com/opensearch-project/OpenSearch/pull/2205)) +* [Remove] type support from Bulk API ([#2215](https://github.com/opensearch-project/OpenSearch/pull/2215)) +* Remove type end-points from no-op bulk and search action ([#2261](https://github.com/opensearch-project/OpenSearch/pull/2261)) +* Remove type end-points from search and related APIs ([#2263](https://github.com/opensearch-project/OpenSearch/pull/2263)) +* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) +* Removes type mappings from mapping APIs ([#2238](https://github.com/opensearch-project/OpenSearch/pull/2238)) +* Remove type end-points from count action ([#2379](https://github.com/opensearch-project/OpenSearch/pull/2379)) +* Remove type from validate query API ([#2255](https://github.com/opensearch-project/OpenSearch/pull/2255)) +* [Remove] Type parameter from TermVectors API ([#2104](https://github.com/opensearch-project/OpenSearch/pull/2104)) +* [Remove] types from rest-api-spec endpoints (#2689) ([#2698](https://github.com/opensearch-project/OpenSearch/pull/2698)) +* [Type removal] Remove deprecation warning on use of _type in doc scripts (#2564) ([#2568](https://github.com/opensearch-project/OpenSearch/pull/2568)) +* [Remove] Types from PutIndexTemplateRequest and builder to reduce mapping to a string ([#2510](https://github.com/opensearch-project/OpenSearch/pull/2510)) +* [Remove] Type from Percolate query API ([#2490](https://github.com/opensearch-project/OpenSearch/pull/2490)) +* [Remove] types from CreateIndexRequest and companion Builder's mapping method ([#2498](https://github.com/opensearch-project/OpenSearch/pull/2498)) +* [Remove] Type from PutIndexTemplateRequest and PITRB ([#2497](https://github.com/opensearch-project/OpenSearch/pull/2497)) +* [Remove] Type metadata from ingest documents ([#2491](https://github.com/opensearch-project/OpenSearch/pull/2491)) +* [Remove] type from CIR.mapping and CIRB.mapping ([#2478](https://github.com/opensearch-project/OpenSearch/pull/2478)) +* [Remove] types based addMapping method from CreateIndexRequest and Builder ([#2460](https://github.com/opensearch-project/OpenSearch/pull/2460)) +* [Remove] type from TaskResults index and IndexMetadata.getMappings ([#2469](https://github.com/opensearch-project/OpenSearch/pull/2469)) +* [Remove] Type query ([#2448](https://github.com/opensearch-project/OpenSearch/pull/2448)) +* [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459)) +* [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450)) +* [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439)) +* [Remove] Multiple Types from IndexTemplateMetadata ([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400)) +* Remove include_type_name parameter from rest api spec ([#2410](https://github.com/opensearch-project/OpenSearch/pull/2410)) +* [Remove] include_type_name from HLRC ([#2397](https://github.com/opensearch-project/OpenSearch/pull/2397)) +* [Remove] Type mappings from GeoShapeQueryBuilder ([#2322](https://github.com/opensearch-project/OpenSearch/pull/2322)) +* [Remove] types from PutMappingRequest ([#2335](https://github.com/opensearch-project/OpenSearch/pull/2335)) +* [Remove] deprecated getMapping API from IndicesClient ([#2262](https://github.com/opensearch-project/OpenSearch/pull/2262)) +* [Remove] remaining type usage in Client and AbstractClient ([#2258](https://github.com/opensearch-project/OpenSearch/pull/2258)) +* [Remove] Type from Client.prepare(Index,Delete,Update) ([#2253](https://github.com/opensearch-project/OpenSearch/pull/2253)) +* [Remove] Type Specific Index Stats ([#2198](https://github.com/opensearch-project/OpenSearch/pull/2198)) +* [Remove] Type from Search Internals ([#2109](https://github.com/opensearch-project/OpenSearch/pull/2109)) + + +#### Upgrades + +* [Upgrade] 1.2 BWC to Lucene 8.10.1 ([#1460](https://github.com/opensearch-project/OpenSearch/pull/1460)) +* [Upgrade] Lucene 9.1
release (#2560) ([#2565](https://github.com/opensearch-project/OpenSearch/pull/2565)) +* [Upgrade] Lucene 9.1.0-snapshot-ea989fe8f30 ([#2487](https://github.com/opensearch-project/OpenSearch/pull/2487)) +* [Upgrade] Lucene 9.0.0 release ([#1109](https://github.com/opensearch-project/OpenSearch/pull/1109)) +* Set target and source compatibility to 11, required by Lucene 9. ([#2407](https://github.com/opensearch-project/OpenSearch/pull/2407)) +* Upgrade to Lucene 8.10.1 ([#1440](https://github.com/opensearch-project/OpenSearch/pull/1440)) +* Upgrade to Lucene 8.9 ([#1080](https://github.com/opensearch-project/OpenSearch/pull/1080)) +* Update lucene version to 8.8.2 ([#557](https://github.com/opensearch-project/OpenSearch/pull/557)) +* Support Gradle 7. Fixing 'eclipse' plugin dependencies ([#1648](https://github.com/opensearch-project/OpenSearch/pull/1648)) +* Update to Gradle 7.3.3 ([#1803](https://github.com/opensearch-project/OpenSearch/pull/1803)) +* Support Gradle 7. More reliable tasks dependencies for Maven plugins publishing ([#1630](https://github.com/opensearch-project/OpenSearch/pull/1630)) +* Support Gradle 7. Fixing publishing to Maven Local for plugins ([#1624](https://github.com/opensearch-project/OpenSearch/pull/1624)) +* Support Gradle 7 ([#1609](https://github.com/opensearch-project/OpenSearch/pull/1609)) + +#### Deprecations + +* [Remove] Deprecated Synced Flush API ([#1761](https://github.com/opensearch-project/OpenSearch/pull/1761)) +* Remove deprecated search.remote settings ([#1870](https://github.com/opensearch-project/OpenSearch/pull/1870)) +* [Remove] Default Mapping ([#2151](https://github.com/opensearch-project/OpenSearch/pull/2151)) +* Remove Deprecated SimpleFS ([#1639](https://github.com/opensearch-project/OpenSearch/pull/1639)) +* [Remove] Deprecated Zen1 Discovery ([#1216](https://github.com/opensearch-project/OpenSearch/pull/1216)) +* Remove LegacyESVersion.V_6_8_x constants ([#1869](https://github.com/opensearch-project/OpenSearch/pull/1869)) +* Remove LegacyESVersion.V_6_7_x constants ([#1807](https://github.com/opensearch-project/OpenSearch/pull/1807)) +* Remove LegacyESVersion.V_6_6_x constants ([#1804](https://github.com/opensearch-project/OpenSearch/pull/1804)) +* Remove LegacyESVersion.V_6_5_x constants ([#1794](https://github.com/opensearch-project/OpenSearch/pull/1794)) +* Remove deprecated transport client ([#1781](https://github.com/opensearch-project/OpenSearch/pull/1781)) +* Remove LegacyVersion.v6.4.x constants ([#1787](https://github.com/opensearch-project/OpenSearch/pull/1787)) +* Remove LegacyESVersion.V_6_3_x constants ([#1691](https://github.com/opensearch-project/OpenSearch/pull/1691)) +* Remove LegacyESVersion.V_6_2_x constants ([#1686](https://github.com/opensearch-project/OpenSearch/pull/1686)) +* Remove LegacyESVersion.V_6_1_x constants ([#1681](https://github.com/opensearch-project/OpenSearch/pull/1681)) +* Remove 6.0.* version constants ([#1658](https://github.com/opensearch-project/OpenSearch/pull/1658)) +* [Remove] 6x skip from yml ([#2153](https://github.com/opensearch-project/OpenSearch/pull/2153)) + +### Security Fixes + +* [CVE] Upgrade dependencies for Azure related plugins to mitigate CVEs ([#688](https://github.com/opensearch-project/OpenSearch/pull/688)) +* [CVE] Upgrade dependencies to mitigate CVEs ([#657](https://github.com/opensearch-project/OpenSearch/pull/657)) +* [CVE-2018-11765] Upgrade hadoop dependencies for hdfs plugin ([#654](https://github.com/opensearch-project/OpenSearch/pull/654)) +* [CVE-2020-7692] Upgrade 
google-oauth clients for google cloud plugins ([#662](https://github.com/opensearch-project/OpenSearch/pull/662)) +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 (#2599) ([#2647](https://github.com/opensearch-project/OpenSearch/pull/2647)) +* Remove old ES libraries used in reindex due to CVEs ([#1359](https://github.com/opensearch-project/OpenSearch/pull/1359)) + +### Features/Enhancements + +* Allowing custom folder name for plugin installation ([#848](https://github.com/opensearch-project/OpenSearch/pull/848)) +* A CLI tool to assist during an upgrade to OpenSearch. ([#846](https://github.com/opensearch-project/OpenSearch/pull/846)) +* Enable adding experimental features through sandbox modules ([#691](https://github.com/opensearch-project/OpenSearch/pull/691)) +* Rank feature - unknown field linear ([#983](https://github.com/opensearch-project/OpenSearch/pull/983)) +* [FEATURE] Add OPENSEARCH_JAVA_HOME env to override JAVA_HOME ([#2001](https://github.com/opensearch-project/OpenSearch/pull/2001)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs (#2682) ([#2891](https://github.com/opensearch-project/OpenSearch/pull/2891)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' (#2880) ([#2882](https://github.com/opensearch-project/OpenSearch/pull/2882)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs (#2680) ([#2871](https://github.com/opensearch-project/OpenSearch/pull/2871)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs (#2678) ([#2867](https://github.com/opensearch-project/OpenSearch/pull/2867)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs (#2660) ([#2771](https://github.com/opensearch-project/OpenSearch/pull/2771)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs (#2658) ([#2755](https://github.com/opensearch-project/OpenSearch/pull/2755)) +* [Backport 2.0] Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) +* Add a new node role 'cluster_manager' as the alternative for 'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) +* Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) +* Replace 'discovered_master' with 'discovered_cluster_manager' in 'GET Cat Health' API ([#2438](https://github.com/opensearch-project/OpenSearch/pull/2438)) +* Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) +* Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' ([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) +* Deprecate setting 'cluster.no_master_block' and
introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) +* Replace remaining 'blacklist' with 'denylist' in internal class and method names (#2784) ([#2813](https://github.com/opensearch-project/OpenSearch/pull/2813)) +* Centralize code related to 'master_timeout' deprecation for easier removal - in CAT Nodes API (#2670) ([#2696](https://github.com/opensearch-project/OpenSearch/pull/2696)) +* Make Rest-High-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' (#2702) ([#2741](https://github.com/opensearch-project/OpenSearch/pull/2741)) +* Replaced "master" terminology in Log message (#2575) ([#2594](https://github.com/opensearch-project/OpenSearch/pull/2594)) +* Deprecate setting 'reindex.remote.whitelist' and introduce the alternative setting 'reindex.remote.allowlist' ([#2221](https://github.com/opensearch-project/OpenSearch/pull/2221)) +* Replace exclusionary words whitelist and blacklist in the places that won't impact backwards compatibility ([#2178](https://github.com/opensearch-project/OpenSearch/pull/2178)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) +* Add '_name' field support to score functions and provide it back in explanation response ([#2244](https://github.com/opensearch-project/OpenSearch/pull/2244)) +* Add support of SOCKS proxies for S3 repository ([#2160](https://github.com/opensearch-project/OpenSearch/pull/2160)) +* Case Insensitive Support in Regexp Interval ([#2237](https://github.com/opensearch-project/OpenSearch/pull/2237)) +* Support unordered non-overlapping intervals ([#2103](https://github.com/opensearch-project/OpenSearch/pull/2103)) +* Support _first and _last parameter for missing bucket ordering in composite aggregation ([#1942](https://github.com/opensearch-project/OpenSearch/pull/1942)) +* Concurrent Searching (Experimental): modify profiling implementation to support concurrent data collection ([#1673](https://github.com/opensearch-project/OpenSearch/pull/1673)) +* Changes to support retrieval of operations from translog based on specified range ([#1210](https://github.com/opensearch-project/OpenSearch/pull/1210)) +* Support for translog pruning based on retention leases ([#1038](https://github.com/opensearch-project/OpenSearch/pull/1038)) +* Support for bwc tests for plugins ([#1051](https://github.com/opensearch-project/OpenSearch/pull/1051)) +* Part 1: Support for cancel_after_timeinterval parameter in search and msearch request ([#986](https://github.com/opensearch-project/OpenSearch/pull/986)) +* alt bash path support ([#1047](https://github.com/opensearch-project/OpenSearch/pull/1047)) +* Support Data Streams in OpenSearch ([#690](https://github.com/opensearch-project/OpenSearch/pull/690)) +* Support for Heap after GC stats (correction after backport to 1.2.0)
([#1315](https://github.com/opensearch-project/OpenSearch/pull/1315)) +* Support for Heap after GC stats ([#1265](https://github.com/opensearch-project/OpenSearch/pull/1265)) +* Add deprecated API for creating History Ops Snapshot from translog (#2886) ([#2917](https://github.com/opensearch-project/OpenSearch/pull/2917)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Add default for EnginePlugin.getEngineFactory ([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) +* Add valuesField in PercentilesAggregationBuilder streamInput constructor ([#2308](https://github.com/opensearch-project/OpenSearch/pull/2308)) +* Reintroduce negative epoch_millis #1991 ([#2232](https://github.com/opensearch-project/OpenSearch/pull/2232)) +* Install plugin command help ([#2193](https://github.com/opensearch-project/OpenSearch/pull/2193)) +* Always use Lucene index in peer recovery ([#2077](https://github.com/opensearch-project/OpenSearch/pull/2077)) +* Add Factory to enable Lucene ConcatenateGraphFilter (#1278) ([#2152](https://github.com/opensearch-project/OpenSearch/pull/2152)) +* Add proxy settings for GCS repository ([#2096](https://github.com/opensearch-project/OpenSearch/pull/2096)) +* Add proxy username and password settings for Azure repository ([#2098](https://github.com/opensearch-project/OpenSearch/pull/2098)) +* Add regexp interval source ([#1917](https://github.com/opensearch-project/OpenSearch/pull/1917)) +* Delay the request size calculation until required by the indexing pressure framework ([#1592](https://github.com/opensearch-project/OpenSearch/pull/1592)) +* Enabling Sort Optimization to make use of Lucene ([#1974](https://github.com/opensearch-project/OpenSearch/pull/1974)) +* Add max_expansions option to wildcard interval ([#1916](https://github.com/opensearch-project/OpenSearch/pull/1916)) +* Prefer adaptive replica selection over awareness attribute based routing ([#1107](https://github.com/opensearch-project/OpenSearch/pull/1107)) +* Prioritize primary shard movement during shard allocation ([#1445](https://github.com/opensearch-project/OpenSearch/pull/1445)) +* Enforce soft deletes ([#1903](https://github.com/opensearch-project/OpenSearch/pull/1903)) +* Make SortBuilders pluggable ([#1856](https://github.com/opensearch-project/OpenSearch/pull/1856)) +* Use try-with-resources with MockLogAppender ([#1595](https://github.com/opensearch-project/OpenSearch/pull/1595)) +* Bridging the gap in network overhead measurement in the profiler ([#1360](https://github.com/opensearch-project/OpenSearch/pull/1360)) +* Adding a cancelled field to tell if a cancellable task is cancelled ([#1732](https://github.com/opensearch-project/OpenSearch/pull/1732)) +* Avoid logging duplicate deprecation warnings multiple times ([#1660](https://github.com/opensearch-project/OpenSearch/pull/1660)) +* Added more detailed logging for SSLHandshakeException ([#1602](https://github.com/opensearch-project/OpenSearch/pull/1602)) +* Rename field_masking_span to span_field_masking ([#1606](https://github.com/opensearch-project/OpenSearch/pull/1606)) +* Giving informative error messages for double slashes in API call URLs ([#1568](https://github.com/opensearch-project/OpenSearch/pull/1568)) +* Renaming slave to replica in filebeat-6.0.template.json file. 
([#1569](https://github.com/opensearch-project/OpenSearch/pull/1569)) +* Enable RestHighLevel-Client to set parameter require_alias for bulk index and reindex requests ([#1533](https://github.com/opensearch-project/OpenSearch/pull/1533)) +* Improve leader node-left logging to indicate timeout/coordination state rejection ([#1584](https://github.com/opensearch-project/OpenSearch/pull/1584)) +* Added logic to allow {dot} files on startup ([#1437](https://github.com/opensearch-project/OpenSearch/pull/1437)) +* remove codeQL warning about implicit narrowing conversion in compound assignment ([#1403](https://github.com/opensearch-project/OpenSearch/pull/1403)) +* Make TranslogDeletionPolicy abstract for extension ([#1456](https://github.com/opensearch-project/OpenSearch/pull/1456)) +* Remove deprecated settings and logic for translog pruning by retention lease. ([#1416](https://github.com/opensearch-project/OpenSearch/pull/1416)) +* Adjust CodeCache size to eliminate JVM warnings (and crashes) ([#1426](https://github.com/opensearch-project/OpenSearch/pull/1426)) +* Add extension point for custom TranslogDeletionPolicy in EnginePlugin. ([#1404](https://github.com/opensearch-project/OpenSearch/pull/1404)) +* Update node attribute check to version update (1.2) check for shard indexing pressure serialization. ([#1395](https://github.com/opensearch-project/OpenSearch/pull/1395)) +* Add EngineConfig extensions to EnginePlugin ([#1387](https://github.com/opensearch-project/OpenSearch/pull/1387)) +* Add Shard Level Indexing Pressure ([#1336](https://github.com/opensearch-project/OpenSearch/pull/1336)) +* Making GeneralScriptException an Implementation of OpensearchWrapperException ([#1066](https://github.com/opensearch-project/OpenSearch/pull/1066)) +* Handle shard over allocation during partial zone/rack or independent node failures ([#1149](https://github.com/opensearch-project/OpenSearch/pull/1149)) +* Introduce FS Health HEALTHY threshold to fail stuck node ([#1167](https://github.com/opensearch-project/OpenSearch/pull/1167)) +* Drop mocksocket in favour of custom security manager checks (tests only) ([#1205](https://github.com/opensearch-project/OpenSearch/pull/1205)) +* Improving the Grok circular reference check to prevent stack overflow ([#1079](https://github.com/opensearch-project/OpenSearch/pull/1079)) +* Introduce replaceRoutes() method and 2 new constructors to RestHandler.java ([#947](https://github.com/opensearch-project/OpenSearch/pull/947)) +* Fail fast when BytesRestResponse ctor throws exception ([#923](https://github.com/opensearch-project/OpenSearch/pull/923)) +* Restricting logs permissions ([#966](https://github.com/opensearch-project/OpenSearch/pull/966)) +* Avoid override of routes() in BaseRestHandler to respect the default behavior defined in RestHandler ([#889](https://github.com/opensearch-project/OpenSearch/pull/889)) +* Replacing docs-beta links with /docs ([#957](https://github.com/opensearch-project/OpenSearch/pull/957)) +* Adding broken links checker ([#877](https://github.com/opensearch-project/OpenSearch/pull/877)) +* Pass interceptor to super constructor ([#876](https://github.com/opensearch-project/OpenSearch/pull/876)) +* Add 'tagline' back to MainResponse in server that was removed in PR #427 ([#913](https://github.com/opensearch-project/OpenSearch/pull/913)) +* Remove distribution from main response in compatibility mode ([#898](https://github.com/opensearch-project/OpenSearch/pull/898)) +* Replace metadata keys in OpenSearchException during serialization and 
deserialization ([#905](https://github.com/opensearch-project/OpenSearch/pull/905)) +* Add cluster setting to spoof version number returned from MainResponse ([#847](https://github.com/opensearch-project/OpenSearch/pull/847)) +* Add URL for lucene snapshots ([#858](https://github.com/opensearch-project/OpenSearch/pull/858)) +* Decouple throttling limits for new and old indices. ([#778](https://github.com/opensearch-project/OpenSearch/pull/778)) +* Verbose plugin not found exception ([#849](https://github.com/opensearch-project/OpenSearch/pull/849)) +* Enable BWC checks ([#796](https://github.com/opensearch-project/OpenSearch/pull/796)) +* Add a method to use fallback setting to set the memory size ([#755](https://github.com/opensearch-project/OpenSearch/pull/755)) +* An allocation constraint mechanism, that de-prioritizes nodes from getting picked for allocation if they breach certain constraints ([#680](https://github.com/opensearch-project/OpenSearch/pull/680)) +* Create group settings with fallback. ([#743](https://github.com/opensearch-project/OpenSearch/pull/743)) +* Add timeout on cat/stats API ([#552](https://github.com/opensearch-project/OpenSearch/pull/552)) +* Make allocation decisions at node level first for pending task optimi… ([#534](https://github.com/opensearch-project/OpenSearch/pull/534)) +* Decouples primaries_recoveries limit from concurrent recoveries limit. ([#546](https://github.com/opensearch-project/OpenSearch/pull/546)) +* Merging javadoc feature branch changes to main ([#715](https://github.com/opensearch-project/OpenSearch/pull/715)) +* Add read_only block argument to opensearch-node unsafe-bootstrap command ([#599](https://github.com/opensearch-project/OpenSearch/pull/599)) +* Catch runtime exceptions to make class loader race conditions easier to debug. ([#608](https://github.com/opensearch-project/OpenSearch/pull/608)) +* Remove URL content from Reindex error response ([#630](https://github.com/opensearch-project/OpenSearch/pull/630)) +* Standardize int, long, double and float Setting constructors. ([#665](https://github.com/opensearch-project/OpenSearch/pull/665)) +* Add Remote Reindex SPI extension ([#547](https://github.com/opensearch-project/OpenSearch/pull/547)) +* Make default number of shards configurable ([#625](https://github.com/opensearch-project/OpenSearch/pull/625)) +* Converted all .asciidoc to .md. ([#658](https://github.com/opensearch-project/OpenSearch/pull/658)) +* Make -Dtests.output=always actually work. ([#648](https://github.com/opensearch-project/OpenSearch/pull/648)) +* Handle inefficiencies while fetching the delayed unassigned shards during cluster health ([#588](https://github.com/opensearch-project/OpenSearch/pull/588)) +* Replace elastic.co with opensearch.org ([#611](https://github.com/opensearch-project/OpenSearch/pull/611)) +* Speedup lang-painless tests ([#605](https://github.com/opensearch-project/OpenSearch/pull/605)) +* Speedup snapshot stale indices delete ([#613](https://github.com/opensearch-project/OpenSearch/pull/613)) +* Speed ups to test suite and precommit tasks. ([#580](https://github.com/opensearch-project/OpenSearch/pull/580)) +* [Versioning] Rebase to OpenSearch version 1.0.0 ([#555](https://github.com/opensearch-project/OpenSearch/pull/555)) +* Prevent setting maxParallelForks=0 on single-cpu machines ([#558](https://github.com/opensearch-project/OpenSearch/pull/558)) +* Use alternate example data in OpenSearch test cases. 
([#454](https://github.com/opensearch-project/OpenSearch/pull/454)) + +### Bug Fixes + +* Adding a null pointer check to fix index_prefix query (#2879) ([#2903](https://github.com/opensearch-project/OpenSearch/pull/2903)) +* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check (#2779) ([#2794](https://github.com/opensearch-project/OpenSearch/pull/2794)) +* [Bug] Fix InboundDecoder version compat check (#2570) ([#2573](https://github.com/opensearch-project/OpenSearch/pull/2573)) +* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) +* Fixing the --release flag usage for javac (#2343) ([#2352](https://github.com/opensearch-project/OpenSearch/pull/2352)) +* Fix flaky test case - string profiler via global ordinals ([#2226](https://github.com/opensearch-project/OpenSearch/pull/2226)) +* Fixing the indentation in version.yml ([#2163](https://github.com/opensearch-project/OpenSearch/pull/2163)) +* Fixing org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) +* Fix integration tests failure ([#2067](https://github.com/opensearch-project/OpenSearch/pull/2067)) +* Another attempt to fix o.o.transport.netty4.OpenSearchLoggingHandlerIT fails w/ stack overflow ([#2051](https://github.com/opensearch-project/OpenSearch/pull/2051)) +* Fix AssertionError message ([#2044](https://github.com/opensearch-project/OpenSearch/pull/2044)) +* Fix composite aggregation failed test cases introduced by missing_order parameter (#1942) ([#2005](https://github.com/opensearch-project/OpenSearch/pull/2005)) +* Fixing allocation filters to persist existing state on settings update ([#1718](https://github.com/opensearch-project/OpenSearch/pull/1718)) +* Fix more failing tests as a result of renaming ([#457](https://github.com/opensearch-project/OpenSearch/pull/457)) +* Fix failing rest-api-spec tests as part of renaming. ([#451](https://github.com/opensearch-project/OpenSearch/pull/451)) +* Fix multiple failing server tests. ([#453](https://github.com/opensearch-project/OpenSearch/pull/453)) +* [TEST] Fix FsHealthServiceTest by increasing the timeout period before checking the FS health after restoring the FS status ([#1813](https://github.com/opensearch-project/OpenSearch/pull/1813)) +* [BUG] Wait for outstanding requests to complete in LastSuccessfulSett… ([#1939](https://github.com/opensearch-project/OpenSearch/pull/1939)) +* [Bug] Wait for outstanding requests to complete ([#1925](https://github.com/opensearch-project/OpenSearch/pull/1925)) +* [BUG] Serialization bugs can cause node drops ([#1885](https://github.com/opensearch-project/OpenSearch/pull/1885)) +* [BUG] Docker distribution builds are failing.
Switching to http://vault.centos.org ([#2024](https://github.com/opensearch-project/OpenSearch/pull/2024)) +* [BUG] SymbolicLinkPreservingUntarTransform fails on Windows ([#1433](https://github.com/opensearch-project/OpenSearch/pull/1433)) +* [BUG] ConcurrentSnapshotsIT#testAssertMultipleSnapshotsAndPrimaryFailOver fails intermittently ([#1311](https://github.com/opensearch-project/OpenSearch/pull/1311)) +* [Bug] Fix InstallPluginCommand to use proper key signatures ([#1233](https://github.com/opensearch-project/OpenSearch/pull/1233)) +* [Bug] Fix mixed cluster support for OpenSearch 2+ ([#1191](https://github.com/opensearch-project/OpenSearch/pull/1191)) +* [BUG] Fix cat.health test failures in pre 1.0.0 mixed cluster test ([#928](https://github.com/opensearch-project/OpenSearch/pull/928)) +* [BUG] Fix versioning issues discovered through version bump ([#884](https://github.com/opensearch-project/OpenSearch/pull/884)) +* [BUG] fix MainResponse to spoof version number for legacy clients ([#708](https://github.com/opensearch-project/OpenSearch/pull/708)) +* [Bug] Fix gradle build on Windows failing from a recent change ([#758](https://github.com/opensearch-project/OpenSearch/pull/758)) +* Apply fix for health API response to distinguish no master ([#656](https://github.com/opensearch-project/OpenSearch/pull/656)) +* Rename translog pruning setting to CCR specific setting and addressed Bug in the test case ([#1243](https://github.com/opensearch-project/OpenSearch/pull/1243)) +* fix gradle check fail due to renaming -min in #1094 ([#1289](https://github.com/opensearch-project/OpenSearch/pull/1289)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues (#3048) ([#3050](https://github.com/opensearch-project/OpenSearch/pull/3050)) +* [Backport] [2.0] Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* [Bug] Change 1.0.0 version check in PluginInfo +* TEST BUG: MergeSchedulerSettingsTests fails always on small machines ([#559](https://github.com/opensearch-project/OpenSearch/pull/559)) +* Fix bwcVersions after bumping version 1.3.1 ([#2532](https://github.com/opensearch-project/OpenSearch/pull/2532)) +* Fixing bwcVersions and bwc builds (#2430) - adding 1.4.0 into main bwcVersions +* Fixing invalid Java code example in JavaDoc ([#2008](https://github.com/opensearch-project/OpenSearch/pull/2008)) +* Fixing org.opensearch.common.network.InetAddressesTests.testForStringIPv6WithScopeIdInput ([#1913](https://github.com/opensearch-project/OpenSearch/pull/1913)) +* Fix o.o.transport.netty4.OpenSearchLoggingHandlerIT stack overflow test failure ([#1900](https://github.com/opensearch-project/OpenSearch/pull/1900)) +* Fix verifyVersions gradle task and cleanup bwcVersions ([#1878](https://github.com/opensearch-project/OpenSearch/pull/1878)) +* Attempt to fix :test:fixtures:s3-fixture:composeUp fails due to HTTP connection issue ([#1866](https://github.com/opensearch-project/OpenSearch/pull/1866)) +* Fixing build failures after Flavor Serialization backport ([#1867](https://github.com/opensearch-project/OpenSearch/pull/1867)) +* Fixing auto backport workflow ([#1845](https://github.com/opensearch-project/OpenSearch/pull/1845)) +* Upgrade and fix link checker to 1.2.
([#1811](https://github.com/opensearch-project/OpenSearch/pull/1811)) +* link checker fix - only run on opensearch-project/OpenSearch ([#1719](https://github.com/opensearch-project/OpenSearch/pull/1719)) +* Fixing .gitattributes for binary content, removing *.class files ([#1717](https://github.com/opensearch-project/OpenSearch/pull/1717)) +* Fix unit test testFailsHealthOnHungIOBeyondHealthyTimeout() by increasing the max waiting time before assertion ([#1692](https://github.com/opensearch-project/OpenSearch/pull/1692)) +* Fixing bwc test for repository-multi-version ([#1441](https://github.com/opensearch-project/OpenSearch/pull/1441)) +* Fixing support for a multi-node cluster via "gradle run" ([#1455](https://github.com/opensearch-project/OpenSearch/pull/1455)) +* Fix windows build (mostly) ([#1412](https://github.com/opensearch-project/OpenSearch/pull/1412)) +* Fixing post merge 3rd party audit issues ([#1384](https://github.com/opensearch-project/OpenSearch/pull/1384)) +* Minor fix for the flaky test to reduce concurrency (#1361) ([#1364](https://github.com/opensearch-project/OpenSearch/pull/1364)) +* Fixing org.opensearch.repositories.azure.AzureBlobContainerRetriesTests and org.opensearch.action.admin.cluster.node.stats.NodeStatsTests ([#1390](https://github.com/opensearch-project/OpenSearch/pull/1390)) +* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) +* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) +* fixed broken anchor link. ([#436](https://github.com/opensearch-project/OpenSearch/pull/436)) +* [Rename] fix painless test ([#446](https://github.com/opensearch-project/OpenSearch/pull/446)) +* Fix name of the log appender. ([#445](https://github.com/opensearch-project/OpenSearch/pull/445)) +* [Rename] Fixing lingering rename and ./gradlew run will start ([#443](https://github.com/opensearch-project/OpenSearch/pull/443)) +* Fixed copyright to OpenSearch ([#1175](https://github.com/opensearch-project/OpenSearch/pull/1175)) +* Fix defects in code-coverage.gradle to generate code coverage report properly ([#1214](https://github.com/opensearch-project/OpenSearch/pull/1214)) +* Fix Snapshot pattern in DistributionDownloader. ([#916](https://github.com/opensearch-project/OpenSearch/pull/916)) +* Fix stragglers from renaming to OpenSearch work. ([#483](https://github.com/opensearch-project/OpenSearch/pull/483)) +* Fix rename issues and failing repository-hdfs tests. ([#518](https://github.com/opensearch-project/OpenSearch/pull/518)) +* Fix build-tools integ test failures. ([#465](https://github.com/opensearch-project/OpenSearch/pull/465)) +* Fix a few more renaming issues.
([#464](https://github.com/opensearch-project/OpenSearch/pull/464)) +* Fix org.opensearch.index.reindex.ReindexRestClientSslTests#testClientSucceedsWithCertificateAuthorities - javax.net.ssl.SSLPeerUnverifiedException ([#1212](https://github.com/opensearch-project/OpenSearch/pull/1212)) +* Fix opensearch-env always sources the environment from hardcoded file ([#875](https://github.com/opensearch-project/OpenSearch/pull/875)) +* Fix resource leak issues suggested by Amazon CodeGuru ([#816](https://github.com/opensearch-project/OpenSearch/pull/816)) +* Fix arm architecture translation issue ([#809](https://github.com/opensearch-project/OpenSearch/pull/809)) +* Fix Javadoc errors in `client/sniffer` ([#802](https://github.com/opensearch-project/OpenSearch/pull/802)) +* [BWC] fix mixedCluster and rolling upgrades ([#775](https://github.com/opensearch-project/OpenSearch/pull/775)) +* Fix #649: Properly escape @ in JavaDoc. ([#651](https://github.com/opensearch-project/OpenSearch/pull/651)) +* Fix snapshot deletion task getting stuck in the event of exceptions ([#629](https://github.com/opensearch-project/OpenSearch/pull/629)) +* Use the correct domain to fix failing integration tests. ([#519](https://github.com/opensearch-project/OpenSearch/pull/519)) +* Change OpenSearch Version to OpenSearch version to fix failed test case org.opensearch.plugins.ListPluginsCommandTests.testPluginWithNativeController ([#460](https://github.com/opensearch-project/OpenSearch/pull/460)) +* [Rename] Fix env variables and old es maven repo ([#439](https://github.com/opensearch-project/OpenSearch/pull/439)) +* ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) +* Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) +* MapperService has to be passed in as null for EnginePlugins CodecService constructor ([#2177](https://github.com/opensearch-project/OpenSearch/pull/2177)) +* Adding shards per node constraint for predictability to testClusterGr… ([#2110](https://github.com/opensearch-project/OpenSearch/pull/2110)) +* Mapping update for “date_range” field type is not idempotent ([#2094](https://github.com/opensearch-project/OpenSearch/pull/2094)) +* Use Version.compareMajor instead of using equals operator ([#1876](https://github.com/opensearch-project/OpenSearch/pull/1876)) +* Execution failed for task ':test:fixtures:azure/s3/hdfs/gcs-fixture:composeDown' ([#1824](https://github.com/opensearch-project/OpenSearch/pull/1824)) +* RestIntegTestTask fails because of missed log4j-core dependency ([#1815](https://github.com/opensearch-project/OpenSearch/pull/1815)) +* Start MockLogAppender before adding to static context ([#1587](https://github.com/opensearch-project/OpenSearch/pull/1587)) +* Use a non-default port for upgrade-cli unit tests ([#1512](https://github.com/opensearch-project/OpenSearch/pull/1512)) +* Close first engine instance before creating second ([#1457](https://github.com/opensearch-project/OpenSearch/pull/1457)) +* Avoid crashing on using the index.lifecycle.name in the API body ([#1060](https://github.com/opensearch-project/OpenSearch/pull/1060)) +* Max scroll limit breach to throw an OpenSearchRejectedExecutionException ([#1054](https://github.com/opensearch-project/OpenSearch/pull/1054)) +* Extract
excludes into a file, fix the link checker by adding http://site.icu-project.org/. ([#1189](https://github.com/opensearch-project/OpenSearch/pull/1189)) +* Prevent /_cat/master from getting tripped by the CB ([#1036](https://github.com/opensearch-project/OpenSearch/pull/1036)) +* Excluding missed broken links from link checker ([#1010](https://github.com/opensearch-project/OpenSearch/pull/1010)) +* Excluding links from link checker ([#995](https://github.com/opensearch-project/OpenSearch/pull/995)) +* Version checks are incorrectly returning versions < 1.0.0. ([#797](https://github.com/opensearch-project/OpenSearch/pull/797)) +* Make `:server:check` pass successfully ([#471](https://github.com/opensearch-project/OpenSearch/pull/471)) +* Correct the regex pattern for class path in testDieWithDignity() ([#466](https://github.com/opensearch-project/OpenSearch/pull/466)) +* Change ESLoggingHandler to OpenSearchLoggingHandler to pass failing test case org.opensearch.transport.netty4.OpenSearchLoggingHandlerIT.testLoggingHandler due to renaming ([#461](https://github.com/opensearch-project/OpenSearch/pull/461)) + + +### Infrastructure + +* Using Github App token to trigger CI for version increment PRs ([#2157](https://github.com/opensearch-project/OpenSearch/pull/2157)) +* Using Github App to trigger CI for auto-backport ([#2071](https://github.com/opensearch-project/OpenSearch/pull/2071)) +* Remove precommit and wrapper validation workflows for gradle as we migrate it to internal CI tools ([#452](https://github.com/opensearch-project/OpenSearch/pull/452)) +* Updated the url for docker distribution ([#2325](https://github.com/opensearch-project/OpenSearch/pull/2325)) +* Recommend Docker 3.6.0. ([#1427](https://github.com/opensearch-project/OpenSearch/pull/1427)) +* docker build: use OSS `log4j2.properties` ([#878](https://github.com/opensearch-project/OpenSearch/pull/878)) +* [DOCKER] add apt update to test fixture krb5kdc ([#565](https://github.com/opensearch-project/OpenSearch/pull/565)) +* Cleanup `default` flavor stragglers from docker distributions. ([#481](https://github.com/opensearch-project/OpenSearch/pull/481)) +* Replace blacklist in Gradle build environment configuration (#2752) ([#2781](https://github.com/opensearch-project/OpenSearch/pull/2781)) +* Add 1.3.2 to main causing gradle check failures (#2679) ([#2684](https://github.com/opensearch-project/OpenSearch/pull/2684)) +* Added jenkinsfile to run gradle check in OpenSearch (#2166) ([#2629](https://github.com/opensearch-project/OpenSearch/pull/2629)) +* Gradle check retry (#2638) ([#2661](https://github.com/opensearch-project/OpenSearch/pull/2661)) +* Move Gradle wrapper and precommit checks into OpenSearch repo. ([#1664](https://github.com/opensearch-project/OpenSearch/pull/1664)) +* Enabling missingJavadoc validation in gradle check ([#721](https://github.com/opensearch-project/OpenSearch/pull/721)) +* Removing Jenkinsfile (not used), replaced by opensearch-build/jenkins/opensearch/Jenkinsfile ([#1408](https://github.com/opensearch-project/OpenSearch/pull/1408)) +* Changed JAVA_HOME to jdk-17 (#2656) ([#2671](https://github.com/opensearch-project/OpenSearch/pull/2671)) +* Adding support for JDK17 and removing JDK8 ([#2025](https://github.com/opensearch-project/OpenSearch/pull/2025)) +* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. 
([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668)) +* Better JDK-18 EA (and beyond) support of SecurityManager ([#1750](https://github.com/opensearch-project/OpenSearch/pull/1750)) +* Support JDK 18 EA builds ([#1710](https://github.com/opensearch-project/OpenSearch/pull/1710)) +* Adding 1.2.2 ([#1731](https://github.com/opensearch-project/OpenSearch/pull/1731)) +* Add version 1.2.1. ([#1701](https://github.com/opensearch-project/OpenSearch/pull/1701)) +* Add version 1.2.3. ([#1760](https://github.com/opensearch-project/OpenSearch/pull/1760)) +* Modernize and consolidate JDKs usage across all stages of the build. Use JDK-17 as bundled JDK distribution to run tests ([#1358](https://github.com/opensearch-project/OpenSearch/pull/1358)) +* Fix build-tools/reaper source/target compatibility to be JDK-11 (#2596) ([#2606](https://github.com/opensearch-project/OpenSearch/pull/2606)) +* Remove Github DCO action since DCO runs via Github App now ([#2317](https://github.com/opensearch-project/OpenSearch/pull/2317)) +* Adding Github action for auto backport PR creation ([#1600](https://github.com/opensearch-project/OpenSearch/pull/1600)) +* Add a whitesource unified agent file and update the config ([#1540](https://github.com/opensearch-project/OpenSearch/pull/1540)) +* Run link checker GitHub action on schedule. ([#1221](https://github.com/opensearch-project/OpenSearch/pull/1221)) +* Clarify opensearch.version to not include -SNAPSHOT. ([#1186](https://github.com/opensearch-project/OpenSearch/pull/1186)) +* Move pr template to .github as default since folder design required manually added to url ([#458](https://github.com/opensearch-project/OpenSearch/pull/458)) +* changed label from low hanging fruit to help wanted. added link to filter for that label.
Added link to forum ([#435](https://github.com/opensearch-project/OpenSearch/pull/435)) +* adding in untriaged label to features ([#1419](https://github.com/opensearch-project/OpenSearch/pull/1419)) +* Run spotless and exclude checkstyle on plugins module ([#1417](https://github.com/opensearch-project/OpenSearch/pull/1417)) +* Adding spotless support for subprojects under :test ([#1464](https://github.com/opensearch-project/OpenSearch/pull/1464)) +* Run spotless and exclude checkstyle on rest-api-spec module ([#1462](https://github.com/opensearch-project/OpenSearch/pull/1462)) +* Run spotless and exclude checkstyle on modules module ([#1442](https://github.com/opensearch-project/OpenSearch/pull/1442)) +* Enabling spotless, disabling checkstyle check on plugins ([#1488](https://github.com/opensearch-project/OpenSearch/pull/1488)) +* Cleanup for Checkstyle ([#1370](https://github.com/opensearch-project/OpenSearch/pull/1370)) +* Run spotless and exclude checkstyle on libs module ([#1428](https://github.com/opensearch-project/OpenSearch/pull/1428)) +* Run spotless and exclude checkstyle on client module ([#1392](https://github.com/opensearch-project/OpenSearch/pull/1392)) +* Run spotless and exclude checkstyle on server module ([#1380](https://github.com/opensearch-project/OpenSearch/pull/1380)) +* Change whitesource integration to scan on 1.x branch ([#1786](https://github.com/opensearch-project/OpenSearch/pull/1786)) +* Add .whitesource configuration file ([#1525](https://github.com/opensearch-project/OpenSearch/pull/1525)) +* add codeowners file ([#1530](https://github.com/opensearch-project/OpenSearch/pull/1530)) +* Updated links for linkchecker ([#1539](https://github.com/opensearch-project/OpenSearch/pull/1539)) +* Updating dependabot open pr limits ([#1875](https://github.com/opensearch-project/OpenSearch/pull/1875)) +* Updating .gitattributes for additional file types ([#1727](https://github.com/opensearch-project/OpenSearch/pull/1727)) +* Updating the Ivy repository to point to real url for Releases ([#602](https://github.com/opensearch-project/OpenSearch/pull/602)) +* build: introduce support for reproducible builds ([#1995](https://github.com/opensearch-project/OpenSearch/pull/1995)) +* Add support to generate code coverage report with JaCoCo ([#971](https://github.com/opensearch-project/OpenSearch/pull/971)) +* Support running elasticsearch-oss distribution in test cluster for BWC ([#764](https://github.com/opensearch-project/OpenSearch/pull/764)) +* FreeBSD Java support ([#1014](https://github.com/opensearch-project/OpenSearch/pull/1014)) +* Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420)) +* Restore Java 8 compatibility for build tools. 
(#2300) ([#2321](https://github.com/opensearch-project/OpenSearch/pull/2321)) +* Revert "Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url" ([#2256](https://github.com/opensearch-project/OpenSearch/pull/2256)) +* Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url ([#2086](https://github.com/opensearch-project/OpenSearch/pull/2086)) +* added config file to git issue template directory to disable blank issue creation ([#2158](https://github.com/opensearch-project/OpenSearch/pull/2158)) +* Add JetBrains Gateway setup details ([#1944](https://github.com/opensearch-project/OpenSearch/pull/1944)) +* Adding workflow to auto delete backport merged branches from backport workflow ([#2050](https://github.com/opensearch-project/OpenSearch/pull/2050)) +* Add IssueNavigationLink ([#1964](https://github.com/opensearch-project/OpenSearch/pull/1964)) +* Using pull_request_target in place of pull_request ([#1952](https://github.com/opensearch-project/OpenSearch/pull/1952)) +* Using custom branch name for auto backporting PRs ([#1862](https://github.com/opensearch-project/OpenSearch/pull/1862)) +* Added help to build distributions in docs ([#1898](https://github.com/opensearch-project/OpenSearch/pull/1898)) +* Auto-increment next development iteration. ([#1816](https://github.com/opensearch-project/OpenSearch/pull/1816)) +* Catching Maintainers up for Q4 2021 new additions/removals ([#1841](https://github.com/opensearch-project/OpenSearch/pull/1841)) +* Added .gitattributes to manage end-of-line checks for Windows/*nix systems ([#1638](https://github.com/opensearch-project/OpenSearch/pull/1638)) +* Add staged version 1.1.1 ([#1506](https://github.com/opensearch-project/OpenSearch/pull/1506)) +* [BWC] Disable BWC tests until branch versions are synced ([#1508](https://github.com/opensearch-project/OpenSearch/pull/1508)) +* Moving DCO to workflows ([#1458](https://github.com/opensearch-project/OpenSearch/pull/1458)) +* changed work-in-progress language ([#1275](https://github.com/opensearch-project/OpenSearch/pull/1275)) +* Removed beta from new issues. ([#1071](https://github.com/opensearch-project/OpenSearch/pull/1071)) +* Include sources and javadoc artifacts while publishing to a Maven repository ([#1049](https://github.com/opensearch-project/OpenSearch/pull/1049)) +* Replaced custom built JNA by official JNA distribution.
([#1003](https://github.com/opensearch-project/OpenSearch/pull/1003)) +* [Version] Don't spoof major for 3.0+ clusters (#2722) ([#2749](https://github.com/opensearch-project/OpenSearch/pull/2749)) +* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546)) +* Add Version.V_1_2_5 constant +* add 1.2.5 to bwcVersions +* [Deprecate] Setting explicit version on analysis component ([#1978](https://github.com/opensearch-project/OpenSearch/pull/1978)) +* [Deprecate] index.merge.policy.max_merge_at_once_explicit ([#1981](https://github.com/opensearch-project/OpenSearch/pull/1981)) +* [plugin] repository-azure: add configuration settings for connect/write/response/read timeouts ([#1789](https://github.com/opensearch-project/OpenSearch/pull/1789)) +* [plugin] repository-azure is not working properly hangs on basic operations (#1740) ([#1749](https://github.com/opensearch-project/OpenSearch/pull/1749)) +* [main] Add staged version 1.3.0 for bwc ([#1510](https://github.com/opensearch-project/OpenSearch/pull/1510)) +* [repository-azure] plugin should use Azure Storage SDK v12 for Java ([#1302](https://github.com/opensearch-project/OpenSearch/pull/1302)) +* Allow building on FreeBSD ([#1091](https://github.com/opensearch-project/OpenSearch/pull/1091)) +* initial commit to add in a dependabot.yml file ([#1353](https://github.com/opensearch-project/OpenSearch/pull/1353)) +* Rename artifact produced by the build to include -min ([#1251](https://github.com/opensearch-project/OpenSearch/pull/1251)) +* [Version] Add 1.2 for BWC testing ([#1241](https://github.com/opensearch-project/OpenSearch/pull/1241)) +* Exclude failing links from plugins/modules ([#1223](https://github.com/opensearch-project/OpenSearch/pull/1223)) +* Kept the original constructor for PluginInfo to maintain bwc ([#1206](https://github.com/opensearch-project/OpenSearch/pull/1206)) +* [Version] Increment main to 2.0 ([#1192](https://github.com/opensearch-project/OpenSearch/pull/1192)) +* Added all icu-project.org websites to the link checker exclusions. ([#1201](https://github.com/opensearch-project/OpenSearch/pull/1201)) +* Add 1.0.1 revision ([#1152](https://github.com/opensearch-project/OpenSearch/pull/1152)) +* distribution/packages: Fix filename format for deb archives ([#621](https://github.com/opensearch-project/OpenSearch/pull/621)) +* [Versioning] Fix Version.fromString logic for legacy version ([#604](https://github.com/opensearch-project/OpenSearch/pull/604)) +* Rename the distribution used in test clusters. ([#603](https://github.com/opensearch-project/OpenSearch/pull/603)) +* clean up rpm artifact naming ([#590](https://github.com/opensearch-project/OpenSearch/pull/590)) +* changed to point to open issues rather than the project board +* Update Plugin Signing Key ([#512](https://github.com/opensearch-project/OpenSearch/pull/512)) +* Use OpenSearch artifacts URL for official plugin installation. ([#490](https://github.com/opensearch-project/OpenSearch/pull/490)) +* Perform more renaming to OpenSearch. 
([#470](https://github.com/opensearch-project/OpenSearch/pull/470)) +* Adding instructions on License and DCO practices to PR template ([#462](https://github.com/opensearch-project/OpenSearch/pull/462)) +* Remove lingering instances of Default distribution in favour of Oss ([#440](https://github.com/opensearch-project/OpenSearch/pull/440)) +* Validation for official plugins for upgrade tool ([#973](https://github.com/opensearch-project/OpenSearch/pull/973)) +* Lower build requirement from Java 14+ to Java 11+ ([#940](https://github.com/opensearch-project/OpenSearch/pull/940)) +* Add Snapshot maven repository ([#829](https://github.com/opensearch-project/OpenSearch/pull/829)) +* distribution/packages: Fix RPM architecture name for 64-bit x86 ([#620](https://github.com/opensearch-project/OpenSearch/pull/620)) +* Update issue template with multiple labels ([#668](https://github.com/opensearch-project/OpenSearch/pull/668)) +* Renaming CPU architecture to have consistent naming ([#612](https://github.com/opensearch-project/OpenSearch/pull/612)) + +### Documentation + +* Adding workflow to create documentation related issues in documentation-website repo (#2929) ([#2976](https://github.com/opensearch-project/OpenSearch/pull/2976)) +* Updating auto backport documentation ([#1620](https://github.com/opensearch-project/OpenSearch/pull/1620)) +* Updating README and CONTRIBUTING guide to get ready for beta1 release. ([#672](https://github.com/opensearch-project/OpenSearch/pull/672)) +* Update instructions on debugging OpenSearch. ([#689](https://github.com/opensearch-project/OpenSearch/pull/689)) +* Fixing typo in TESTING.md ([#1849](https://github.com/opensearch-project/OpenSearch/pull/1849)) +* Fix JavaDoc typo in XContentBuilder ([#1739](https://github.com/opensearch-project/OpenSearch/pull/1739)) +* Update Readme ([#433](https://github.com/opensearch-project/OpenSearch/pull/433)) +* Fix DCO CLI example in CONTRIBUTING.md ([#576](https://github.com/opensearch-project/OpenSearch/pull/576)) +* Change comment to point to DEVELOPER_GUIDE.md ([#1415](https://github.com/opensearch-project/OpenSearch/pull/1415)) +* [typos] typos in DEVELOPER_GUIDE.md ([#1381](https://github.com/opensearch-project/OpenSearch/pull/1381)) +* Adding Security Reporting Instructions in README.md file ([#1326](https://github.com/opensearch-project/OpenSearch/pull/1326)) +* Add guide for generating code coverage report in TESTING.md ([#1264](https://github.com/opensearch-project/OpenSearch/pull/1264)) +* Added Eclipse import instructions to DEVELOPER_GUIDE.md ([#1215](https://github.com/opensearch-project/OpenSearch/pull/1215)) +* Update/maintainers.md ([#723](https://github.com/opensearch-project/OpenSearch/pull/723)) +* Added a link to the maintainer file in contribution guides ([#589](https://github.com/opensearch-project/OpenSearch/pull/589)) +* Updated READMEs on releasing, maintaining, admins and security. ([#853](https://github.com/opensearch-project/OpenSearch/pull/853)) +* adding components to DEVELOPER_GUIDE ([#1200](https://github.com/opensearch-project/OpenSearch/pull/1200)) +* Update developer guide reference to download JDK 14 ([#1452](https://github.com/opensearch-project/OpenSearch/pull/1452)) +* [WIP] Developer guide updates ([#595](https://github.com/opensearch-project/OpenSearch/pull/595)) +* Update README with getting started ([#549](https://github.com/opensearch-project/OpenSearch/pull/549)) +* Update Developers Guide.
([#522](https://github.com/opensearch-project/OpenSearch/pull/522)) +* Update LICENSE.txt +* [License] Add SPDX and OpenSearch Modification license header ([#509](https://github.com/opensearch-project/OpenSearch/pull/509)) +* [License] Update SPDX License Header ([#510](https://github.com/opensearch-project/OpenSearch/pull/510)) +* Cleanup TESTING and DEVELOPER_GUIDE markdowns ([#946](https://github.com/opensearch-project/OpenSearch/pull/946)) +* Add 1.3.0 release notes in main ([#2489](https://github.com/opensearch-project/OpenSearch/pull/2489)) +* Add release notes for 1.2.4 ([#1934](https://github.com/opensearch-project/OpenSearch/pull/1934)) +* Added release notes for 1.2.3. ([#1791](https://github.com/opensearch-project/OpenSearch/pull/1791)) +* Adding release notes for 1.2.2 ([#1730](https://github.com/opensearch-project/OpenSearch/pull/1730)) +* Adding release notes for 1.2.1 ([#1725](https://github.com/opensearch-project/OpenSearch/pull/1725)) +* Add 1.2 release notes and correct 1.1 release notes. ([#1581](https://github.com/opensearch-project/OpenSearch/pull/1581)) +* Generate release notes for 1.1 ([#1230](https://github.com/opensearch-project/OpenSearch/pull/1230)) +* Update release note for GA 1.0 with new commits and removes #547 ([#953](https://github.com/opensearch-project/OpenSearch/pull/953)) +* Adding release notes for 1.0.0 ([#885](https://github.com/opensearch-project/OpenSearch/pull/885)) +* Adding release notes for 1.0.0-rc1 ([#794](https://github.com/opensearch-project/OpenSearch/pull/794)) +* Modified TESTING instructions to clarify use of testing classes ([#1930](https://github.com/opensearch-project/OpenSearch/pull/1930)) +* Clarify JDK requirement in the developer guide ([#1153](https://github.com/opensearch-project/OpenSearch/pull/1153)) +* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473)) +* Expand SearchPlugin javadocs. ([#1909](https://github.com/opensearch-project/OpenSearch/pull/1909)) +* Linked the formatting setting file ([#1860](https://github.com/opensearch-project/OpenSearch/pull/1860)) +* Add more instructions how to install/configure git secrets ([#1202](https://github.com/opensearch-project/OpenSearch/pull/1202)) +* Add themed logo to README ([#988](https://github.com/opensearch-project/OpenSearch/pull/988)) +* Replace Elasticsearch docs links in scripts ([#994](https://github.com/opensearch-project/OpenSearch/pull/994)) +* Cleaned up developer guide, added TOC. ([#572](https://github.com/opensearch-project/OpenSearch/pull/572)) +* Document running individual tests. 
([#741](https://github.com/opensearch-project/OpenSearch/pull/741)) +* [License] Add SPDX License Header to security policies ([#531](https://github.com/opensearch-project/OpenSearch/pull/531)) +* Added a maintainers file ([#523](https://github.com/opensearch-project/OpenSearch/pull/523)) +* Remove extra greater-thans from README ([#527](https://github.com/opensearch-project/OpenSearch/pull/527)) +* [Rename] Update Vagrantfile ([#515](https://github.com/opensearch-project/OpenSearch/pull/515)) +* [README] Remove stale information ([#513](https://github.com/opensearch-project/OpenSearch/pull/513)) +* [Rename] Change license header and copyright notice to SPDX ([#437](https://github.com/opensearch-project/OpenSearch/pull/437)) + + +### Maintenance + +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe (#2641) ([#2653](https://github.com/opensearch-project/OpenSearch/pull/2653)) +* Update azure-storage-blob to 12.15.0: fix test flakiness (#2795) ([#2799](https://github.com/opensearch-project/OpenSearch/pull/2799)) +* Update azure-storage-blob to 12.15.0 (#2774) ([#2778](https://github.com/opensearch-project/OpenSearch/pull/2778)) +* Update the BWC versions (post 1.x backport) ([#2390](https://github.com/opensearch-project/OpenSearch/pull/2390)) +* Update bwc versions for (#2237) ([#2248](https://github.com/opensearch-project/OpenSearch/pull/2248)) +* Update #2103 BWC Versions ([#2173](https://github.com/opensearch-project/OpenSearch/pull/2173)) +* Update bundled JDK distribution to 17.0.2+8 ([#2007](https://github.com/opensearch-project/OpenSearch/pull/2007)) +* Update Mockito to 4.3.1 ([#1973](https://github.com/opensearch-project/OpenSearch/pull/1973)) +* Update protobuf-java to 3.19.3 ([#1945](https://github.com/opensearch-project/OpenSearch/pull/1945)) +* Update Netty to 4.1.73.Final ([#1936](https://github.com/opensearch-project/OpenSearch/pull/1936)) +* Update FIPS API libraries of Bouncy Castle ([#1853](https://github.com/opensearch-project/OpenSearch/pull/1853)) +* Update junit to 4.13.1 ([#1837](https://github.com/opensearch-project/OpenSearch/pull/1837)) +* Update Mockito to 4.2.x ([#1830](https://github.com/opensearch-project/OpenSearch/pull/1830)) +* Upgrading bouncycastle to 1.70 ([#1832](https://github.com/opensearch-project/OpenSearch/pull/1832)) +* Updating Netty to 4.1.72.Final ([#1831](https://github.com/opensearch-project/OpenSearch/pull/1831)) +* Update to log4j 2.17.1 ([#1820](https://github.com/opensearch-project/OpenSearch/pull/1820)) +* Update to log4j 2.17.0 ([#1771](https://github.com/opensearch-project/OpenSearch/pull/1771)) +* [repository-azure] Update to the latest Azure Storage SDK v12, remove privileged runnable wrapper in favor of access helper ([#1521](https://github.com/opensearch-project/OpenSearch/pull/1521)) +* Update bundled JDK distribution to 17.0.1+12 ([#1476](https://github.com/opensearch-project/OpenSearch/pull/1476)) +* Upgrading netty version to 4.1.69.Final ([#1363](https://github.com/opensearch-project/OpenSearch/pull/1363)) +* Modernize and consolidate JDKs usage across all stages of the build.
Update JDK-14 requirement, switch to JDK-17 instead ([#1368](https://github.com/opensearch-project/OpenSearch/pull/1368)) +* Upgrade hadoop dependencies for hdfs plugin ([#1335](https://github.com/opensearch-project/OpenSearch/pull/1335)) +* Replace securemock with mock-maker (test support), update Mockito to 3.12.4 ([#1332](https://github.com/opensearch-project/OpenSearch/pull/1332)) +* Update Jackson to 2.12.5 ([#1247](https://github.com/opensearch-project/OpenSearch/pull/1247)) +* Update DistributionDownloader to support fetching arm64 bundles. ([#929](https://github.com/opensearch-project/OpenSearch/pull/929)) +* Update favicon for OpenSearch ([#932](https://github.com/opensearch-project/OpenSearch/pull/932)) +* Update DistributionDownloader to fetch snapshots and staging bundles. ([#904](https://github.com/opensearch-project/OpenSearch/pull/904)) +* Version bump for 1.1 release ([#772](https://github.com/opensearch-project/OpenSearch/pull/772)) +* update external library 'pdfbox' version to 2.0.24 to reduce vulnerability ([#883](https://github.com/opensearch-project/OpenSearch/pull/883)) +* Update dependencies for ingest-attachment plugin. ([#666](https://github.com/opensearch-project/OpenSearch/pull/666)) +* Update hadoop-minicluster version for test fixture. ([#645](https://github.com/opensearch-project/OpenSearch/pull/645)) +* Update remote repo for BWC checks. ([#482](https://github.com/opensearch-project/OpenSearch/pull/482)) +* Update year and developer info in generated POMs. ([#444](https://github.com/opensearch-project/OpenSearch/pull/444)) +* Refresh OpenSearch nodes version in cluster state after upgrade ([#865](https://github.com/opensearch-project/OpenSearch/pull/865)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) +* Upgrade to log4j 2.16.0 ([#1721](https://github.com/opensearch-project/OpenSearch/pull/1721)) +* Upgrade to log4j 2.15.0 ([#1698](https://github.com/opensearch-project/OpenSearch/pull/1698)) +* Updating Log4j to 2.11.2 ([#1696](https://github.com/opensearch-project/OpenSearch/pull/1696)) +* Upgrade dependency ([#1571](https://github.com/opensearch-project/OpenSearch/pull/1571)) +* Upgrade apache commons-compress to 1.21 ([#1197](https://github.com/opensearch-project/OpenSearch/pull/1197)) +* Removed java11 source folders since JDK-11 is the baseline now (#2898) ([#2953](https://github.com/opensearch-project/OpenSearch/pull/2953)) +* [Remove] MainResponse version override cluster setting (#3031) ([#3033](https://github.com/opensearch-project/OpenSearch/pull/3033)) +* [Remove] remaining AllFieldMapper references (#3007) ([#3010](https://github.com/opensearch-project/OpenSearch/pull/3010)) +* [2.x] Remove deprecation warning of using REST API request parameter 'master_timeout' (#2920) ([#2931](https://github.com/opensearch-project/OpenSearch/pull/2931)) +* [Rename] ESTestCase stragglers to OpenSearchTestCase (#3053) ([#3064](https://github.com/opensearch-project/OpenSearch/pull/3064)) +* Use G1GC on JDK11+ (#2964) ([#2970](https://github.com/opensearch-project/OpenSearch/pull/2970)) +* Remove endpoint_suffix dependency on account key (#2485) ([#2808](https://github.com/opensearch-project/OpenSearch/pull/2808)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Upgrading Shadow plugin to 7.1.2 ([#2033](https://github.com/opensearch-project/OpenSearch/pull/2033)) +* Upgrading Jackson-Databind version
([#1982](https://github.com/opensearch-project/OpenSearch/pull/1982)) +* Upgrading commons-codec in hdfs-fixture and cleaning up dependencies in repository-hdfs ([#1603](https://github.com/opensearch-project/OpenSearch/pull/1603)) +* Upgrading gson to 2.8.9 ([#1541](https://github.com/opensearch-project/OpenSearch/pull/1541)) +* Upgrading dependencies ([#1491](https://github.com/opensearch-project/OpenSearch/pull/1491)) +* Upgrading dependencies in hdfs plugin ([#1466](https://github.com/opensearch-project/OpenSearch/pull/1466)) +* Upgrading mockito version to make it consistent across the repo ([#1410](https://github.com/opensearch-project/OpenSearch/pull/1410)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal (#2863) ([#2865](https://github.com/opensearch-project/OpenSearch/pull/2865)) +* Update ThirdPartyAuditTask to check for and list pointless exclusions. (#2760) ([#2765](https://github.com/opensearch-project/OpenSearch/pull/2765)) +* Add Shadow jar publication to lang-painless module. (#2681) ([#2712](https://github.com/opensearch-project/OpenSearch/pull/2712)) +* Add mapping method back referenced in other repos (#2636) ([#2649](https://github.com/opensearch-project/OpenSearch/pull/2649)) +* Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548)) +* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531)) +* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488)) +* [Remove] TrimUnsafeCommit logic for legacy 6.x indexes ([#2225](https://github.com/opensearch-project/OpenSearch/pull/2225)) +* Adjust main version after backport to 1.x ([#2147](https://github.com/opensearch-project/OpenSearch/pull/2147)) +* [Remove] CircuitBreaker Accounting ([#2056](https://github.com/opensearch-project/OpenSearch/pull/2056)) +* [Remove] Segment memory estimation and tracking ([#2029](https://github.com/opensearch-project/OpenSearch/pull/2029)) +* [Remove] index.merge.policy.max_merge_at_once_explicit ([#1988](https://github.com/opensearch-project/OpenSearch/pull/1988)) +* [Remove] Setting explicit version on analysis component ([#1986](https://github.com/opensearch-project/OpenSearch/pull/1986)) +* Wildcard max_expansion version check update ([#1980](https://github.com/opensearch-project/OpenSearch/pull/1980)) +* Removing lingering transportclient ([#1955](https://github.com/opensearch-project/OpenSearch/pull/1955)) +* [BWC] Ensure 2.x compatibility with Legacy 7.10.x ([#1902](https://github.com/opensearch-project/OpenSearch/pull/1902)) +* File name correction to follow existing convention ([#1874](https://github.com/opensearch-project/OpenSearch/pull/1874)) +* [Remove] Old Translog Checkpoint Format ([#1884](https://github.com/opensearch-project/OpenSearch/pull/1884)) +* Remove unwanted unreleased versions ([#1877](https://github.com/opensearch-project/OpenSearch/pull/1877)) +* replace with opensearch-http-channel and opensearch-http-server-channel ([#1799](https://github.com/opensearch-project/OpenSearch/pull/1799)) +* Add bwc version 1.2.4 ([#1796](https://github.com/opensearch-project/OpenSearch/pull/1796)) +* [Remove] various builder and mapping deprecations ([#1752](https://github.com/opensearch-project/OpenSearch/pull/1752)) +* [Remove] Remaining Flavor Serialization ([#1751](https://github.com/opensearch-project/OpenSearch/pull/1751)) +* [Remove] DynamicTemplate deprecations 
([#1742](https://github.com/opensearch-project/OpenSearch/pull/1742)) +* [Remove] Analyzer Deprecations ([#1741](https://github.com/opensearch-project/OpenSearch/pull/1741)) +* Drop mocksocket & securemock dependencies from sniffer and rest client (not needed) ([#1174](https://github.com/opensearch-project/OpenSearch/pull/1174)) +* [BWC] Temporarily disable bwc testing while bumping 1.0.1 +* [DEPRECATE] SimpleFS in favor of NIOFS ([#1073](https://github.com/opensearch-project/OpenSearch/pull/1073)) +* Replace JCenter with Maven Central. ([#1057](https://github.com/opensearch-project/OpenSearch/pull/1057)) +* Restoring alpha/beta/rc version semantics ([#1112](https://github.com/opensearch-project/OpenSearch/pull/1112)) +* Remove `client/sniffer` from Javadoc exemption list ([#818](https://github.com/opensearch-project/OpenSearch/pull/818)) +* Removed pre-alpha notes. ([#815](https://github.com/opensearch-project/OpenSearch/pull/815)) +* Remove checks for legacy .yaml and .json config files. ([#792](https://github.com/opensearch-project/OpenSearch/pull/792)) +* Remove reference to an EC2 instance type. ([#812](https://github.com/opensearch-project/OpenSearch/pull/812)) +* Remove all elastic.co references from javadocs ([#586](https://github.com/opensearch-project/OpenSearch/pull/586)) +* Remove the oss string from OpenSearch distributions ([#575](https://github.com/opensearch-project/OpenSearch/pull/575)) +* [Rename] Remove final references to legacy keystore ([#514](https://github.com/opensearch-project/OpenSearch/pull/514)) +* changed Apache to Apache 2.0. Numbered principles +* fixed apache to apache 2.0 +* Replace nio and netty test endpoint ([#475](https://github.com/opensearch-project/OpenSearch/pull/475)) +* [Rename] org.elasticsearch.client.documentation.SearchDocumentationIT.testSearchRequestSuggestions ([#467](https://github.com/opensearch-project/OpenSearch/pull/467)) + +### Refactoring + +* [Rename] Refactoring Elastic references in docker and kerberos builds (#428) ([#438](https://github.com/opensearch-project/OpenSearch/pull/438)) +* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452)) +* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396)) +* [Refactor] MapperService to QueryShardContext in valueFetcher ([#2027](https://github.com/opensearch-project/OpenSearch/pull/2027)) +* [Refactor] Lucene DataInput and DataOutput to StreamInput and StreamOutput ([#2035](https://github.com/opensearch-project/OpenSearch/pull/2035)) +* [Refactor] InternalEngine to always use soft deletes ([#1933](https://github.com/opensearch-project/OpenSearch/pull/1933)) +* Refactor LegacyESVersion tests from Version tests ([#1662](https://github.com/opensearch-project/OpenSearch/pull/1662)) +* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534)) +* Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483)) +* Introduce RestHandler.Wrapper to help with delegate implementations ([#1004](https://github.com/opensearch-project/OpenSearch/pull/1004)) + +### Tests + +* Add type mapping removal bwc
tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901)) +* Removing SLM check in tests for OpenSearch versions (#2604) ([#2620](https://github.com/opensearch-project/OpenSearch/pull/2620)) +* Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests ([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503)) +* [Test-Failure] Mute TranslogPolicyIT ([#2342](https://github.com/opensearch-project/OpenSearch/pull/2342)) +* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#2074](https://github.com/opensearch-project/OpenSearch/pull/2074)) +* Stabilizing org.opensearch.cluster.routing.MovePrimaryFirstTests.test… ([#2048](https://github.com/opensearch-project/OpenSearch/pull/2048)) +* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#1983](https://github.com/opensearch-project/OpenSearch/pull/1983)) +* Add hook to execute logic before Integ test task starts ([#1969](https://github.com/opensearch-project/OpenSearch/pull/1969)) +* Remove transport client from tests. ([#1809](https://github.com/opensearch-project/OpenSearch/pull/1809)) +* [Tests] ClusterHealthIT:testHealthOnMasterFailover - Increase master node timeout ([#1812](https://github.com/opensearch-project/OpenSearch/pull/1812)) +* Ignore file order in test assertion ([#1755](https://github.com/opensearch-project/OpenSearch/pull/1755)) +* Integration test that checks for settings upgrade ([#1482](https://github.com/opensearch-project/OpenSearch/pull/1482)) +* [bwc] reenable bwc testing after syncing staged branches ([#1511](https://github.com/opensearch-project/OpenSearch/pull/1511)) +* [Tests] Translog Pruning tests to MetadataCreateIndexServiceTests ([#1295](https://github.com/opensearch-project/OpenSearch/pull/1295)) +* Reduce iterations to improve test run time ([#1168](https://github.com/opensearch-project/OpenSearch/pull/1168)) +* Tune datanode count and shards count to improve test run time ([#1170](https://github.com/opensearch-project/OpenSearch/pull/1170)) +* [BWC] Re-enable bwc testing after 1.0.1 version bump +* Add unit test for RestActionListener. Validate that onFailure() sends response even when BytesRestResponse can not be constructed using passed exception. Follow up on #923. ([#1024](https://github.com/opensearch-project/OpenSearch/pull/1024)) +* [TEST] Fix failing distro tests for linux packages ([#569](https://github.com/opensearch-project/OpenSearch/pull/569)) +* [TEST] Fix failing packaging tests for OpenSearch distributions. ([#541](https://github.com/opensearch-project/OpenSearch/pull/541)) +* Remove the references to xpack and elastic in tests. 
([#516](https://github.com/opensearch-project/OpenSearch/pull/516)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml index a0e4762ea9b53..4da0f8eeed39b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml @@ -46,8 +46,8 @@ setup: --- "Basic test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -96,8 +96,8 @@ setup: --- "IP test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -146,8 +146,8 @@ setup: --- "Boolean test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -196,8 +196,8 @@ setup: --- "Double test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -239,8 +239,8 @@ setup: --- "Date test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -282,8 +282,8 @@ setup: --- "Unmapped keywords": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -322,8 +322,8 @@ setup: --- "Null value": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -357,8 +357,8 @@ setup: --- "multiple multi_terms bucket": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -409,8 +409,8 @@ setup: --- "ordered by metrics": - skip: - version: "- 3.0.0" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -457,8 +457,8 @@ setup: --- "top 1 ordered by metrics ": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -502,8 +502,8 @@ setup: --- "min_doc_count": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -574,8 +574,8 @@ setup: --- "sum_other_doc_count": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index c74f992970545..ac0ae44eb732e 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -470,9 +470,6 @@ public void onTaskUnregistered(Task task) {} @Override public void waitForTaskCompletion(Task task) {} - - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} }); } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener @@ -654,9 +651,6 @@ public void waitForTaskCompletion(Task task) { waitForWaitingToStart.countDown(); } - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} - @Override public void onTaskRegistered(Task task) {} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 292469c6e7b79..f976ffdbe8ad5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -111,7 +111,7 @@ private MockTerminal executeCommand( private MockTerminal unsafeBootstrap(Environment environment, boolean abort, Boolean applyClusterReadOnlyBlock) throws Exception { final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort, applyClusterReadOnlyBlock); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG)); return terminal; } @@ -171,7 +171,7 @@ public void testBootstrapNotMasterEligible() { final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build() ); - expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } public void testBootstrapNoDataFolder() { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java index 086aeb695c411..e6ddfd94871ce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java @@ -248,7 +248,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { blockDataNode(repoName, dataNode); - logger.info("--> create snapshot via master node client"); + logger.info("--> create snapshot via cluster-manager node client"); final ActionFuture snapshotResponse = internalCluster().masterClient() .admin() .cluster() @@ -272,7 +272,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { SnapshotException.class, () -> snapshotResponse.actionGet(TimeValue.timeValueSeconds(30L)) ); - assertThat(sne.getMessage(), endsWith("no longer master")); + assertThat(sne.getMessage(), endsWith("no longer cluster-manager")); } private void 
assertSnapshotExists(String repository, String snapshot) { diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java index 2547333490f23..e2bbd0ee13db3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java @@ -136,7 +136,7 @@ private void executeRepurposeCommand(Settings settings, int expectedIndexCount, boolean verbose = randomBoolean(); Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build(); Matcher matcher = allOf( - containsString(NodeRepurposeCommand.noMasterMessage(expectedIndexCount, expectedShardCount, 0)), + containsString(NodeRepurposeCommand.noClusterManagerMessage(expectedIndexCount, expectedShardCount, 0)), NodeRepurposeCommandTests.conditionalNot(containsString("test-repurpose"), verbose == false) ); NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, verbose); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 3163458d1f8d8..f1a552c929d3d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -35,7 +35,13 @@ import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.*; +import org.opensearch.action.search.CreatePITAction; +import org.opensearch.action.search.CreatePITRequest; +import org.opensearch.action.search.CreatePITResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 8d3e2569957f1..5a9e5b91982a2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -785,7 +785,7 @@ private enum OpenSearchExceptionHandle { 2, UNKNOWN_VERSION_ADDED ), - MASTER_NOT_DISCOVERED_EXCEPTION( + CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION( org.opensearch.discovery.MasterNotDiscoveredException.class, org.opensearch.discovery.MasterNotDiscoveredException::new, 3, @@ -1496,7 +1496,7 @@ private enum OpenSearchExceptionHandle { 143, UNKNOWN_VERSION_ADDED ), - NOT_MASTER_EXCEPTION( + NOT_CLUSTER_MANAGER_EXCEPTION( org.opensearch.cluster.NotMasterException.class, org.opensearch.cluster.NotMasterException::new, 144, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 95fbe42384238..baa2ce0847501 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -64,7 +64,7 @@ /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the - * master node in the cluster. + * cluster-manager node in the cluster. */ public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction< ClusterAllocationExplainRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index 841231c971eaa..ce731fd1c8aca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -90,7 +90,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields int numberOfNodes = (int) parsedObjects[i++]; int numberOfDataNodes = (int) parsedObjects[i++]; - boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]); + boolean hasDiscoveredClusterManager = Boolean.TRUE.equals(parsedObjects[i++]); int activeShards = (int) parsedObjects[i++]; int relocatingShards = (int) parsedObjects[i++]; int activePrimaryShards = (int) parsedObjects[i++]; @@ -118,7 +118,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo unassignedShards, numberOfNodes, numberOfDataNodes, - hasDiscoveredMaster, + hasDiscoveredClusterManager, activeShardsPercent, status, indices diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 6855803ba6c45..98c264e54a1d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -218,11 +218,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onNoLongerMaster(String source) { logger.trace( - "stopped being master while waiting for events with priority [{}]. retrying.", + "stopped being cluster-manager while waiting for events with priority [{}]. retrying.", request.waitForEvents() ); // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); + listener.onFailure(new NotMasterException("no longer cluster-manager. 
source: [" + source + "]")); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index df448d2665434..b7875c5f99774 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -42,7 +42,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -61,15 +60,8 @@ public static long waitForCompletionTimeout(TimeValue timeout) { private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); - private final TaskResourceTrackingService taskResourceTrackingService; - @Inject - public TransportListTasksAction( - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - TaskResourceTrackingService taskResourceTrackingService - ) { + public TransportListTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { super( ListTasksAction.NAME, clusterService, @@ -80,7 +72,6 @@ public TransportListTasksAction( TaskInfo::new, ThreadPool.Names.MANAGEMENT ); - this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -110,8 +101,6 @@ protected void processTasks(ListTasksRequest request, Consumer operation) } taskManager.waitForTaskCompletion(task, timeoutNanos); }); - } else { - operation = operation.andThen(taskResourceTrackingService::refreshResourceStats); } super.processTasks(request, operation); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index b7b9da675a385..c56b2fd2b2205 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -73,12 +73,12 @@ *
 * <ol>
 *     <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions
 *     and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}</li>
- *     <li>Run cleanup actions on the repository. Note, these are executed exclusively on the master node.
+ *     <li>Run cleanup actions on the repository. Note, these are executed exclusively on the cluster-manager node.
 *     For the precise operations execute see {@link BlobStoreRepository#cleanup}</li>
 *     <li>Remove the entry in {@link RepositoryCleanupInProgress} in the first step.</li>
 * </ol>
 *
* - * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in + * On cluster-manager failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes * as well as deletes. @@ -119,7 +119,7 @@ public TransportCleanupRepositoryAction( ); this.repositoriesService = repositoriesService; this.snapshotsService = snapshotsService; - // We add a state applier that will remove any dangling repository cleanup actions on master failover. + // We add a state applier that will remove any dangling repository cleanup actions on cluster-manager failover. // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes. if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { @@ -136,7 +136,7 @@ private static void addClusterStateApplier(ClusterService clusterService) { return; } clusterService.submitStateUpdateTask( - "clean up repository cleanup task after master failover", + "clean up repository cleanup task after cluster-manager failover", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 6d479431e1a94..3bfdf2a0cbd5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -155,11 +155,11 @@ public void onAckTimeout() { } private void reroute(final boolean updateSettingsAcked) { - // We're about to send a second update task, so we need to check if we're still the elected master - // For example the minimum_master_node could have been breached and we're no longer elected master, + // We're about to send a second update task, so we need to check if we're still the elected cluster-manager + // For example the minimum_master_node could have been breached and we're no longer elected cluster-manager, // so we should *not* execute the reroute. 
if (!clusterService.state().nodes().isLocalNodeElectedMaster()) { - logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); + logger.debug("Skipping reroute after cluster update settings, because node is no longer cluster-manager"); listener.onResponse( new ClusterUpdateSettingsResponse( updateSettingsAcked, @@ -198,7 +198,7 @@ protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { @Override public void onNoLongerMaster(String source) { logger.debug( - "failed to preform reroute after cluster settings were updated - current node is no longer a master" + "failed to preform reroute after cluster settings were updated - current node is no longer a cluster-manager" ); listener.onResponse( new ClusterUpdateSettingsResponse( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index e596348127faf..cb6f8493551f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -69,8 +69,8 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); if (prevEntry == null) { - // When there is a master failure after a restore has been started, this listener might not be registered - // on the current master and as such it might miss some intermediary cluster states due to batching. + // When there is a cluster-manager failure after a restore has been started, this listener might not be registered + // on the current cluster-manager and as such it might miss some intermediary cluster states due to batching. // Clean up listener in that case and acknowledge completion of restore operation to client. clusterService.removeListener(this); listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null)); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 33d4ac5d50347..1e29a70e1f41f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -221,8 +221,8 @@ private void buildResponse( // Unlikely edge case: // Data node has finished snapshotting the shard but the cluster state has not yet been updated // to reflect this. We adjust the status to show up as snapshot metadata being written because - // technically if the data node failed before successfully reporting DONE state to master, then - // this shards state would jump to a failed state. + // technically if the data node failed before successfully reporting DONE state to cluster-manager, + // then this shards state would jump to a failed state. 
shardStatus = new SnapshotIndexShardStatus( shardEntry.key, SnapshotIndexShardStage.FINALIZE, @@ -406,7 +406,7 @@ private SnapshotInfo snapshot(SnapshotsInProgress snapshotsInProgress, String re /** * Returns status of shards currently finished snapshots *
<p>
- * This method is executed on master node and it's complimentary to the + * This method is executed on cluster-manager node and it's complimentary to the * {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it * returns similar information but for already finished snapshots. *
<p>
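The rename hunks above and below all touch the same failover pattern: a task running on the elected cluster-manager learns through an onNoLongerMaster callback that the local node has lost its role, and surfaces a NotMasterException so that TransportMasterNodeAction's retry logic re-routes the request to the newly elected node. A minimal sketch of that pattern follows; the exception and listener types are real OpenSearch classes shown elsewhere in this patch, but the surrounding method shape is illustrative, not the project's actual code:

    // Sketch only: mirrors the retry-on-failover handling in the
    // TransportClusterHealthAction hunk above.
    import org.opensearch.action.ActionListener;
    import org.opensearch.cluster.NotMasterException;

    final class FailoverRetrySketch {
        static <T> void onNoLongerClusterManager(String source, ActionListener<T> listener) {
            // TransportMasterNodeAction retries when it observes a NotMasterException,
            // so failing the listener with one re-dispatches the request to the
            // newly elected cluster-manager instead of erroring out the client.
            listener.onFailure(new NotMasterException("no longer cluster-manager. source: [" + source + "]"));
        }
    }
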
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index d2f053137e446..80d1f7022967d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -108,20 +108,20 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; ClusterStateResponse response = (ClusterStateResponse) o; return waitForTimedOut == response.waitForTimedOut && Objects.equals(clusterName, response.clusterName) && - // Best effort. Only compare cluster state version and master node id, + // Best effort. Only compare cluster state version and cluster-manager node id, // because cluster state doesn't implement equals() Objects.equals(getVersion(clusterState), getVersion(response.clusterState)) - && Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState)); + && Objects.equals(getClusterManagerNodeId(clusterState), getClusterManagerNodeId(response.clusterState)); } @Override public int hashCode() { - // Best effort. Only use cluster state version and master node id, + // Best effort. Only use cluster state version and cluster-manager node id, // because cluster state doesn't implement hashcode() - return Objects.hash(clusterName, getVersion(clusterState), getMasterNodeId(clusterState), waitForTimedOut); + return Objects.hash(clusterName, getVersion(clusterState), getClusterManagerNodeId(clusterState), waitForTimedOut); } - private static String getMasterNodeId(ClusterState clusterState) { + private static String getClusterManagerNodeId(ClusterState clusterState) { if (clusterState == null) { return null; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 42497c5244167..595127d83d4bf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -138,7 +138,7 @@ public void onNewClusterState(ClusterState newState) { } else { listener.onFailure( new NotMasterException( - "master stepped down waiting for metadata version " + request.waitForMetadataVersion() + "cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion() ) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 7607a2ef70980..01d4d5ac0fb53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -85,7 +85,7 @@ public NodeStats nodeStats() { } /** - * Cluster Health Status, only populated on master nodes. + * Cluster Health Status, only populated on cluster-manager nodes. 
*/ @Nullable public ClusterHealthStatus clusterStatus() { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index 1470f252756a5..172159a1efe5b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -60,7 +60,7 @@ public class ClusterStatsResponse extends BaseNodesResponse *
 * <ul>
 * <li>A user overflows the index graveyard by deleting more than 500 indices while a node is offline and then the node rejoins the
 * cluster</li>
- * <li>A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its master nodes</li>
+ * <li>A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its cluster-manager nodes</li>
 * <li>A user (unsafely) meddles with the contents of the data path, maybe restoring an old index folder from a backup</li>
 * <li>A disk partially fails and the user has no replicas and no snapshots and wants to (unsafely) recover whatever they can</li>
- * <li>A cluster loses all master nodes and those are (unsafely) restored from backup, but the backup does not contain the index</li>
+ * <li>A cluster loses all cluster-manager nodes and those are (unsafely) restored from backup, but the backup does not contain the index</li>
 * </ul>
 * <p>
    The classes in this package form an API for managing dangling indices, allowing them to be listed, imported or deleted. diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index da8833fe49a29..17bfa082295af 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -61,7 +61,7 @@ enum ItemProcessingState { TRANSLATED, /** * the request can not execute with the current mapping and should wait for a new mapping - * to arrive from the master. A mapping request for the needed changes has already been + * to arrive from the cluster-manager. A mapping request for the needed changes has already been * submitted */ WAIT_FOR_MAPPING_UPDATE, @@ -144,7 +144,7 @@ public boolean isOperationExecuted() { return currentItemState == ItemProcessingState.EXECUTED; } - /** returns true if the request needs to wait for a mapping update to arrive from the master */ + /** returns true if the request needs to wait for a mapping update to arrive from the cluster-manager */ public boolean requiresWaitingForMappingUpdate() { return currentItemState == ItemProcessingState.WAIT_FOR_MAPPING_UPDATE; } @@ -216,7 +216,7 @@ public > T getRequestToExecute() { return (T) requestToExecute; } - /** indicates that the current operation can not be completed and needs to wait for a new mapping from the master */ + /** indicates that the current operation can not be completed and needs to wait for a new mapping from the cluster-manager */ public void markAsRequiringMappingUpdate() { assert assertInvariants(ItemProcessingState.TRANSLATED); currentItemState = ItemProcessingState.WAIT_FOR_MAPPING_UPDATE; diff --git a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java index c0eb29e4c112f..ebfe82eb6ed38 100644 --- a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java +++ b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java @@ -39,7 +39,7 @@ public interface MappingUpdatePerformer { /** - * Update the mappings on the master. + * Update the mappings on the cluster-manager. */ void updateMappings(Mapping update, ShardId shardId, ActionListener listener); diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index cc9f20b7aa256..5311186fee0dc 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -621,7 +621,7 @@ private static Engine.Result performOpOnReplica( throw new IllegalStateException("Unexpected request operation type on replica: " + docWriteRequest.opType().getLowercase()); } if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - // Even though the primary waits on all nodes to ack the mapping changes to the master + // Even though the primary waits on all nodes to ack the mapping changes to the cluster-manager // (see MappingUpdatedAction.updateMappingOnMaster) we still need to protect against missing mappings // and wait for them. The reason is concurrent requests. Request r1 which has new field f triggers a // mapping update. 
Assume that that update is first applied on the primary, and only later on the replica diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index f09701c7769eb..2e506c6fe181b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -49,11 +49,6 @@ public SearchShardTask(long id, String type, String action, String description, super(id, type, action, description, parentTaskId, headers); } - @Override - public boolean supportsResourceTracking() { - return true; - } - @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index bf6f141a3e829..7f80f7836be6c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -78,11 +78,6 @@ public final String getDescription() { return descriptionSupplier.get(); } - @Override - public boolean supportsResourceTracking() { - return true; - } - /** * Attach a {@link SearchProgressListener} to this task. */ diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index 83fca715c7e28..84ece8cfec530 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionResponse; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskId; @@ -89,39 +88,31 @@ public final Task execute(Request request, ActionListener listener) { */ final Releasable unregisterChildNode = registerChildNode(request.getParentTask()); final Task task; - try { task = taskManager.register("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } - - ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); - try { - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(response); - } + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(response); } + } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(e); - } + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(e); } - }); - } finally { - storedContext.close(); - } - + } + }); return task; } @@ -138,30 +129,25 @@ public final Task execute(Request request, TaskListener listener) { unregisterChildNode.close(); throw e; } - ThreadContext.StoredContext 
storedContext = taskManager.taskExecutionStarted(task); - try { - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(task, response); - } + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(task, response); } + } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(task, e); - } + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(task, e); } - }); - } finally { - storedContext.close(); - } + } + }); return task; } diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 3950f3a9fef77..6eb3c7a0cfe89 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -310,9 +310,9 @@ protected AsyncAction(Task task, Request request, ActionListener liste for (ShardRouting shard : shardIt) { // send a request to the shard only if it is assigned to a node that is in the local node's cluster state // a scenario in which a shard can be assigned but to a node that is not in the local node's cluster state - // is when the shard is assigned to the master node, the local node has detected the master as failed - // and a new master has not yet been elected; in this situation the local node will have removed the - // master node from the local cluster state, but the shards assigned to the master will still be in the + // is when the shard is assigned to the cluster-manager node, the local node has detected the cluster-manager as failed + // and a new cluster-manager has not yet been elected; in this situation the local node will have removed the + // cluster-manager node from the local cluster state, but the shards assigned to the cluster-manager will still be in the // routing table as such if (shard.assignedToNode() && nodes.get(shard.currentNodeId()) != null) { String nodeId = shard.currentNodeId(); diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java index 9337e646cebea..daed5a09bb0f3 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java @@ -36,7 +36,7 @@ import org.opensearch.common.unit.TimeValue; /** - * Base request builder for master node operations that support acknowledgements + * Base request builder for cluster-manager node operations that support acknowledgements */ public abstract class AcknowledgedRequestBuilder< Request extends AcknowledgedRequest, diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java index b39ec5fe4cc6b..98996e222b30a 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java @@ -39,7 +39,7 @@ import org.opensearch.common.unit.TimeValue; /** - * Base request builder for master node operations + * Base request builder for cluster-manager node operations */ public abstract class MasterNodeOperationRequestBuilder< Request extends MasterNodeRequest, @@ -53,7 +53,7 @@ protected MasterNodeOperationRequestBuilder(OpenSearchClient client, ActionType< } /** - * Sets the master node timeout in case the master has not yet been discovered. + * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. */ @SuppressWarnings("unchecked") public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) { @@ -62,7 +62,7 @@ public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) { } /** - * Sets the master node timeout in case the master has not yet been discovered. + * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. */ @SuppressWarnings("unchecked") public final RequestBuilder setMasterNodeTimeout(String timeout) { diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java index add5c5177df42..99e6b37debd8f 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -37,7 +37,7 @@ import org.opensearch.client.OpenSearchClient; /** - * Base request builder for master node read operations that can be executed on the local node as well + * Base request builder for cluster-manager node read operations that can be executed on the local node as well */ public abstract class MasterNodeReadOperationRequestBuilder< Request extends MasterNodeReadRequest, diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java index eeafa148ca7c3..9842c47652a97 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java @@ -38,7 +38,7 @@ import java.io.IOException; /** - * Base request for master based read operations that allows to read the cluster state from the local node if needed + * Base request for cluster-manager based read operations that allows to read the cluster state from the local node if needed */ public abstract class MasterNodeReadRequest> extends MasterNodeRequest { @@ -64,9 +64,9 @@ public final Request local(boolean local) { } /** - * Return local information, do not retrieve the state from master node (default: false). + * Return local information, do not retrieve the state from cluster-manager node (default: false). * @return true if local information is to be returned; - * false if information is to be retrieved from master node (default). + * false if information is to be retrieved from cluster-manager node (default). 
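The local-read flag documented above can be exercised through any MasterNodeReadRequest subclass; a minimal usage sketch follows (ClusterStateRequest is chosen purely for illustration):

import org.opensearch.action.admin.cluster.state.ClusterStateRequest;

class LocalReadSketch {
    // local(true) answers from this node's copy of the cluster state instead of
    // forwarding the read to the elected cluster-manager; the default is false.
    static ClusterStateRequest newLocalRead() {
        return new ClusterStateRequest().local(true);
    }
}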
*/ public final boolean local() { return local; diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java index d5be6c48e23b8..f7ea962f7c4a1 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java @@ -40,7 +40,7 @@ import java.io.IOException; /** - * A based request for master based operation. + * A base request for cluster-manager based operations. */ public abstract class MasterNodeRequest> extends ActionRequest { @@ -62,7 +62,7 @@ public void writeTo(StreamOutput out) throws IOException { } /** - * A timeout value in case the master has not been discovered yet or disconnected. + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. */ @SuppressWarnings("unchecked") public final Request masterNodeTimeout(TimeValue timeout) { @@ -71,7 +71,7 @@ public final Request masterNodeTimeout(TimeValue timeout) { } /** - * A timeout value in case the master has not been discovered yet or disconnected. + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. */ public final Request masterNodeTimeout(String timeout) { return masterNodeTimeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".masterNodeTimeout")); diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java index 62d08c23534af..083bea079174c 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java @@ -67,7 +67,7 @@ import java.util.function.Predicate; /** - * A base class for operations that needs to be performed on the master node. + * A base class for operations that need to be performed on the cluster-manager node. */ public abstract class TransportMasterNodeAction, Response extends ActionResponse> extends HandledTransportAction { @@ -198,13 +198,13 @@ protected void doStart(ClusterState clusterState) { } } else { if (nodes.getMasterNode() == null) { - logger.debug("no known master node, scheduling a retry"); + logger.debug("no known cluster-manager node, scheduling a retry"); retryOnMasterChange(clusterState, null); } else { - DiscoveryNode masterNode = nodes.getMasterNode(); - final String actionName = getMasterActionName(masterNode); + DiscoveryNode clusterManagerNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(clusterManagerNode); transportService.sendRequest( - masterNode, + clusterManagerNode, actionName, request, new ActionListenerResponseHandler(listener, TransportMasterNodeAction.this::read) { @@ -213,7 +213,7 @@ public void handleException(final TransportException exp) { Throwable cause = exp.unwrapCause(); if (cause instanceof ConnectTransportException || (exp instanceof RemoteTransportException && cause instanceof NodeClosedException)) { - // we want to retry here a bit to see if a new master is elected + // we want to retry here a bit to see if a new cluster-manager is elected logger.debug( "connection exception while trying to forward request with action name [{}] to " + "master node [{}], scheduling a retry.
Error: [{}]", @@ -279,7 +279,7 @@ public void onTimeout(TimeValue timeout) { } /** - * Allows to conditionally return a different master node action name in the case an action gets renamed. + * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed. * This mainly for backwards compatibility should be used rarely */ protected String getMasterActionName(DiscoveryNode node) { diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java index b230901eb456e..b8be63dd6564b 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java @@ -41,7 +41,7 @@ import org.opensearch.transport.TransportService; /** - * A base class for read operations that needs to be performed on the master node. + * A base class for read operations that need to be performed on the cluster-manager node. * Can also be executed on the local node if needed. */ public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java index e91a659d331d1..27d9cb8b6c002 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java @@ -39,7 +39,7 @@ import java.io.IOException; -// TODO: this class can be removed in master once 7.x is bumped to 7.4.0 +// TODO: this class can be removed in main once 7.x is bumped to 7.4.0 public abstract class BaseNodeRequest extends TransportRequest { public BaseNodeRequest() {} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 68c5416f3603e..f7fd6acf8be23 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -263,7 +263,8 @@ public void onFailure(Exception replicaException) { ), replicaException ); - // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. + // Only report "critical" exceptions + // TODO: Reach out to the cluster-manager node to get the latest shard state then report.
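For orientation, the filter that this comment describes, restated as a self-contained sketch; TransportActions.isShardNotAvailableException is the real predicate, while the wrapper class and method name are illustrative:

import org.opensearch.action.support.TransportActions;

class ReplicaFailureFilterSketch {
    // Shard-not-available conditions are expected (e.g. during relocation or
    // recovery) and are not reported; anything else is a "critical" failure.
    static boolean shouldReportReplicaFailure(Exception replicaException) {
        return TransportActions.isShardNotAvailableException(replicaException) == false;
    }
}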
if (TransportActions.isShardNotAvailableException(replicaException) == false) { RestStatus restStatus = ExceptionsHelper.status(replicaException); shardReplicaFailures.add( diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 387c0d24ed4df..e554ebc0f8414 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -154,7 +154,7 @@ protected void doExecute(Task task, final UpdateRequest request, final ActionLis request.index() ); } - // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API + // if we don't have a master, we don't have metadata, that's fine, let it find a cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { client.admin() .indices() diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index db54016a61f6d..387a27da46820 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -248,7 +248,7 @@ public boolean nodesChanged() { * Determines whether or not the current cluster state represents an entirely * new cluster, either when a node joins a cluster for the first time or when * the node receives a cluster state update from a brand new cluster (different - * UUID from the previous cluster), which will happen when a master node is + * UUID from the previous cluster), which will happen when a cluster-manager node is * elected that has never been part of the cluster before. */ public boolean isNewCluster() { @@ -260,10 +260,10 @@ public boolean isNewCluster() { // Get the deleted indices by comparing the index metadatas in the previous and new cluster states. // If an index exists in the previous cluster state, but not in the new cluster state, it must have been deleted. private List indicesDeletedFromClusterState() { - // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected - // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; + // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected cluster-manager + // that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous - // cluster UUID, in which case, we don't want to delete indices that the master erroneously believes shouldn't exist. + // cluster UUID, in which case, we don't want to delete indices that the cluster-manager erroneously believes shouldn't exist. 
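The UUID comparison this comment relies on, as a stand-alone sketch (simplified: the real check reads the previous and current states from the ClusterChangedEvent itself):

import org.opensearch.cluster.ClusterState;

class NewClusterCheckSketch {
    // A changed cluster UUID means the state came from a brand-new cluster,
    // e.g. an elected cluster-manager that was never part of this cluster before.
    static boolean isNewCluster(ClusterState previousState, ClusterState state) {
        final String prevClusterUUID = previousState.metadata().clusterUUID();
        final String currentClusterUUID = state.metadata().clusterUUID();
        return prevClusterUUID.equals(currentClusterUUID) == false;
    }
}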
// See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted() // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and // https://github.com/elastic/elasticsearch/issues/11665 diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index b9f3a2a99f0b7..c85691b80d7c3 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -94,7 +94,6 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -395,7 +394,6 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); - bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 459c0b9502acf..3eaac99bad998 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -81,7 +81,7 @@ *
<p>
    * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is * built on demand from the {@link RoutingTable}. - The cluster state can be updated only on the master node. All updates are performed by on a + The cluster state can be updated only on the cluster-manager node. All updates are performed on a * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on @@ -169,7 +169,7 @@ default boolean isPrivate() { private final boolean wasReadFromDiff; - private final int minimumMasterNodesOnPublishingMaster; + private final int minimumClusterManagerNodesOnPublishingClusterManager; // built on demand private volatile RoutingNodes routingNodes; @@ -198,7 +198,7 @@ public ClusterState( DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, - int minimumMasterNodesOnPublishingMaster, + int minimumClusterManagerNodesOnPublishingClusterManager, boolean wasReadFromDiff ) { this.version = version; @@ -209,7 +209,7 @@ public ClusterState( this.nodes = nodes; this.blocks = blocks; this.customs = customs; - this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; + this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager; this.wasReadFromDiff = wasReadFromDiff; } @@ -226,8 +226,9 @@ public long getVersion() { } public long getVersionOrMetadataVersion() { - // When following a Zen1 master, the cluster state version is not guaranteed to increase, so instead it is preferable to use the - // metadata version to determine the freshest node. However when following a Zen2 master the cluster state version should be used. + // When following a Zen1 cluster-manager, the cluster state version is not guaranteed to increase, + // so instead it is preferable to use the metadata version to determine the freshest node. + // However when following a Zen2 cluster-manager the cluster state version should be used. return term() == ZEN1_BWC_TERM ? metadata().version() : version(); } @@ -388,7 +389,7 @@ public String toString() { } /** - * a cluster state supersedes another state if they are from the same master and the version of this state is higher than that of the + * a cluster state supersedes another state if they are from the same cluster-manager and the version of this state is higher than that of the * other state. *
<p>
    * In essence that means that all the changes from the other cluster state are also reflected by the current one @@ -590,7 +591,7 @@ public static class Builder { private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; private boolean fromDiff; - private int minimumMasterNodesOnPublishingMaster = -1; + private int minimumClusterManagerNodesOnPublishingClusterManager = -1; public Builder(ClusterState state) { this.clusterName = state.clusterName; @@ -601,7 +602,7 @@ public Builder(ClusterState state) { this.metadata = state.metadata(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); - this.minimumMasterNodesOnPublishingMaster = state.minimumMasterNodesOnPublishingMaster; + this.minimumClusterManagerNodesOnPublishingClusterManager = state.minimumClusterManagerNodesOnPublishingClusterManager; this.fromDiff = false; } @@ -662,8 +663,8 @@ public Builder stateUUID(String uuid) { return this; } - public Builder minimumMasterNodesOnPublishingMaster(int minimumMasterNodesOnPublishingMaster) { - this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; + public Builder minimumClusterManagerNodesOnPublishingClusterManager(int minimumClusterManagerNodesOnPublishingClusterManager) { + this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager; return this; } @@ -701,7 +702,7 @@ public ClusterState build() { nodes, blocks, customs.build(), - minimumMasterNodesOnPublishingMaster, + minimumClusterManagerNodesOnPublishingClusterManager, fromDiff ); } @@ -746,7 +747,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - builder.minimumMasterNodesOnPublishingMaster = in.readVInt(); + builder.minimumClusterManagerNodesOnPublishingClusterManager = in.readVInt(); return builder.build(); } @@ -772,7 +773,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(cursor.value); } } - out.writeVInt(minimumMasterNodesOnPublishingMaster); + out.writeVInt(minimumClusterManagerNodesOnPublishingClusterManager); } private static class ClusterStateDiff implements Diff { @@ -795,7 +796,7 @@ private static class ClusterStateDiff implements Diff { private final Diff> customs; - private final int minimumMasterNodesOnPublishingMaster; + private final int minimumClusterManagerNodesOnPublishingClusterManager; ClusterStateDiff(ClusterState before, ClusterState after) { fromUuid = before.stateUUID; @@ -807,7 +808,7 @@ private static class ClusterStateDiff implements Diff { metadata = after.metadata.diff(before.metadata); blocks = after.blocks.diff(before.blocks); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - minimumMasterNodesOnPublishingMaster = after.minimumMasterNodesOnPublishingMaster; + minimumClusterManagerNodesOnPublishingClusterManager = after.minimumClusterManagerNodesOnPublishingClusterManager; } ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException { @@ -820,7 +821,7 @@ private static class ClusterStateDiff implements Diff { metadata = Metadata.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - 
minimumMasterNodesOnPublishingMaster = in.readVInt(); + minimumClusterManagerNodesOnPublishingClusterManager = in.readVInt(); } @Override @@ -834,7 +835,7 @@ public void writeTo(StreamOutput out) throws IOException { metadata.writeTo(out); blocks.writeTo(out); customs.writeTo(out); - out.writeVInt(minimumMasterNodesOnPublishingMaster); + out.writeVInt(minimumClusterManagerNodesOnPublishingClusterManager); } @Override @@ -854,7 +855,7 @@ public ClusterState apply(ClusterState state) { builder.metadata(metadata.apply(state.metadata)); builder.blocks(blocks.apply(state.blocks)); builder.customs(customs.apply(state.customs)); - builder.minimumMasterNodesOnPublishingMaster(minimumMasterNodesOnPublishingMaster); + builder.minimumClusterManagerNodesOnPublishingClusterManager(minimumClusterManagerNodesOnPublishingClusterManager); builder.fromDiff(true); return builder.build(); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java index 5d55ce70aec02..4f3372b4e9069 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java @@ -187,7 +187,7 @@ public void waitForNextChange(Listener listener, Predicate statePr // sample a new state. This state maybe *older* than the supplied state if we are called from an applier, // which wants to wait for something else to happen ClusterState newState = clusterApplierService.state(); - if (lastObservedState.get().isOlderOrDifferentMaster(newState) && statePredicate.test(newState)) { + if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && statePredicate.test(newState)) { // good enough, let's go. logger.trace("observer: sampled state accepted by predicate ({})", newState); lastObservedState.set(new StoredState(newState)); @@ -241,7 +241,7 @@ public void postAdded() { return; } ClusterState newState = clusterApplierService.state(); - if (lastObservedState.get().isOlderOrDifferentMaster(newState) && context.statePredicate.test(newState)) { + if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && context.statePredicate.test(newState)) { // double check we're still listening if (observingContext.compareAndSet(context, null)) { logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); @@ -295,22 +295,23 @@ public String toString() { } /** - * The observer considers two cluster states to be the same if they have the same version and master node id (i.e. null or set) + * The observer considers two cluster states to be the same if they have the same version and cluster-manager node id (i.e. 
null or set) */ private static class StoredState { - private final String masterNodeId; + private final String clusterManagerNodeId; private final long version; StoredState(ClusterState clusterState) { - this.masterNodeId = clusterState.nodes().getMasterNodeId(); + this.clusterManagerNodeId = clusterState.nodes().getMasterNodeId(); this.version = clusterState.version(); } /** - * returns true if stored state is older then given state or they are from a different master, meaning they can't be compared + * returns true if the stored state is older than the given state or they are from a different cluster-manager, meaning they can't be compared * */ - public boolean isOlderOrDifferentMaster(ClusterState clusterState) { - return version < clusterState.version() || Objects.equals(masterNodeId, clusterState.nodes().getMasterNodeId()) == false; + public boolean isOlderOrDifferentClusterManager(ClusterState clusterState) { + return version < clusterState.version() + || Objects.equals(clusterManagerNodeId, clusterState.nodes().getMasterNodeId()) == false; } } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 04002b31a8b3e..48d3dd7d03cb5 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -45,7 +45,7 @@ public interface ClusterStateTaskExecutor { ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception; /** - * indicates whether this executor should only run if the current node is master + * indicates whether this executor should only run if the current node is cluster-manager */ default boolean runOnlyOnMaster() { return true; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 718df33f8a2d2..d5b9eebbc3b5d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -43,11 +43,11 @@ public interface ClusterStateTaskListener { void onFailure(String source, Exception e); /** - * called when the task was rejected because the local node is no longer master. + * called when the task was rejected because the local node is no longer cluster-manager. * Used only for tasks submitted to {@link MasterService}. */ default void onNoLongerMaster(String source) { - onFailure(source, new NotMasterException("no longer master. source: [" + source + "]")); + onFailure(source, new NotMasterException("no longer cluster-manager. source: [" + source + "]")); } /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 72d72158a5f0b..9393663b309fc 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -101,7 +101,7 @@ public Priority priority() { } /** - * Marked as final as cluster state update tasks should only run on master. + * Marked as final as cluster state update tasks should only run on cluster-manager. * For local requests, use {@link LocalClusterUpdateTask} instead.
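For contrast with LocalClusterUpdateTask, a minimal no-op sketch of a task that may only run on the elected cluster-manager (submission through ClusterService#submitStateUpdateTask is assumed; the class name is illustrative):

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;

class NoopClusterManagerTaskSketch extends ClusterStateUpdateTask {
    @Override
    public ClusterState execute(ClusterState currentState) {
        // Runs on the elected cluster-manager only; returning the same
        // instance signals that there is nothing new to publish.
        return currentState;
    }

    @Override
    public void onFailure(String source, Exception e) {
        // A real task would log the failure and notify its caller here.
    }
}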
*/ @Override diff --git a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java index 06ed0c0580e2f..ffcd63b3b57c1 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java @@ -38,7 +38,7 @@ import java.util.List; /** - * Used to apply state updates on nodes that are not necessarily master + * Used to apply state updates on nodes that are not necessarily cluster-manager */ public abstract class LocalClusterUpdateTask implements diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 1c35f7bbbe8a1..d4456b379237c 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -32,29 +32,29 @@ package org.opensearch.cluster; /** - * Enables listening to master changes events of the local node (when the local node becomes the master, and when the local - * node cease being a master). + * Enables listening to cluster-manager change events of the local node (when the local node becomes the cluster-manager, and when the local + * node ceases being a cluster-manager). */ public interface LocalNodeMasterListener extends ClusterStateListener { /** - * Called when local node is elected to be the master + * Called when local node is elected to be the cluster-manager */ - void onMaster(); + void onClusterManager(); /** - * Called when the local node used to be the master, a new master was elected and it's no longer the local node. + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. */ - void offMaster(); + void offClusterManager(); @Override default void clusterChanged(ClusterChangedEvent event) { - final boolean wasMaster = event.previousState().nodes().isLocalNodeElectedMaster(); - final boolean isMaster = event.localNodeMaster(); - if (wasMaster == false && isMaster) { - onMaster(); - } else if (wasMaster && isMaster == false) { - offMaster(); + final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedMaster(); + final boolean isClusterManager = event.localNodeMaster(); + if (wasClusterManager == false && isClusterManager) { + onClusterManager(); + } else if (wasClusterManager && isClusterManager == false) { + offClusterManager(); } } } diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java index 9d11fb84af801..8eeaedd83cb26 100644 --- a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java +++ b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java @@ -48,14 +48,14 @@ private MasterNodeChangePredicate() { */ public static Predicate build(ClusterState currentState) { final long currentVersion = currentState.version(); - final DiscoveryNode masterNode = currentState.nodes().getMasterNode(); - final String currentMasterId = masterNode == null ? null : masterNode.getEphemeralId(); + final DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); + final String currentMasterId = clusterManagerNode == null ?
null : clusterManagerNode.getEphemeralId(); return newState -> { - final DiscoveryNode newMaster = newState.nodes().getMasterNode(); + final DiscoveryNode newClusterManager = newState.nodes().getMasterNode(); final boolean accept; - if (newMaster == null) { + if (newClusterManager == null) { accept = false; - } else if (newMaster.getEphemeralId().equals(currentMasterId) == false) { + } else if (newClusterManager.getEphemeralId().equals(currentMasterId) == false) { accept = true; } else { accept = newState.version() > currentVersion; diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 02139ea21b483..696df6278dbb6 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -69,7 +69,7 @@ * This component is responsible for maintaining connections from this node to all the nodes listed in the cluster state, and for * disconnecting from nodes once they are removed from the cluster state. It periodically checks that all connections are still open and * restores them if needed. Note that this component is *not* responsible for removing nodes from the cluster state if they disconnect or - * are unresponsive: this is the job of the master's fault detection components, particularly {@link FollowersChecker}. + * are unresponsive: this is the job of the cluster-manager's fault detection components, particularly {@link FollowersChecker}. *
<p>
    * The {@link NodeConnectionsService#connectToNodes(DiscoveryNodes, Runnable)} and {@link * NodeConnectionsService#disconnectFromNodesExcept(DiscoveryNodes)} methods are called on the {@link ClusterApplier} thread. This component diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotMasterException.java index 61772aa13233b..c8ec32ed77eb9 100644 --- a/server/src/main/java/org/opensearch/cluster/NotMasterException.java +++ b/server/src/main/java/org/opensearch/cluster/NotMasterException.java @@ -37,9 +37,9 @@ import java.io.IOException; /** - * Thrown when a node join request or a master ping reaches a node which is not - * currently acting as a master or when a cluster state update task is to be executed - * on a node that is no longer master. + * Thrown when a node join request or a cluster-manager ping reaches a node which is not + * currently acting as a cluster-manager or when a cluster state update task is to be executed + * on a node that is no longer cluster-manager. */ public class NotMasterException extends OpenSearchException { diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 2c001833f46ce..d0d5aea9d036b 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -297,9 +297,9 @@ private Entry(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { version = Version.readVersion(in); } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - // If an older master informs us that shard generations are supported we use the minimum shard generation compatible - // version. If shard generations are not supported yet we use a placeholder for a version that does not use shard - // generations. + // If an older cluster-manager informs us that shard generations are supported + // we use the minimum shard generation compatible version. + // If shard generations are not supported yet we use a placeholder for a version that does not use shard generations. version = in.readBoolean() ? 
SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION : SnapshotsService.OLD_SNAPSHOT_FORMAT; } else { version = SnapshotsService.OLD_SNAPSHOT_FORMAT; diff --git a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java index 23a9ed16e35d2..97d628e3231c9 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java @@ -45,7 +45,7 @@ public interface AckedRequest { TimeValue ackTimeout(); /** - * Returns the timeout for the request to be completed on the master node + * Returns the timeout for the request to be completed on the cluster-manager node */ TimeValue masterNodeTimeout(); } diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java index d142c28086f70..0931086ab3ff0 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java @@ -62,7 +62,7 @@ public T ackTimeout(TimeValue ackTimeout) { /** * Returns the maximum time interval to wait for the request to - * be completed on the master node + * be completed on the cluster-manager node */ @Override public TimeValue masterNodeTimeout() { @@ -70,7 +70,7 @@ public TimeValue masterNodeTimeout() { } /** - * Sets the master node timeout + * Sets the cluster-manager node timeout */ @SuppressWarnings("unchecked") public T masterNodeTimeout(TimeValue masterNodeTimeout) { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index f22d489ec6fd7..cf1f2d3141ccd 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -104,10 +104,10 @@ public void setClient(Client client) { } /** - * Update mappings on the master node, waiting for the change to be committed, + * Update mappings on the cluster-manager node, waiting for the change to be committed, * but not for the mapping update to be applied on all nodes. The timeout specified by - * {@code timeout} is the master node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), - * potentially waiting for a master node to be available. + * {@code timeout} is the cluster-manager node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), + * potentially waiting for a cluster-manager node to be available. 
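A usage sketch for the timeout this javadoc refers to (CreateIndexRequest is just one MasterNodeRequest subclass, picked for illustration; 30 seconds is an arbitrary value):

import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.common.unit.TimeValue;

class ClusterManagerTimeoutSketch {
    // Bounds how long the request may wait for a cluster-manager to be
    // discovered (or re-elected) before it fails.
    static CreateIndexRequest withTimeout(String index) {
        return new CreateIndexRequest(index).masterNodeTimeout(TimeValue.timeValueSeconds(30));
    }
}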
*/ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java index 23ce218904d21..b40665a1bcf1b 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java @@ -74,12 +74,12 @@ public NodeMappingRefreshAction(TransportService transportService, MetadataMappi ); } - public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { - if (masterNode == null) { - logger.warn("can't send mapping refresh for [{}], no master known.", request.index()); + public void nodeMappingRefresh(final DiscoveryNode clusterManagerNode, final NodeMappingRefreshRequest request) { + if (clusterManagerNode == null) { + logger.warn("can't send mapping refresh for [{}], no cluster-manager known.", request.index()); return; } - transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest(clusterManagerNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 300067587b78b..fd6a5367146a4 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -177,14 +177,14 @@ private void sendShardAction( final ActionListener listener ) { ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); - DiscoveryNode masterNode = currentState.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); Predicate changePredicate = MasterNodeChangePredicate.build(currentState); - if (masterNode == null) { - logger.warn("no master known for action [{}] for shard entry [{}]", actionName, request); - waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); + if (clusterManagerNode == null) { + logger.warn("no cluster-manager known for action [{}] for shard entry [{}]", actionName, request); + waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); } else { - logger.debug("sending [{}] to [{}] for shard entry [{}]", actionName, masterNode.getId(), request); - transportService.sendRequest(masterNode, actionName, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + logger.debug("sending [{}] to [{}] for shard entry [{}]", actionName, clusterManagerNode.getId(), request); + transportService.sendRequest(clusterManagerNode, actionName, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleResponse(TransportResponse.Empty response) { listener.onResponse(null); @@ -192,14 +192,14 @@ public void handleResponse(TransportResponse.Empty response) { @Override public void handleException(TransportException exp) { - if (isMasterChannelException(exp)) { - waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); + if 
(isClusterManagerChannelException(exp)) { + waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); } else { logger.warn( new ParameterizedMessage( "unexpected failure while sending request [{}]" + " to [{}] for shard entry [{}]", actionName, - masterNode, + clusterManagerNode, request ), exp @@ -217,17 +217,17 @@ public void handleException(TransportException exp) { } } - private static Class[] MASTER_CHANNEL_EXCEPTIONS = new Class[] { + private static Class[] CLUSTER_MANAGER_CHANNEL_EXCEPTIONS = new Class[] { NotMasterException.class, ConnectTransportException.class, FailedToCommitClusterStateException.class }; - private static boolean isMasterChannelException(TransportException exp) { - return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null; + private static boolean isClusterManagerChannelException(TransportException exp) { + return ExceptionsHelper.unwrap(exp, CLUSTER_MANAGER_CHANNEL_EXCEPTIONS) != null; } /** - * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node. This means + * Send a shard failed request to the cluster-manager node to update the cluster state with the failure of a shard on another node. This means * that the shard should be failed because a write made it into the primary but was not replicated to this shard copy. If the shard * does not exist anymore but still has an entry in the in-sync set, remove its allocation id from the in-sync set. * @@ -261,7 +261,7 @@ int remoteShardFailedCacheSize() { } /** - * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. + * Send a shard failed request to the cluster-manager node to update the cluster state when a shard on the local node failed. */ public void localShardFailed( final ShardRouting shardRouting, @@ -273,7 +273,7 @@ public void localShardFailed( } /** - * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. + * Send a shard failed request to the cluster-manager node to update the cluster state when a shard on the local node failed. 
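The retry decision from the hunk above, restated as a self-contained sketch; the exception classes and ExceptionsHelper#unwrap are real, the wrapper is illustrative:

import org.opensearch.ExceptionsHelper;
import org.opensearch.cluster.NotMasterException;
import org.opensearch.cluster.coordination.FailedToCommitClusterStateException;
import org.opensearch.transport.ConnectTransportException;
import org.opensearch.transport.TransportException;

class ClusterManagerChannelCheckSketch {
    private static final Class<?>[] CLUSTER_MANAGER_CHANNEL_EXCEPTIONS = new Class<?>[] {
        NotMasterException.class,
        ConnectTransportException.class,
        FailedToCommitClusterStateException.class };

    // True means the failure is attributable to the cluster-manager channel, so
    // the caller should wait for a new election and then retry the request.
    static boolean shouldWaitForNewClusterManager(TransportException exp) {
        return ExceptionsHelper.unwrap(exp, CLUSTER_MANAGER_CHANNEL_EXCEPTIONS) != null;
    }
}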
*/ public void localShardFailed( final ShardRouting shardRouting, @@ -294,7 +294,7 @@ public void localShardFailed( } // visible for testing - protected void waitForNewMasterAndRetry( + protected void waitForNewClusterManagerAndRetry( String actionName, ClusterStateObserver observer, TransportRequest request, @@ -305,7 +305,7 @@ protected void waitForNewMasterAndRetry( @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election for shard entry [{}]", state, request); + logger.trace("new cluster state [{}] after waiting for cluster-manager election for shard entry [{}]", state, request); } sendShardAction(actionName, state, request, listener); } @@ -318,7 +318,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - // we wait indefinitely for a new master + // we wait indefinitely for a new cluster-manager assert false; } }, changePredicate); @@ -376,13 +376,13 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - logger.error("{} no longer master while failing shard [{}]", request.shardId, request); + logger.error("{} no longer cluster-manager while failing shard [{}]", request.shardId, request); try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { logger.warn( () -> new ParameterizedMessage( - "{} failed to send no longer master while failing shard [{}]", + "{} failed to send no longer cluster-manager while failing shard [{}]", request.shardId, request ), @@ -714,7 +714,8 @@ public ClusterTasksResult execute(ClusterState currentState, if (matched == null) { // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started // events on every cluster state publishing that does not contain the shard as started yet. This means that old stale - // requests might still be in flight even after the shard has already been started or failed on the master. We just + // requests might still be in flight even after the shard has already been started or failed on the cluster-manager. We + // just // ignore these requests for now. logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", task.shardId, task); builder.success(task); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java index 40d6375d8d916..2ace3e86b31de 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java @@ -38,7 +38,7 @@ import java.io.IOException; /** - * A master node sends this request to its peers to inform them that it could commit the + * A cluster-manager node sends this request to its peers to inform them that it could commit the * cluster state with the given term and version. Peers that have accepted the given cluster * state will then consider it as committed and proceed to apply the state locally. 
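To make the accept-then-commit ordering concrete, a deliberately simplified, self-contained model; all names are hypothetical, and the real protocol (terms, versions, quorums) lives in CoordinationState:

class TwoPhaseCommitSketch {
    private long acceptedTerm = -1;
    private long acceptedVersion = -1;
    private boolean committed = false;

    // Phase 1: a published state is accepted but not yet applied.
    void handlePublish(long term, long version) {
        acceptedTerm = term;
        acceptedVersion = version;
        committed = false;
    }

    // Phase 2: an apply-commit for the same term/version marks the accepted
    // state as committed, after which it is safe to apply locally.
    void handleCommit(long term, long version) {
        if (term != acceptedTerm || version != acceptedVersion) {
            throw new IllegalStateException("commit does not match the accepted state");
        }
        committed = true;
    }

    boolean isCommitted() {
        return committed;
    }
}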
*/ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java index c7708a54f9031..979b36110b6a3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -141,11 +141,11 @@ public ClusterBootstrapService( bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); unconfiguredBootstrapTimeout = null; } else { - final List initialMasterNodes = INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings); - bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); - if (bootstrapRequirements.size() != initialMasterNodes.size()) { + final List initialClusterManagerNodes = INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialClusterManagerNodes)); + if (bootstrapRequirements.size() != initialClusterManagerNodes.size()) { throw new IllegalArgumentException( - "setting [" + initialClusterManagerSettingName + "] contains duplicates: " + initialMasterNodes + "setting [" + initialClusterManagerSettingName + "] contains duplicates: " + initialClusterManagerNodes ); } unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 47a18d5be1ec4..ef35c6f8b3249 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -39,8 +39,8 @@ public interface ClusterStatePublisher { /** - * Publish all the changes to the cluster from the master (can be called just by the master). The publish - * process should apply this state to the master as well! + * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish + * process should apply this state to the cluster-manager as well! * * The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing. * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index b28fde5d9cc16..9713c841caaf7 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -610,7 +610,8 @@ default void markLastAcceptedStateAsCommitted() { metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); metadataBuilder.coordinationMetadata(coordinationMetadata); } - // if we receive a commit from a Zen1 master that has not recovered its state yet, the cluster uuid might not been known yet. + // if we receive a commit from a Zen1 cluster-manager that has not recovered its state yet, + // the cluster uuid might not be known yet.
assert lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false || lastAcceptedState.term() == ZEN1_BWC_TERM : "received cluster state with empty cluster uuid but not Zen1 BWC term: " + lastAcceptedState; @@ -622,7 +623,8 @@ default void markLastAcceptedStateAsCommitted() { metadataBuilder.clusterUUIDCommitted(true); if (lastAcceptedState.term() != ZEN1_BWC_TERM) { - // Zen1 masters never publish a committed cluster UUID so if we logged this it'd happen on on every update. Let's just + // Zen1 cluster-managers never publish a committed cluster UUID so if we logged this it'd happen on every update. + // Let's just // not log it at all in a 6.8/7.x rolling upgrade. logger.info("cluster UUID set to [{}]", lastAcceptedState.metadata().clusterUUID()); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 89e5b9b4cfbcc..ef578300cdbe2 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -136,12 +136,12 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final boolean singleNodeDiscovery; private final ElectionStrategy electionStrategy; private final TransportService transportService; - private final MasterService masterService; + private final MasterService clusterManagerService; private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final Supplier persistedStateSupplier; - private final NoMasterBlockService noMasterBlockService; + private final NoMasterBlockService noClusterManagerBlockService; final Object mutex = new Object(); // package-private to allow tests to call methods that assert that the mutex is held private final SetOnce coordinationState = new SetOnce<>(); // initialized on start-up (see doStart) private volatile ClusterState applierState; // the state that should be exposed to the cluster state applier @@ -186,7 +186,7 @@ public Coordinator( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, - MasterService masterService, + MasterService clusterManagerService, Supplier persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, @@ -198,7 +198,7 @@ public Coordinator( ) { this.settings = settings; this.transportService = transportService; - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); @@ -206,10 +206,10 @@ public Coordinator( this.joinHelper = new JoinHelper( settings, allocationService, - masterService, + clusterManagerService, transportService, this::getCurrentTerm, - this::getStateForMasterService, + this::getStateForClusterManagerService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators, @@ -217,7 +217,7 @@ public Coordinator( nodeHealthService ); this.persistedStateSupplier = persistedStateSupplier; - this.noMasterBlockService = new NoMasterBlockService(settings, clusterSettings); + this.noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); this.lastKnownLeader =
Optional.empty(); this.lastJoin = Optional.empty(); this.joinAccumulator = new InitialJoinAccumulator(); @@ -255,7 +255,7 @@ public Coordinator( ); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; - masterService.setClusterStateSupplier(this::getStateForMasterService); + clusterManagerService.setClusterStateSupplier(this::getStateForClusterManagerService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService( settings, @@ -282,7 +282,7 @@ public Coordinator( private ClusterFormationState getClusterFormationState() { return new ClusterFormationState( settings, - getStateForMasterService(), + getStateForClusterManagerService(), peerFinder.getLastResolvedAddresses(), Stream.concat(Stream.of(getLocalNode()), StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false)) .collect(Collectors.toList()), @@ -296,7 +296,7 @@ private void onLeaderFailure(Exception e) { synchronized (mutex) { if (mode != Mode.CANDIDATE) { assert lastKnownLeader.isPresent(); - logger.info(new ParameterizedMessage("master node [{}] failed, restarting discovery", lastKnownLeader.get()), e); + logger.info(new ParameterizedMessage("cluster-manager node [{}] failed, restarting discovery", lastKnownLeader.get()), e); } becomeCandidate("onLeaderFailure"); } @@ -305,7 +305,7 @@ private void onLeaderFailure(Exception e) { private void removeNode(DiscoveryNode discoveryNode, String reason) { synchronized (mutex) { if (mode == Mode.LEADER) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( "node-left", new NodeRemovalClusterStateTaskExecutor.Task(discoveryNode, reason), ClusterStateTaskConfig.build(Priority.IMMEDIATE), @@ -336,11 +336,11 @@ void onFollowerCheckRequest(FollowerCheckRequest followerCheckRequest) { } else if (mode == Mode.FOLLOWER) { logger.trace("onFollowerCheckRequest: responding successfully to {}", followerCheckRequest); } else if (joinHelper.isJoinPending()) { - logger.trace("onFollowerCheckRequest: rejoining master, responding successfully to {}", followerCheckRequest); + logger.trace("onFollowerCheckRequest: rejoining cluster-manager, responding successfully to {}", followerCheckRequest); } else { - logger.trace("onFollowerCheckRequest: received check from faulty master, rejecting {}", followerCheckRequest); + logger.trace("onFollowerCheckRequest: received check from faulty cluster-manager, rejecting {}", followerCheckRequest); throw new CoordinationStateRejectedException( - "onFollowerCheckRequest: received check from faulty master, rejecting " + followerCheckRequest + "onFollowerCheckRequest: received check from faulty cluster-manager, rejecting " + followerCheckRequest ); } } @@ -352,9 +352,9 @@ private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionList coordinationState.get().handleCommit(applyCommitRequest); final ClusterState committedState = hideStateIfNotRecovered(coordinationState.get().getLastAcceptedState()); - applierState = mode == Mode.CANDIDATE ? clusterStateWithNoMasterBlock(committedState) : committedState; + applierState = mode == Mode.CANDIDATE ? clusterStateWithNoClusterManagerBlock(committedState) : committedState; if (applyCommitRequest.getSourceNode().equals(getLocalNode())) { - // master node applies the committed state at the end of the publication process, not here. 
+ // cluster-manager node applies the committed state at the end of the publication process, not here. applyListener.onResponse(null); } else { clusterApplier.onNewClusterState(applyCommitRequest.toString(), () -> applierState, new ClusterApplyListener() { @@ -423,7 +423,7 @@ && getCurrentTerm() == ZEN1_BWC_TERM } if (publishRequest.getAcceptedState().term() > localState.term()) { - // only do join validation if we have not accepted state from this master yet + // only do join validation if we have not accepted state from this cluster-manager yet onJoinValidators.forEach(a -> a.accept(getLocalNode(), publishRequest.getAcceptedState())); } @@ -507,12 +507,12 @@ private void startElection() { } } - private void abdicateTo(DiscoveryNode newMaster) { + private void abdicateTo(DiscoveryNode newClusterManager) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; - assert newMaster.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newMaster; - final StartJoinRequest startJoinRequest = new StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1); - logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm()); + assert newClusterManager.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newClusterManager; + final StartJoinRequest startJoinRequest = new StartJoinRequest(newClusterManager, Math.max(getCurrentTerm(), maxTermSeen) + 1); + logger.info("abdicating to {} with term {}", newClusterManager, startJoinRequest.getTerm()); getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> { if (isZen1Node(node) == false) { joinHelper.sendStartJoinRequest(startJoinRequest, node); @@ -521,7 +521,7 @@ private void abdicateTo(DiscoveryNode newMaster) { // handling of start join messages on the local node will be dispatched to the generic thread-pool assert mode == Mode.LEADER : "should still be leader after sending abdication messages " + mode; // explicitly move node to candidate state so that the next cluster state update task yields an onNoLongerMaster event - becomeCandidate("after abdicating to " + newMaster); + becomeCandidate("after abdicating to " + newClusterManager); } private static boolean localNodeMayWinElection(ClusterState lastAcceptedState) { @@ -580,7 +580,7 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback } transportService.connectToNode(joinRequest.getSourceNode(), ActionListener.wrap(ignore -> { - final ClusterState stateForJoinValidation = getStateForMasterService(); + final ClusterState stateForJoinValidation = getStateForClusterManagerService(); if (stateForJoinValidation.nodes().isLocalNodeElectedMaster()) { onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); @@ -668,11 +668,11 @@ void becomeCandidate(String method) { lagDetector.clearTrackedNodes(); if (prevMode == Mode.LEADER) { - cleanMasterService(); + cleanClusterManagerService(); } if (applierState.nodes().getMasterNodeId() != null) { - applierState = clusterStateWithNoMasterBlock(applierState); + applierState = clusterStateWithNoClusterManagerBlock(applierState); clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, (source, e) -> {}); } } @@ -750,8 +750,8 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lagDetector.clearTrackedNodes(); } - private void cleanMasterService() { - masterService.submitStateUpdateTask("clean-up
after stepping down as cluster-manager", new LocalClusterUpdateTask() { + private void cleanClusterManagerService() { + clusterManagerService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { @Override public void onFailure(String source, Exception e) { // ignore @@ -833,7 +833,7 @@ protected void doStart() { .blocks( ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) - .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) + .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) .build(); @@ -888,7 +888,7 @@ public void invariant() { + lagDetector.getTrackedNodes(); if (mode == Mode.LEADER) { - final boolean becomingMaster = getStateForMasterService().term() != getCurrentTerm(); + final boolean becomingClusterManager = getStateForClusterManagerService().term() != getCurrentTerm(); assert coordinationState.get().electionWon(); assert lastKnownLeader.isPresent() && lastKnownLeader.get().equals(getLocalNode()); @@ -896,7 +896,8 @@ public void invariant() { assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert becomingMaster || getStateForMasterService().nodes().getMasterNodeId() != null : getStateForMasterService(); + assert becomingClusterManager || getStateForClusterManagerService().nodes().getMasterNodeId() != null + : getStateForClusterManagerService(); assert leaderChecker.leader() == null : leaderChecker.leader(); assert getLocalNode().equals(applierState.nodes().getMasterNode()) || (applierState.nodes().getMasterNodeId() == null && applierState.term() < getCurrentTerm()); @@ -904,8 +905,9 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert clusterFormationFailureHelper.isRunning() == false; final boolean activePublication = currentPublication.map(CoordinatorPublication::isActiveForCurrentLeader).orElse(false); - if (becomingMaster && activePublication == false) { - // cluster state update task to become master is submitted to MasterService, but publication has not started yet + if (becomingClusterManager && activePublication == false) { + // cluster state update task to become cluster-manager is submitted to MasterService, + // but publication has not started yet assert followersChecker.getKnownFollowers().isEmpty() : followersChecker.getKnownFollowers(); } else { final ClusterState lastPublishedState; @@ -924,7 +926,7 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) + followersChecker.getKnownFollowers(); } - assert becomingMaster + assert becomingClusterManager || activePublication || coordinationState.get() .getLastAcceptedConfiguration() @@ -939,8 +941,8 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert getStateForMasterService().nodes().getMasterNodeId() == null : getStateForMasterService(); - assert leaderChecker.currentNodeIsMaster() == false; + assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert leaderChecker.currentNodeIsClusterManager() == false; assert lastKnownLeader.equals(Optional.of(leaderChecker.leader())); assert followersChecker.getKnownFollowers().isEmpty(); 
assert lastKnownLeader.get().equals(applierState.nodes().getMasterNode()) @@ -954,8 +956,8 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert joinAccumulator instanceof JoinHelper.CandidateJoinAccumulator; assert peerFinderLeader.isPresent() == false : peerFinderLeader; assert prevotingRound == null || electionScheduler != null; - assert getStateForMasterService().nodes().getMasterNodeId() == null : getStateForMasterService(); - assert leaderChecker.currentNodeIsMaster() == false; + assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert leaderChecker.currentNodeIsClusterManager() == false; assert leaderChecker.leader() == null : leaderChecker.leader(); assert followersChecker.getKnownFollowers().isEmpty(); assert applierState.nodes().getMasterNodeId() == null; @@ -967,7 +969,7 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) } public boolean isInitialConfigurationSet() { - return getStateForMasterService().getLastAcceptedConfiguration().isEmpty() == false; + return getStateForClusterManagerService().getLastAcceptedConfiguration().isEmpty() == false; } /** @@ -979,7 +981,7 @@ public boolean isInitialConfigurationSet() { */ public boolean setInitialConfiguration(final VotingConfiguration votingConfiguration) { synchronized (mutex) { - final ClusterState currentState = getStateForMasterService(); + final ClusterState currentState = getStateForClusterManagerService(); if (isInitialConfigurationSet()) { logger.debug("initial configuration already set, ignoring {}", votingConfiguration); @@ -1051,7 +1053,7 @@ ClusterState improveConfiguration(ClusterState clusterState) { // the voting config. We could exclude all the cluster-manager-ineligible nodes here, but there could be quite a few of them and // that makes // the logging much harder to follow. 
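The hunk that follows only renames the stream variable, but the stream itself is the interesting part: it collects the ids of cluster-manager-ineligible nodes that still appear in either the last-accepted or last-committed voting configuration, so they can be excluded on reconfiguration. A self-contained sketch of that filter under those assumptions, with Node as a hypothetical stand-in record:

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class VotingConfigFilterSketch {
        record Node(String id, boolean clusterManagerEligible) {}

        // Ids of ineligible nodes that must be pushed out of the voting config.
        static Set<String> ineligibleIdsInVotingConfig(List<Node> nodes, Set<String> lastAccepted, Set<String> lastCommitted) {
            return nodes.stream()
                .filter(n -> !n.clusterManagerEligible())
                .filter(n -> lastAccepted.contains(n.id()) || lastCommitted.contains(n.id()))
                .map(Node::id)
                .collect(Collectors.toSet());
        }
    }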
- final Stream masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) + final Stream clusterManagerIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) .filter( n -> n.isMasterNode() == false && (clusterState.getLastAcceptedConfiguration().getNodeIds().contains(n.getId()) @@ -1066,7 +1068,7 @@ ClusterState improveConfiguration(ClusterState clusterState) { .collect(Collectors.toSet()); final VotingConfiguration newConfig = reconfigurator.reconfigure( liveNodes, - Stream.concat(masterIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), + Stream.concat(clusterManagerIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), getLocalNode(), clusterState.getLastAcceptedConfiguration() ); @@ -1119,7 +1121,7 @@ private void scheduleReconfigurationIfNeeded() { final ClusterState state = getLastAcceptedState(); if (improveConfiguration(state) != state && reconfigurationTaskScheduled.compareAndSet(false, true)) { logger.trace("scheduling reconfiguration"); - masterService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { + clusterManagerService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { reconfigurationTaskScheduled.set(false); @@ -1148,13 +1150,14 @@ private void handleJoin(Join join) { if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception - final boolean isNewJoinFromMasterEligibleNode = handleJoinIgnoringExceptions(join); + final boolean isNewJoinFromClusterManagerEligibleNode = handleJoinIgnoringExceptions(join); - // If we haven't completely finished becoming master then there's already a publication scheduled which will, in turn, + // If we haven't completely finished becoming cluster-manager then there's already a publication scheduled which will, in + // turn, // schedule a reconfiguration if needed. 
It's benign to schedule a reconfiguration anyway, but it might fail if it wins the // race against the election-winning publication and log a big error message, which we can prevent by checking this here: - final boolean establishedAsMaster = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm(); - if (isNewJoinFromMasterEligibleNode && establishedAsMaster && publicationInProgress() == false) { + final boolean establishedAsClusterManager = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm(); + if (isNewJoinFromClusterManagerEligibleNode && establishedAsClusterManager && publicationInProgress() == false) { scheduleReconfigurationIfNeeded(); } } else { @@ -1193,27 +1196,28 @@ private List getDiscoveredNodes() { return nodes; } - ClusterState getStateForMasterService() { + ClusterState getStateForClusterManagerService() { synchronized (mutex) { - // expose last accepted cluster state as base state upon which the master service + // expose last accepted cluster state as base state upon which the cluster_manager service // speculatively calculates the next cluster state update final ClusterState clusterState = coordinationState.get().getLastAcceptedState(); if (mode != Mode.LEADER || clusterState.term() != getCurrentTerm()) { - // the master service checks if the local node is the master node in order to fail execution of the state update early - return clusterStateWithNoMasterBlock(clusterState); + // the cluster-manager service checks if the local node is the cluster-manager node in order to fail execution of the state + // update early + return clusterStateWithNoClusterManagerBlock(clusterState); } return clusterState; } } - private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { + private ClusterState clusterStateWithNoClusterManagerBlock(ClusterState clusterState) { if (clusterState.nodes().getMasterNodeId() != null) { // remove block if it already exists before adding new one assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator"; final ClusterBlocks clusterBlocks = ClusterBlocks.builder() .blocks(clusterState.blocks()) - .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) + .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) .build(); final DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).masterNodeId(null).build(); return ClusterState.builder(clusterState).blocks(clusterBlocks).nodes(discoveryNodes).build(); @@ -1233,14 +1237,16 @@ public void publish( if (mode != Mode.LEADER || getCurrentTerm() != clusterChangedEvent.state().term()) { logger.debug( () -> new ParameterizedMessage( - "[{}] failed publication as node is no longer master for term {}", + "[{}] failed publication as node is no longer cluster-manager for term {}", clusterChangedEvent.source(), clusterChangedEvent.state().term() ) ); publishListener.onFailure( new FailedToCommitClusterStateException( - "node is no longer master for term " + clusterChangedEvent.state().term() + " while handling publication" + "node is no longer cluster-manager for term " + + clusterChangedEvent.state().term() + + " while handling publication" ) ); return; @@ -1302,12 +1308,12 @@ private boolean assertPreviousStateConsistency(ClusterChangedEvent event) { .equals( XContentHelper.convertToMap( JsonXContent.jsonXContent, - Strings.toString(clusterStateWithNoMasterBlock(coordinationState.get().getLastAcceptedState())), + 
Strings.toString(clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState())), false ) ) : Strings.toString(event.previousState()) + " vs " - + Strings.toString(clusterStateWithNoMasterBlock(coordinationState.get().getLastAcceptedState())); + + Strings.toString(clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState())); return true; } @@ -1363,10 +1369,10 @@ private class CoordinatorPeerFinder extends PeerFinder { } @Override - protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { + protected void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term) { synchronized (mutex) { - ensureTermAtLeast(masterNode, term); - joinHelper.sendJoinRequest(masterNode, getCurrentTerm(), joinWithDestination(lastJoin, masterNode, term)); + ensureTermAtLeast(clusterManagerNode, term); + joinHelper.sendJoinRequest(clusterManagerNode, getCurrentTerm(), joinWithDestination(lastJoin, clusterManagerNode, term)); } } @@ -1613,12 +1619,12 @@ public void onSuccess(String source) { boolean attemptReconfiguration = true; final ClusterState state = getLastAcceptedState(); // committed state if (localNodeMayWinElection(state) == false) { - final List masterCandidates = completedNodes().stream() + final List clusterManagerCandidates = completedNodes().stream() .filter(DiscoveryNode::isMasterNode) .filter(node -> nodeMayWinElection(state, node)) .filter(node -> { - // check if master candidate would be able to get an election quorum if we were to - // abdicate to it. Assume that every node that completed the publication can provide + // check if cluster_manager candidate would be able to get an election quorum if we were + // to abdicate to it. Assume that every node that completed the publication can provide // a vote in that next election and has the latest state. final long futureElectionTerm = state.term() + 1; final VoteCollection futureVoteCollection = new VoteCollection(); @@ -1638,8 +1644,8 @@ public void onSuccess(String source) { ); }) .collect(Collectors.toList()); - if (masterCandidates.isEmpty() == false) { - abdicateTo(masterCandidates.get(random.nextInt(masterCandidates.size()))); + if (clusterManagerCandidates.isEmpty() == false) { + abdicateTo(clusterManagerCandidates.get(random.nextInt(clusterManagerCandidates.size()))); attemptReconfiguration = false; } } @@ -1665,7 +1671,7 @@ public void onFailure(Exception e) { cancelTimeoutHandlers(); final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException("publication failed", e); - ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the master. + ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the cluster manager. 
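The abdication check renamed above simulates the next election: assume every node that completed this publication would vote for the candidate in term + 1, then test whether those votes form a quorum. A rough sketch of that arithmetic with plain collections (OpenSearch's VoteCollection and quorum rules are simplified here to a strict majority of the voting configuration):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    final class AbdicationQuorumSketch {
        static boolean futureElectionQuorum(List<String> completedNodeIds, Set<String> votingConfigIds) {
            Set<String> votes = new HashSet<>(completedNodeIds);
            votes.retainAll(votingConfigIds); // only voting-config members count
            return votes.size() * 2 > votingConfigIds.size(); // strict majority
        }
    }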
publishListener.onFailure(exception); } }, OpenSearchExecutors.newDirectExecutorService(), transportService.getThreadPool().getThreadContext()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 5975e5b64214f..693a997d318cd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -138,23 +138,28 @@ public class JoinHelper { @Override public ClusterTasksResult execute(ClusterState currentState, List joiningTasks) throws Exception { - // The current state that MasterService uses might have been updated by a (different) master in a higher term already + // The current state that MasterService uses might have been updated by a (different) cluster-manager in a higher term + // already // Stop processing the current cluster state update, as there's no point in continuing to compute it as // it will later be rejected by Coordinator.publish(...) anyhow if (currentState.term() > term) { - logger.trace("encountered higher term {} than current {}, there is a newer master", currentState.term(), term); + logger.trace("encountered higher term {} than current {}, there is a newer cluster-manager", currentState.term(), term); throw new NotMasterException( - "Higher term encountered (current: " + currentState.term() + " > used: " + term + "), there is a newer master" + "Higher term encountered (current: " + + currentState.term() + + " > used: " + + term + + "), there is a newer cluster-manager" ); } else if (currentState.nodes().getMasterNodeId() == null && joiningTasks.stream().anyMatch(Task::isBecomeMasterTask)) { - assert currentState.term() < term : "there should be at most one become master task per election (= by term)"; + assert currentState.term() < term : "there should be at most one become cluster-manager task per election (= by term)"; final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata()) .term(term) .build(); final Metadata metadata = Metadata.builder(currentState.metadata()).coordinationMetadata(coordinationMetadata).build(); currentState = ClusterState.builder(currentState).metadata(metadata).build(); } else if (currentState.nodes().isLocalNodeElectedMaster()) { - assert currentState.term() == term : "term should be stable for the same master"; + assert currentState.term() == term : "term should be stable for the same cluster-manager"; } return super.execute(currentState, joiningTasks); } @@ -297,7 +302,7 @@ void logLastFailedJoinAttempt() { } public void sendJoinRequest(DiscoveryNode destination, long term, Optional optionalJoin, Runnable onCompletion) { - assert destination.isMasterNode() : "trying to join master-ineligible " + destination; + assert destination.isMasterNode() : "trying to join cluster-manager-ineligible " + destination; final StatusInfo statusInfo = nodeHealthService.getHealth(); if (statusInfo.getStatus() == UNHEALTHY) { logger.debug("dropping join request to [{}]: [{}]", destination, statusInfo.getInfo()); @@ -348,7 +353,7 @@ public String executor() { } public void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) { - assert startJoinRequest.getSourceNode().isMasterNode() : "sending start-join request for master-ineligible " + assert startJoinRequest.getSourceNode().isMasterNode() : "sending start-join request for cluster-manager-ineligible " 
+ startJoinRequest.getSourceNode(); transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler() { @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java index f18396e78fbf9..84adf834d85e8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java @@ -50,15 +50,15 @@ public class JoinRequest extends TransportRequest { /** * The minimum term for which the joining node will accept any cluster state publications. If the joining node is in a strictly greater - * term than the master it wants to join then the master must enter a new term and hold another election. Doesn't necessarily match + * term than the cluster-manager it wants to join then the cluster-manager must enter a new term and hold another election. Doesn't necessarily match * {@link JoinRequest#optionalJoin} and may be zero in join requests sent prior to {@link LegacyESVersion#V_7_7_0}. */ private final long minimumTerm; /** - * A vote for the receiving node. This vote is optional since the sending node may have voted for a different master in this term. - * That's ok, the sender likely discovered that the master we voted for lost the election and now we're trying to join the winner. Once - * the sender has successfully joined the master, the lack of a vote in its term causes another election (see + * A vote for the receiving node. This vote is optional since the sending node may have voted for a different cluster-manager in this term. + * That's ok, the sender likely discovered that the cluster-manager we voted for lost the election and now we're trying to join the winner. Once + * the sender has successfully joined the cluster-manager, the lack of a vote in its term causes another election (see * {@link Publication#onMissingJoin(DiscoveryNode)}). 
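The reworded javadoc above encodes a subtle protocol rule: a join carries the minimum term its sender will accept publications in, so a cluster-manager sitting in a strictly lower term must hold a fresh election before that join is of any use. A hypothetical sketch of the comparison; the exact term-bumping policy in Coordinator is more involved, and the "+ 1" below is just one plausible choice:

    final class MinimumTermSketch {
        static boolean mustHoldNewElection(long clusterManagerTerm, long joinMinimumTerm) {
            // The joiner rejects publications in terms below its minimum term.
            return joinMinimumTerm > clusterManagerTerm;
        }

        static long candidateNextTerm(long clusterManagerTerm, long joinMinimumTerm) {
            // Assumed policy: start the next election strictly above both terms.
            return Math.max(clusterManagerTerm, joinMinimumTerm) + 1;
        }
    }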
*/ private final Optional optionalJoin; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index ea5c33b4300a5..f0edeeb9319c5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -94,14 +94,19 @@ public String toString() { } public boolean isBecomeMasterTask() { - return reason.equals(BECOME_MASTER_TASK_REASON); + return reason.equals(BECOME_MASTER_TASK_REASON) || reason.equals(BECOME_CLUSTER_MANAGER_TASK_REASON); } public boolean isFinishElectionTask() { return reason.equals(FINISH_ELECTION_TASK_REASON); } + /** + * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #BECOME_CLUSTER_MANAGER_TASK_REASON} + */ + @Deprecated private static final String BECOME_MASTER_TASK_REASON = "_BECOME_MASTER_TASK_"; + private static final String BECOME_CLUSTER_MANAGER_TASK_REASON = "_BECOME_CLUSTER_MANAGER_TASK_"; private static final String FINISH_ELECTION_TASK_REASON = "_FINISH_ELECTION_"; } @@ -129,16 +134,19 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (joiningNodes.size() == 1 && joiningNodes.get(0).isFinishElectionTask()) { return results.successes(joiningNodes).build(currentState); } else if (currentNodes.getMasterNode() == null && joiningNodes.stream().anyMatch(Task::isBecomeMasterTask)) { - assert joiningNodes.stream().anyMatch(Task::isFinishElectionTask) : "becoming a master but election is not finished " + assert joiningNodes.stream().anyMatch(Task::isFinishElectionTask) : "becoming a cluster-manager but election is not finished " + joiningNodes; - // use these joins to try and become the master. + // use these joins to try and become the cluster-manager. // Note that we don't have to do any validation of the amount of joining nodes - the commit // during the cluster state publishing guarantees that we have enough - newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes); + newState = becomeClusterManagerAndTrimConflictingNodes(currentState, joiningNodes); nodesChanged = true; } else if (currentNodes.isLocalNodeElectedMaster() == false) { - logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode()); - throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); + logger.trace( + "processing node joins, but we are not the cluster-manager. current cluster-manager: {}", + currentNodes.getMasterNode() + ); + throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not cluster-manager for join request"); } else { newState = ClusterState.builder(currentState); } @@ -221,12 +229,12 @@ public ClusterTasksResult execute(ClusterState currentState, List jo return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); } else { // we must return a new cluster state instance to force publishing. 
This is important - // for the joining node to finalize its join and set us as a master + // for the joining node to finalize its join and set us as a cluster-manager return results.build(newState.build()); } } - protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { + protected ClusterState.Builder becomeClusterManagerAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { assert currentState.nodes().getMasterNodeId() == null : currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); @@ -256,13 +264,13 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState } } - // now trim any left over dead nodes - either left there when the previous master stepped down + // now trim any left over dead nodes - either left there when the previous cluster-manager stepped down // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .build(); - logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + logger.trace("becomeClusterManagerAndTrimConflictingNodes: {}", tmpState.nodes()); allocationService.cleanCaches(); tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState); return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); @@ -277,7 +285,7 @@ private void refreshDiscoveryNodeVersionAfterUpgrade(DiscoveryNodes currentNodes // updating the version of those nodes which have connection with the new master. // Note: This should get deprecated with BWC mode logic if (null == transportService) { - // this logic is only applicable when OpenSearch node is master and is noop for zen discovery node + // this logic is only applicable when OpenSearch node is cluster-manager and is noop for zen discovery node return; } if (currentNodes.getMinNodeVersion().before(Version.V_1_0_0)) { @@ -310,7 +318,7 @@ private void refreshDiscoveryNodeVersionAfterUpgrade(DiscoveryNodes currentNodes } } else { // in case existing OpenSearch node is present in the cluster but there is no connection to that node yet, - // either that node will send new JoinRequest to the master with version >=1.0, then no issue or + // either that node will send new JoinRequest to the cluster-manager/master with version >=1.0, then no issue or // there is an edge case if it doesn't send JoinRequest and connection is established, // then it can continue to report version as 7.10.2 instead of actual OpenSearch version. So, // removing the node from cluster state to prevent stale version reporting and let it reconnect. @@ -328,10 +336,22 @@ public boolean runOnlyOnMaster() { return false; } + /** + * a task that indicates the current node should become master + * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #newBecomeClusterManagerTask()} + */ + @Deprecated public static Task newBecomeMasterTask() { return new Task(null, Task.BECOME_MASTER_TASK_REASON); } + /** + * a task that indicates the current node should become cluster-manager + */ + public static Task newBecomeClusterManagerTask() { + return new Task(null, Task.BECOME_CLUSTER_MANAGER_TASK_REASON); + } + /** * a task that is used to signal the election is stopped and we should process pending joins.
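The deprecation pairing above is the classic rolling-upgrade pattern: keep recognizing the old reason string while upgraded nodes emit the new one, so both kinds of task are treated as the same election task in a mixed-version cluster. A minimal sketch of the dual check (the reason constants are copied from the hunk; the class name is hypothetical):

    final class BecomeClusterManagerTaskSketch {
        static final String DEPRECATED_REASON = "_BECOME_MASTER_TASK_";
        static final String NEW_REASON = "_BECOME_CLUSTER_MANAGER_TASK_";

        // A mixed-version cluster may submit either reason for the same election task.
        static boolean isBecomeClusterManagerTask(String reason) {
            return DEPRECATED_REASON.equals(reason) || NEW_REASON.equals(reason);
        }
    }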
* it may be used in combination with {@link JoinTaskExecutor#newBecomeMasterTask()} @@ -409,7 +429,7 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version /** * ensures that the joining node's major version is equal or higher to the minClusterNodeVersion. This is needed - * to ensure that if the master is already fully operating under the new major version, it doesn't go back to mixed + * to ensure that if the cluster-manager/master is already fully operating under the new major version, it doesn't go back to mixed * version mode **/ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version minClusterNodeVersion) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java index e599fffa68ff1..70a1c4f3ec220 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java @@ -102,7 +102,7 @@ public void clearTrackedNodes() { public void setAppliedVersion(final DiscoveryNode discoveryNode, final long appliedVersion) { final NodeAppliedStateTracker nodeAppliedStateTracker = appliedStateTrackersByNode.get(discoveryNode); if (nodeAppliedStateTracker == null) { - // Received an ack from a node that a later publication has removed (or we are no longer master). No big deal. + // Received an ack from a node that a later publication has removed (or we are no longer cluster-manager). No big deal. logger.trace("node {} applied version {} but this node's version is not being tracked", discoveryNode, appliedVersion); } else { nodeAppliedStateTracker.increaseAppliedVersion(appliedVersion); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index b4edc9401234d..fcf54aff7f478 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -190,7 +190,7 @@ void setCurrentNodes(DiscoveryNodes discoveryNodes) { } // For assertions - boolean currentNodeIsMaster() { + boolean currentNodeIsClusterManager() { return discoveryNodes.isLocalNodeElectedMaster(); } @@ -208,9 +208,9 @@ private void handleLeaderCheck(LeaderCheckRequest request) { logger.debug(message); throw new NodeHealthCheckFailureException(message); } else if (discoveryNodes.isLocalNodeElectedMaster() == false) { - logger.debug("rejecting leader check on non-master {}", request); + logger.debug("rejecting leader check on non-cluster-manager {}", request); throw new CoordinationStateRejectedException( - "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the master" + "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the cluster-manager" ); } else if (discoveryNodes.nodeExists(request.getSender()) == false) { logger.debug("rejecting leader check from removed node: {}", request); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java index 8cbb0446a1337..f6420bb32b5f3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java @@ -74,7 +74,7 @@ 
public class NoMasterBlockService { public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>( "cluster.no_master_block", "write", - NoMasterBlockService::parseNoMasterBlock, + NoMasterBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope, Property.Deprecated @@ -84,19 +84,19 @@ public class NoMasterBlockService { public static final Setting NO_CLUSTER_MANAGER_BLOCK_SETTING = new Setting<>( "cluster.no_cluster_manager_block", NO_MASTER_BLOCK_SETTING, - NoMasterBlockService::parseNoMasterBlock, + NoMasterBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope ); - private volatile ClusterBlock noMasterBlock; + private volatile ClusterBlock noClusterManagerBlock; public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) { - this.noMasterBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); + this.noClusterManagerBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoMasterBlock); } - private static ClusterBlock parseNoMasterBlock(String value) { + private static ClusterBlock parseNoClusterManagerBlock(String value) { switch (value) { case "all": return NO_MASTER_BLOCK_ALL; @@ -105,15 +105,17 @@ private static ClusterBlock parseNoMasterBlock(String value) { case "metadata_write": return NO_MASTER_BLOCK_METADATA_WRITES; default: - throw new IllegalArgumentException("invalid no-master block [" + value + "], must be one of [all, write, metadata_write]"); + throw new IllegalArgumentException( + "invalid no-cluster-manager block [" + value + "], must be one of [all, write, metadata_write]" + ); } } public ClusterBlock getNoMasterBlock() { - return noMasterBlock; + return noClusterManagerBlock; } - private void setNoMasterBlock(ClusterBlock noMasterBlock) { - this.noMasterBlock = noMasterBlock; + private void setNoMasterBlock(ClusterBlock noClusterManagerBlock) { + this.noClusterManagerBlock = noClusterManagerBlock; } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 02bdb65c7edf2..e8ab2f8d53d3f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -127,7 +127,7 @@ public void onFailure(final String source, final Exception e) { @Override public void onNoLongerMaster(String source) { - logger.debug("no longer master while processing node removal [{}]", source); + logger.debug("no longer cluster-manager while processing node removal [{}]", source); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index 76be3ebd3a374..e667052ca5fdd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -43,27 +43,27 @@ import java.util.Optional; public class PeersResponse extends TransportResponse { - private final Optional masterNode; + private final Optional clusterManagerNode; private final List knownPeers; private final long term; - public PeersResponse(Optional masterNode, List knownPeers, long term) { - assert masterNode.isPresent() == false || knownPeers.isEmpty(); - 
this.masterNode = masterNode; + public PeersResponse(Optional clusterManagerNode, List knownPeers, long term) { + assert clusterManagerNode.isPresent() == false || knownPeers.isEmpty(); + this.clusterManagerNode = clusterManagerNode; this.knownPeers = knownPeers; this.term = term; } public PeersResponse(StreamInput in) throws IOException { - masterNode = Optional.ofNullable(in.readOptionalWriteable(DiscoveryNode::new)); + clusterManagerNode = Optional.ofNullable(in.readOptionalWriteable(DiscoveryNode::new)); knownPeers = in.readList(DiscoveryNode::new); term = in.readLong(); - assert masterNode.isPresent() == false || knownPeers.isEmpty(); + assert clusterManagerNode.isPresent() == false || knownPeers.isEmpty(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(masterNode.orElse(null)); + out.writeOptionalWriteable(clusterManagerNode.orElse(null)); out.writeList(knownPeers); out.writeLong(term); } @@ -72,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException { * @return the node that is currently leading, according to the responding node. */ public Optional getMasterNode() { - return masterNode; + return clusterManagerNode; } /** @@ -93,7 +93,7 @@ public long getTerm() { @Override public String toString() { - return "PeersResponse{" + "masterNode=" + masterNode + ", knownPeers=" + knownPeers + ", term=" + term + '}'; + return "PeersResponse{" + "clusterManagerNode=" + clusterManagerNode + ", knownPeers=" + knownPeers + ", term=" + term + '}'; } @Override @@ -101,11 +101,13 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PeersResponse that = (PeersResponse) o; - return term == that.term && Objects.equals(masterNode, that.masterNode) && Objects.equals(knownPeers, that.knownPeers); + return term == that.term + && Objects.equals(clusterManagerNode, that.clusterManagerNode) + && Objects.equals(knownPeers, that.knownPeers); } @Override public int hashCode() { - return Objects.hash(masterNode, knownPeers, term); + return Objects.hash(clusterManagerNode, knownPeers, term); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index ee97c0e07eb48..9a1a392348660 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -85,7 +85,7 @@ public class PublicationTransportHandler { private final AtomicReference lastSeenClusterState = new AtomicReference<>(); - // the master needs the original non-serialized state as the cluster state contains some volatile information that we + // the cluster-manager needs the original non-serialized state as the cluster state contains some volatile information that we // don't want to be replicated because it's not usable on another node (e.g. UnassignedInfo.unassignedTimeNanos) or // because it's mostly just debugging info that would unnecessarily blow up CS updates (I think there was one in // snapshot code). 
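The comment above explains why the node must keep its original, non-serialized state for itself; the next hunk shows the handoff. A sketch of that publish-to-self pattern with stand-in types (PublishRequest here is not the OpenSearch class):

    import java.util.concurrent.atomic.AtomicReference;

    final class PublishToSelfSketch {
        record PublishRequest(long term) {}

        private final AtomicReference<PublishRequest> currentPublishRequestToSelf = new AtomicReference<>();

        void publishToSelf(PublishRequest request) {
            PublishRequest previous = currentPublishRequestToSelf.getAndSet(request);
            // An in-flight request can only be overridden after losing and regaining
            // leadership, so the replacement must carry a strictly higher term.
            assert previous == null || previous.term() < request.term();
        }
    }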
@@ -337,8 +337,9 @@ public void sendPublishRequest( if (destination.equals(discoveryNodes.getLocalNode())) { // if publishing to self, use original request instead (see currentPublishRequestToSelf for explanation) final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(publishRequest); - // we might override an in-flight publication to self in case where we failed as master and became master again, - // and the new publication started before the previous one completed (which fails anyhow because of higher current term) + // we might override an in-flight publication to self in case where we failed as cluster-manager and + // became cluster-manager again, and the new publication started before the previous one completed + // (which fails anyhow because of higher current term) assert previousRequest == null || previousRequest.getAcceptedState().term() < publishRequest.getAcceptedState().term(); responseActionListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java index 77320810eba4c..b4adad898271e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java @@ -50,8 +50,8 @@ public class PublishClusterStateStats implements Writeable, ToXContentObject { private final long compatibleClusterStateDiffReceivedCount; /** - * @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the master. - * @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff from the master. + * @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the cluster-manager. + * @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff from the cluster-manager. * @param compatibleClusterStateDiffReceivedCount the number of times that received cluster-state diffs were compatible with */ public PublishClusterStateStats( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java index 76517573115fd..86ae9ce8bc081 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java @@ -36,7 +36,7 @@ import java.util.Objects; /** - * Request which is used by the master node to publish cluster state changes. + * Request which is used by the cluster-manager node to publish cluster state changes. 
* Actual serialization of this request is done by {@link PublicationTransportHandler} */ public class PublishRequest { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index b38b0cf0f4693..1c26dff45775f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -53,8 +53,8 @@ public class Reconfigurator { private static final Logger logger = LogManager.getLogger(Reconfigurator.class); /** - * The cluster usually requires a vote from at least half of the master nodes in order to commit a cluster state update, and to achieve - * the best resilience it makes automatic adjustments to the voting configuration as master nodes join or leave the cluster. Adjustments + * The cluster usually requires a vote from at least half of the cluster-manager nodes in order to commit a cluster state update, and to achieve + * the best resilience it makes automatic adjustments to the voting configuration as cluster-manager nodes join or leave the cluster. Adjustments * that fix or increase the size of the voting configuration are always a good idea, but the wisdom of reducing the voting configuration * size is less clear. For instance, automatically reducing the voting configuration down to a single node means the cluster requires * this node to operate, which is not resilient: if it broke we could restore every other cluster-manager-eligible node in the cluster to health @@ -102,24 +102,24 @@ public String toString() { * @param retiredNodeIds Nodes that are leaving the cluster and which should not appear in the configuration if possible. Nodes that are * retired and not in the current configuration will never appear in the resulting configuration; this is useful * for shifting the vote in a 2-node cluster so one of the nodes can be restarted without harming availability. - * @param currentMaster The current master. Unless retired, we prefer to keep the current master in the config. + * @param currentClusterManager The current cluster-manager. Unless retired, we prefer to keep the current cluster-manager in the config. * @param currentConfig The current configuration. As far as possible, we prefer to keep the current config as-is. * @return An optimal configuration, or leave the current configuration unchanged if the optimal configuration has no live quorum. 
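The resilience argument in this javadoc reduces to simple quorum arithmetic, which is worth making explicit. A worked sketch (standard majority-quorum math, not OpenSearch code):

    final class QuorumSketch {
        static int quorumSize(int votingConfigSize) {
            return votingConfigSize / 2 + 1; // floor(n/2) + 1
        }

        static int tolerableFailures(int votingConfigSize) {
            // 5 -> 2, 3 -> 1, 1 -> 0: shrinking the config to one node tolerates nothing.
            return votingConfigSize - quorumSize(votingConfigSize);
        }
    }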
*/ public VotingConfiguration reconfigure( Set liveNodes, Set retiredNodeIds, - DiscoveryNode currentMaster, + DiscoveryNode currentClusterManager, VotingConfiguration currentConfig ) { - assert liveNodes.contains(currentMaster) : "liveNodes = " + liveNodes + " master = " + currentMaster; + assert liveNodes.contains(currentClusterManager) : "liveNodes = " + liveNodes + " cluster-manager = " + currentClusterManager; logger.trace( - "{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentMaster={}", + "{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentClusterManager={}", this, currentConfig, liveNodes, retiredNodeIds, - currentMaster + currentClusterManager ); final Set liveNodeIds = liveNodes.stream() @@ -134,7 +134,12 @@ public VotingConfiguration reconfigure( .filter(n -> retiredNodeIds.contains(n.getId()) == false) .forEach( n -> orderedCandidateNodes.add( - new VotingConfigNode(n.getId(), true, n.getId().equals(currentMaster.getId()), currentConfigNodeIds.contains(n.getId())) + new VotingConfigNode( + n.getId(), + true, + n.getId().equals(currentClusterManager.getId()), + currentConfigNodeIds.contains(n.getId()) + ) ) ); currentConfigNodeIds.stream() @@ -166,22 +171,22 @@ public VotingConfiguration reconfigure( static class VotingConfigNode implements Comparable { final String id; final boolean live; - final boolean currentMaster; + final boolean currentClusterManager; final boolean inCurrentConfig; - VotingConfigNode(String id, boolean live, boolean currentMaster, boolean inCurrentConfig) { + VotingConfigNode(String id, boolean live, boolean currentClusterManager, boolean inCurrentConfig) { this.id = id; this.live = live; - this.currentMaster = currentMaster; + this.currentClusterManager = currentClusterManager; this.inCurrentConfig = inCurrentConfig; } @Override public int compareTo(VotingConfigNode other) { - // prefer current master - final int currentMasterComp = Boolean.compare(other.currentMaster, currentMaster); - if (currentMasterComp != 0) { - return currentMasterComp; + // prefer current cluster-manager + final int currentClusterManagerComp = Boolean.compare(other.currentClusterManager, currentClusterManager); + if (currentClusterManagerComp != 0) { + return currentClusterManagerComp; } // prefer nodes that are live final int liveComp = Boolean.compare(other.live, live); @@ -205,8 +210,8 @@ public String toString() { + '\'' + ", live=" + live - + ", currentMaster=" - + currentMaster + + ", currentClusterManager=" + + currentClusterManager + ", inCurrentConfig=" + inCurrentConfig + '}'; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index c6c7e75497e29..6b31c39d71eb3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -68,12 +68,12 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { + "\n" + "Do you want to proceed?\n"; - static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node"; + static final String NOT_CLUSTER_MANAGER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node"; static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting configuration is empty, cluster has never been bootstrapped?"; - static
final String MASTER_NODE_BOOTSTRAPPED_MSG = "Master node was successfully bootstrapped"; + static final String CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG = "Cluster-manager node was successfully bootstrapped"; static final Setting UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_METADATA.getConcreteSetting( "cluster.metadata.unsafe-bootstrap" ); @@ -92,10 +92,10 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { @Override protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); - terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting"); - Boolean master = DiscoveryNode.isMasterNode(settings); - if (master == false) { - throw new OpenSearchException(NOT_MASTER_NODE_MSG); + terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.roles setting"); + Boolean clusterManager = DiscoveryNode.isMasterNode(settings); + if (clusterManager == false) { + throw new OpenSearchException(NOT_CLUSTER_MANAGER_NODE_MSG); } return true; @@ -171,6 +171,6 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc writer.writeFullStateAndCommit(state.v1(), newClusterState); } - terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); + terminal.println(CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 6510c57060fe0..9139cbac2b0be 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -70,6 +70,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -260,6 +261,18 @@ public Iterator> settings() { Property.IndexScope ); + /** + * Used to specify the replication type for the index. By default, document replication is used. + */ + public static final String SETTING_REPLICATION_TYPE = "index.replication.type"; + public static final Setting INDEX_REPLICATION_TYPE_SETTING = new Setting<>( + SETTING_REPLICATION_TYPE, + ReplicationType.DOCUMENT.toString(), + ReplicationType::parseString, + Property.IndexScope, + Property.Final + ); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java index f6f42e0d81063..eda4833a36c96 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java @@ -93,7 +93,7 @@ public MetadataIndexUpgradeService( } /** - * Checks that the index can be upgraded to the current version of the master node. + * Checks that the index can be upgraded to the current version of the cluster-manager node. * *

    * If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index 51a2557ef80bb..f07b74575950c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -56,7 +56,7 @@ public class SystemIndexMetadataUpgradeService implements ClusterStateListener { private final SystemIndices systemIndices; private final ClusterService clusterService; - private boolean master = false; + private boolean clusterManager = false; private volatile ImmutableOpenMap lastIndexMetadataMap = ImmutableOpenMap.of(); private volatile boolean updateTaskPending = false; @@ -68,11 +68,11 @@ public SystemIndexMetadataUpgradeService(SystemIndices systemIndices, ClusterSer @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster() != master) { - this.master = event.localNodeMaster(); + if (event.localNodeMaster() != clusterManager) { + this.clusterManager = event.localNodeMaster(); } - if (master && updateTaskPending == false) { + if (clusterManager && updateTaskPending == false) { final ImmutableOpenMap indexMetadataMap = event.state().metadata().indices(); if (lastIndexMetadataMap != indexMetadataMap) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 6bd943c5e1d0d..740b7a80dd1d9 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -73,7 +73,7 @@ public static boolean nodeRequiresLocalStorage(Settings settings) { boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); if (localStorageEnable == false && (isDataNode(settings) || isMasterNode(settings))) { // TODO: make this a proper setting validation logic, requiring multi-settings validation - throw new IllegalArgumentException("storage can not be disabled for master and data nodes"); + throw new IllegalArgumentException("storage can not be disabled for cluster-manager and data nodes"); } return localStorageEnable; } @@ -453,7 +453,7 @@ public boolean isDataNode() { } /** - * Can this node become master or not. + * Can this node become cluster-manager or not. 
*/ public boolean isMasterNode() { return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 8d84869bc8bec..9d79157ad5b22 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -70,10 +70,10 @@ public class DiscoveryNodes extends AbstractDiffable implements private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; - private final ImmutableOpenMap masterNodes; + private final ImmutableOpenMap clusterManagerNodes; private final ImmutableOpenMap ingestNodes; - private final String masterNodeId; + private final String clusterManagerNodeId; private final String localNodeId; private final Version minNonClientNodeVersion; private final Version maxNonClientNodeVersion; @@ -83,9 +83,9 @@ public class DiscoveryNodes extends AbstractDiffable implements private DiscoveryNodes( ImmutableOpenMap nodes, ImmutableOpenMap dataNodes, - ImmutableOpenMap masterNodes, + ImmutableOpenMap clusterManagerNodes, ImmutableOpenMap ingestNodes, - String masterNodeId, + String clusterManagerNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNonClientNodeVersion, @@ -94,9 +94,9 @@ private DiscoveryNodes( ) { this.nodes = nodes; this.dataNodes = dataNodes; - this.masterNodes = masterNodes; + this.clusterManagerNodes = clusterManagerNodes; this.ingestNodes = ingestNodes; - this.masterNodeId = masterNodeId; + this.clusterManagerNodeId = clusterManagerNodeId; this.localNodeId = localNodeId; this.minNonClientNodeVersion = minNonClientNodeVersion; this.maxNonClientNodeVersion = maxNonClientNodeVersion; @@ -110,14 +110,14 @@ public Iterator iterator() { } /** - * Returns {@code true} if the local node is the elected master node. + * Returns {@code true} if the local node is the elected cluster-manager node. 
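isMasterNode() above now accepts either role, so nodes configured with the legacy master role and nodes configured with the new cluster_manager role are both treated as cluster-manager-eligible during a rolling upgrade. A stand-alone sketch of that union check (Role is a hypothetical enum, not DiscoveryNodeRole):

    import java.util.EnumSet;
    import java.util.Set;

    final class RoleCheckSketch {
        enum Role { DATA, INGEST, MASTER, CLUSTER_MANAGER }

        static boolean isClusterManagerEligible(Set<Role> roles) {
            return roles.contains(Role.MASTER) || roles.contains(Role.CLUSTER_MANAGER);
        }

        public static void main(String[] args) {
            System.out.println(isClusterManagerEligible(EnumSet.of(Role.DATA, Role.MASTER)));   // true
            System.out.println(isClusterManagerEligible(EnumSet.of(Role.CLUSTER_MANAGER)));     // true
            System.out.println(isClusterManagerEligible(EnumSet.of(Role.DATA, Role.INGEST)));   // false
        }
    }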
*/ public boolean isLocalNodeElectedMaster() { if (localNodeId == null) { // we don't know yet the local node id, return false return false; } - return localNodeId.equals(masterNodeId); + return localNodeId.equals(clusterManagerNodeId); } /** @@ -148,12 +148,12 @@ public ImmutableOpenMap getDataNodes() { } /** - * Get a {@link Map} of the discovered master nodes arranged by their ids + * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids * - * @return {@link Map} of the discovered master nodes arranged by their ids + * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids */ public ImmutableOpenMap getMasterNodes() { - return this.masterNodes; + return this.clusterManagerNodes; } /** @@ -164,35 +164,35 @@ public ImmutableOpenMap getIngestNodes() { } /** - * Get a {@link Map} of the discovered master and data nodes arranged by their ids + * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids * - * @return {@link Map} of the discovered master and data nodes arranged by their ids + * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids */ public ImmutableOpenMap getMasterAndDataNodes() { ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); - nodes.putAll(masterNodes); + nodes.putAll(clusterManagerNodes); return nodes.build(); } /** - * Get a {@link Map} of the coordinating only nodes (nodes which are neither master, nor data, nor ingest nodes) arranged by their ids + * Get a {@link Map} of the coordinating only nodes (nodes which are neither cluster-manager, nor data, nor ingest nodes) arranged by their ids * * @return {@link Map} of the coordinating only nodes arranged by their ids */ public ImmutableOpenMap getCoordinatingOnlyNodes() { ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(this.nodes); - nodes.removeAll(masterNodes.keys()); + nodes.removeAll(clusterManagerNodes.keys()); nodes.removeAll(dataNodes.keys()); nodes.removeAll(ingestNodes.keys()); return nodes.build(); } /** - * Returns a stream of all nodes, with master nodes at the front + * Returns a stream of all nodes, with cluster-manager nodes at the front */ public Stream mastersFirstStream() { return Stream.concat( - StreamSupport.stream(masterNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(clusterManagerNodes.spliterator(), false).map(cur -> cur.value), StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false) ); } @@ -230,7 +230,7 @@ public boolean nodeExists(DiscoveryNode node) { /** * Determine if the given node exists and has the right roles. Supported roles vary by version, and our local cluster state might - * have come via an older master, so the roles may differ even if the node is otherwise identical. + * have come via an older cluster-manager, so the roles may differ even if the node is otherwise identical. */ public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) { final DiscoveryNode existing = nodes.get(discoveryNode.getId()); @@ -239,7 +239,7 @@ public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) { /** * Determine if the given node exists and has the right version. During upgrade from Elasticsearch version as OpenSearch node run in - * BWC mode and can have the version as 7.10.2 in cluster state from older master to OpenSearch master. + * BWC mode and can have the version as 7.10.2 in cluster state from older cluster-manager to OpenSearch cluster-manager. 
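mastersFirstStream(), touched in the hunk above, is a small but load-bearing helper: callers such as abdication contact cluster-manager-eligible nodes before everyone else. A sketch of the ordering with a stand-in Node record in place of DiscoveryNode:

    import java.util.List;
    import java.util.stream.Stream;

    final class ClusterManagersFirstSketch {
        record Node(String id, boolean clusterManagerEligible) {}

        static Stream<Node> clusterManagersFirst(List<Node> all) {
            // Eligible nodes first, then the rest, mirroring mastersFirstStream().
            return Stream.concat(
                all.stream().filter(Node::clusterManagerEligible),
                all.stream().filter(n -> !n.clusterManagerEligible())
            );
        }
    }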
*/ public boolean nodeExistsWithBWCVersion(DiscoveryNode discoveryNode) { final DiscoveryNode existing = nodes.get(discoveryNode.getId()); @@ -250,12 +250,12 @@ public boolean nodeExistsWithBWCVersion(DiscoveryNode discoveryNode) { } /** - * Get the id of the master node + * Get the id of the cluster-manager node * - * @return id of the master + * @return id of the cluster-manager */ public String getMasterNodeId() { - return this.masterNodeId; + return this.clusterManagerNodeId; } /** @@ -277,12 +277,12 @@ public DiscoveryNode getLocalNode() { } /** - * Returns the master node, or {@code null} if there is no master node + * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node */ @Nullable public DiscoveryNode getMasterNode() { - if (masterNodeId != null) { - return nodes.get(masterNodeId); + if (clusterManagerNodeId != null) { + return nodes.get(clusterManagerNodeId); } return null; } @@ -394,9 +394,9 @@ public String[] resolveNodes(String... nodes) { resolvedNodesIds.add(localNodeId); } } else if (nodeId.equals("_master") || nodeId.equals("_cluster_manager")) { - String masterNodeId = getMasterNodeId(); - if (masterNodeId != null) { - resolvedNodesIds.add(masterNodeId); + String clusterManagerNodeId = getMasterNodeId(); + if (clusterManagerNodeId != null) { + resolvedNodesIds.add(clusterManagerNodeId); } } else if (nodeExists(nodeId)) { resolvedNodesIds.add(nodeId); @@ -421,9 +421,9 @@ public String[] resolveNodes(String... nodes) { } } else if (roleNameIsClusterManager(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { - resolvedNodesIds.addAll(masterNodes.keys()); + resolvedNodesIds.addAll(clusterManagerNodes.keys()); } else { - resolvedNodesIds.removeAll(masterNodes.keys()); + resolvedNodesIds.removeAll(clusterManagerNodes.keys()); } } else if (DiscoveryNodeRole.INGEST_ROLE.roleName().equals(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { @@ -506,7 +506,7 @@ public String toString() { sb.append(", local"); } if (node == getMasterNode()) { - sb.append(", master"); + sb.append(", cluster-manager"); } sb.append("\n"); } @@ -517,21 +517,21 @@ public static class Delta { private final String localNodeId; @Nullable - private final DiscoveryNode previousMasterNode; + private final DiscoveryNode previousClusterManagerNode; @Nullable - private final DiscoveryNode newMasterNode; + private final DiscoveryNode newClusterManagerNode; private final List removed; private final List added; private Delta( - @Nullable DiscoveryNode previousMasterNode, - @Nullable DiscoveryNode newMasterNode, + @Nullable DiscoveryNode previousClusterManagerNode, + @Nullable DiscoveryNode newClusterManagerNode, String localNodeId, List removed, List added ) { - this.previousMasterNode = previousMasterNode; - this.newMasterNode = newMasterNode; + this.previousClusterManagerNode = previousClusterManagerNode; + this.newClusterManagerNode = newClusterManagerNode; this.localNodeId = localNodeId; this.removed = removed; this.added = added; @@ -542,17 +542,17 @@ public boolean hasChanges() { } public boolean masterNodeChanged() { - return Objects.equals(newMasterNode, previousMasterNode) == false; + return Objects.equals(newClusterManagerNode, previousClusterManagerNode) == false; } @Nullable - public DiscoveryNode previousMasterNode() { - return previousMasterNode; + public DiscoveryNode previousClusterManagerNode() { + return previousClusterManagerNode; } @Nullable public DiscoveryNode newMasterNode() { - return newMasterNode; + return 
newClusterManagerNode; } public boolean removed() { @@ -575,8 +575,8 @@ public String shortSummary() { final StringBuilder summary = new StringBuilder(); if (masterNodeChanged()) { summary.append("cluster-manager node changed {previous ["); - if (previousMasterNode() != null) { - summary.append(previousMasterNode()); + if (previousClusterManagerNode() != null) { + summary.append(previousClusterManagerNode()); } summary.append("], current ["); if (newMasterNode() != null) { @@ -609,11 +609,11 @@ public String shortSummary() { @Override public void writeTo(StreamOutput out) throws IOException { - if (masterNodeId == null) { + if (clusterManagerNodeId == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeString(masterNodeId); + out.writeString(clusterManagerNodeId); } out.writeVInt(nodes.size()); for (DiscoveryNode node : this) { @@ -659,7 +659,7 @@ public static Builder builder(DiscoveryNodes nodes) { public static class Builder { private final ImmutableOpenMap.Builder nodes; - private String masterNodeId; + private String clusterManagerNodeId; private String localNodeId; public Builder() { @@ -667,7 +667,7 @@ public Builder() { } public Builder(DiscoveryNodes nodes) { - this.masterNodeId = nodes.getMasterNodeId(); + this.clusterManagerNodeId = nodes.getMasterNodeId(); this.localNodeId = nodes.getLocalNodeId(); this.nodes = ImmutableOpenMap.builder(nodes.getNodes()); } @@ -712,8 +712,8 @@ public Builder remove(DiscoveryNode node) { return this; } - public Builder masterNodeId(String masterNodeId) { - this.masterNodeId = masterNodeId; + public Builder masterNodeId(String clusterManagerNodeId) { + this.clusterManagerNodeId = clusterManagerNodeId; return this; } @@ -784,7 +784,7 @@ public DiscoveryNodes build() { dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(), - masterNodeId, + clusterManagerNodeId, localNodeId, minNonClientNodeVersion == null ? Version.CURRENT : minNonClientNodeVersion, maxNonClientNodeVersion == null ? Version.CURRENT : maxNonClientNodeVersion, @@ -794,7 +794,7 @@ public DiscoveryNodes build() { } public boolean isLocalNodeElectedMaster() { - return masterNodeId != null && masterNodeId.equals(localNodeId); + return clusterManagerNodeId != null && clusterManagerNodeId.equals(localNodeId); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java index 05c11e112364a..9139c72577c9f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java @@ -50,7 +50,7 @@ /** * A {@link BatchedRerouteService} is a {@link RerouteService} that batches together reroute requests to avoid unnecessary extra reroutes. - * This component only does meaningful work on the elected master node. Reroute requests will fail with a {@link NotMasterException} on + * This component only does meaningful work on the elected cluster-manager node. Reroute requests will fail with a {@link NotMasterException} on * other nodes. 
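+ * <p>A hedged usage sketch (the {@code rerouteService} and {@code logger} locals are assumed);
+ * concurrent callers may be coalesced into a single reroute, and every pending listener completes
+ * when the merged reroute finishes:
+ * <pre>{@code
+ * rerouteService.reroute("async shard fetch", Priority.HIGH, ActionListener.wrap(
+ *     clusterState -> logger.trace("batched reroute completed"),
+ *     e -> logger.debug("batched reroute failed", e)));
+ * }</pre>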
*/ public class BatchedRerouteService implements RerouteService { @@ -146,7 +146,7 @@ public void onNoLongerMaster(String source) { } } ActionListener.onFailure(currentListeners, new NotMasterException("delayed reroute [" + reason + "] cancelled")); - // no big deal, the new master will reroute again + // no big deal, the new cluster-manager will reroute again } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 543a6cba2e91b..1b0639bc98306 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -268,7 +268,7 @@ public Snapshot snapshot() { /** * Gets the {@link IndexId} of the recovery source. May contain {@link IndexMetadata#INDEX_UUID_NA_VALUE} as the index uuid if it - * was created by an older version master in a mixed version cluster. + * was created by an older version cluster-manager in a mixed version cluster. * * @return IndexId */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 09f1708b01307..8f82d6dcee318 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -301,7 +301,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.reason = Reason.values()[(int) in.readByte()]; this.unassignedTimeMillis = in.readLong(); // As System.nanoTime() cannot be compared across different JVMs, reset it to now. - // This means that in master fail-over situations, elapsed delay time is forgotten. + // This means that in cluster-manager fail-over situations, elapsed delay time is forgotten. this.unassignedTimeNanos = System.nanoTime(); this.delayed = in.readBoolean(); this.message = in.readOptionalString(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java index ca0744f099f84..9286ca3dd533d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -84,7 +84,7 @@ void allocateUnassigned( AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation); /** - * Called when this node becomes the elected master and when it stops being the elected master, so that implementations can clean up any + * Called when this node becomes the elected cluster-manager and when it stops being the elected cluster-manager, so that implementations can clean up any * in-flight activity from an earlier mastership. 
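+ * <p>A minimal implementation sketch, loosely modeled on an allocator that caches async shard
+ * fetches (the two cache fields are assumptions for illustration, not part of this interface):
+ * <pre>{@code
+ * @Override
+ * public void cleanCaches() {
+ *     asyncFetchStarted.clear(); // cached fetch results are stale across cluster-manager terms
+ *     asyncFetchStore.clear();
+ * }
+ * }</pre>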
*/ void cleanCaches(); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 20f8c0a00e32b..221dd3ee55b21 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -270,7 +270,7 @@ public void removeTimeoutListener(TimeoutClusterStateListener listener) { } /** - * Add a listener for on/off local node master events + * Add a listener for on/off local node cluster-manager events */ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { addListener(listener); } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 46d65f310a427..27b8e6d29ee49 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -207,7 +207,7 @@ public void removeListener(ClusterStateListener listener) { } /** - * Add a listener for on/off local node master events + * Add a listener for on/off local node cluster-manager events */ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { clusterApplierService.addLocalNodeMasterListener(listener); } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index ad0bc599420f1..1aa2ea921e4b0 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -228,14 +228,14 @@ private static boolean isMasterUpdateThread() { } public static boolean assertMasterUpdateThread() { - assert isMasterUpdateThread() : "not called from the master service thread"; + assert isMasterUpdateThread() : "not called from the cluster-manager service thread"; return true; } public static boolean assertNotMasterUpdateThread(String reason) { assert isMasterUpdateThread() == false : "Expected current thread [" + Thread.currentThread() - + "] to not be the master service thread. Reason: [" + + "] to not be the cluster-manager service thread.
Reason: [" + reason + "]"; return true; @@ -244,16 +244,16 @@ assert isMasterUpdateThread() == false : "Expected current thread [" private void runTasks(TaskInputs taskInputs) { final String summary = taskInputs.summary; if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, master service not started", summary); + logger.debug("processing [{}]: ignoring, cluster-manager service not started", summary); return; } logger.debug("executing cluster state update for [{}]", summary); final ClusterState previousClusterState = state(); - if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyWhenMaster()) { - logger.debug("failing [{}]: local node is no longer master", summary); - taskInputs.onNoLongerMaster(); + if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyWhenClusterManager()) { + logger.debug("failing [{}]: local node is no longer cluster-manager", summary); + taskInputs.onNoLongerClusterManager(); return; } @@ -402,7 +402,7 @@ private ClusterState patchVersions(ClusterState previousClusterState, ClusterTas ClusterState newClusterState = executionResult.resultingState; if (previousClusterState != newClusterState) { - // only the master controls the version numbers + // only the cluster-manager controls the version numbers Builder builder = incrementVersion(newClusterState); if (previousClusterState.routingTable() != newClusterState.routingTable()) { builder.routingTable( @@ -616,7 +616,10 @@ public void onNoLongerMaster(String source) { listener.onNoLongerMaster(source); } catch (Exception e) { logger.error( - () -> new ParameterizedMessage("exception thrown by listener while notifying no longer master from [{}]", source), + () -> new ParameterizedMessage( + "exception thrown by listener while notifying no longer cluster-manager from [{}]", + source + ), e ); } @@ -722,7 +725,7 @@ private static class AckCountDownListener implements Discovery.AckListener { private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; - private final DiscoveryNode masterNode; + private final DiscoveryNode clusterManagerNode; private final ThreadPool threadPool; private final long clusterStateVersion; private volatile Scheduler.Cancellable ackTimeoutCallback; @@ -737,11 +740,11 @@ private static class AckCountDownListener implements Discovery.AckListener { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.threadPool = threadPool; - this.masterNode = nodes.getMasterNode(); + this.clusterManagerNode = nodes.getMasterNode(); int countDown = 0; for (DiscoveryNode node : nodes) { - // we always wait for at least the master node - if (node.equals(masterNode) || ackedTaskListener.mustAck(node)) { + // we always wait for at least the cluster-manager node + if (node.equals(clusterManagerNode) || ackedTaskListener.mustAck(node)) { countDown++; } } @@ -771,7 +774,7 @@ public void onCommit(TimeValue commitTime) { @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) { + if (node.equals(clusterManagerNode) == false && ackedTaskListener.mustAck(node) == false) { return; } if (e == null) { @@ -879,11 +882,11 @@ private class TaskInputs { this.updateTasks = updateTasks; } - boolean runOnlyWhenMaster() { + boolean runOnlyWhenClusterManager() { return executor.runOnlyOnMaster(); } - void onNoLongerMaster() { + void onNoLongerClusterManager() { 
updateTasks.forEach(task -> task.listener.onNoLongerMaster(task.source())); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c67d299be3435..94f30016a7b8b 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -40,7 +40,6 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -571,8 +570,7 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS, ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, - IndexingPressure.MAX_INDEXING_BYTES, - TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED + IndexingPressure.MAX_INDEXING_BYTES ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index fbb37651198d4..8f3cb9e90ee56 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -87,7 +87,7 @@ public ConsistentSettingsService(Settings settings, ClusterService clusterServic /** * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are - * published by the master node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. + * published by the cluster-manager node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. 
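+ * <p>A hedged wiring sketch (argument names assumed); the returned listener publishes the hashes
+ * only while this node is the elected cluster-manager:
+ * <pre>{@code
+ * clusterService.addLocalNodeMasterListener(
+ *     new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher());
+ * }</pre>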
*/ public LocalNodeMasterListener newHashPublisher() { // eagerly compute hashes to be published @@ -116,7 +116,7 @@ public boolean areAllConsistent() { concreteSecureSetting.getKey() ); } else if (publishedSaltAndHash == null && localHash != null) { - // setting missing on master but present locally + // setting missing on cluster-manager but present locally logger.warn( "no published hash for the consistent secure setting [{}] but it exists on the local node", concreteSecureSetting.getKey() @@ -256,7 +256,7 @@ static final class HashesPublisher implements LocalNodeMasterListener { } @Override - public void onMaster() { + public void onClusterManager() { clusterService.submitStateUpdateTask("publish-secure-settings-hashes", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -282,7 +282,7 @@ public void onFailure(String source, Exception e) { } @Override - public void offMaster() { + public void offClusterManager() { logger.trace("I am no longer master, nothing to do"); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 528d6cc9f5e23..68e1b5b598d40 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; @@ -207,6 +208,16 @@ public final class IndexScopedSettings extends AbstractScopedSettings { ) ); + /** + * Map of feature flag name to feature-flagged index setting. Once each feature + * is ready for production release, the feature flag can be removed, and the + * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. 
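+ * <p>A sketch of the intended use, mirroring the registration loop in {@code SettingsModule}:
+ * a flagged setting is only registered when its feature flag is enabled.
+ * <pre>{@code
+ * for (Map.Entry<String, Setting> entry : FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) {
+ *     if (FeatureFlags.isEnabled(entry.getKey())) {
+ *         registerSetting(entry.getValue());
+ *     }
+ * }
+ * }</pre>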
+ */ + public static final Map FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( + FeatureFlags.REPLICATION_TYPE, + IndexMetadata.INDEX_REPLICATION_TYPE_SETTING + ); + public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index 79ee0bf9f975a..0874814f940d4 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -37,6 +37,7 @@ import org.opensearch.common.Strings; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Module; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -85,6 +86,12 @@ public SettingsModule( registerSetting(setting); } + for (Map.Entry featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { + if (FeatureFlags.isEnabled(featureFlaggedSetting.getKey())) { + registerSetting(featureFlaggedSetting.getValue()); + } + } + for (Setting setting : additionalSettings) { registerSetting(setting); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java new file mode 100644 index 0000000000000..34c613f5423d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +/** + * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. + * These are used to gate the visibility/availability of incomplete features. For more information, see + * https://featureflags.io/feature-flag-introduction/ + */ +public class FeatureFlags { + + /** + * Gates the visibility of the index setting that allows changing of replication type. + * Once the feature is ready for production release, this feature flag can be removed. + */ + public static final String REPLICATION_TYPE = "opensearch.experimental.feature.replication_type.enabled"; + + /** + * Used to test feature flags whose values are expected to be booleans. + * This method returns true if the value is "true" (case-insensitive), + * and false otherwise.
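+ * <p>Illustration only; since the flag is a plain JVM system property, a test can toggle it directly:
+ * <pre>{@code
+ * System.setProperty(FeatureFlags.REPLICATION_TYPE, "true");
+ * assert FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE);
+ * }</pre>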
+ */ + public static boolean isEnabled(String featureFlagName) { + return "true".equalsIgnoreCase(System.getProperty(featureFlagName)); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 9e28bb2b795c3..5a967528a6ae2 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -40,8 +40,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.node.Node; -import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.threadpool.TaskAwareRunnable; import java.util.List; import java.util.Optional; @@ -57,7 +55,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; public class OpenSearchExecutors { @@ -175,39 +172,14 @@ public static OpenSearchThreadPoolExecutor newFixed( ); } - public static OpenSearchThreadPoolExecutor newAutoQueueFixed( - String name, - int size, - int initialQueueCapacity, - int minQueueSize, - int maxQueueSize, - int frameSize, - TimeValue targetedResponseTime, - ThreadFactory threadFactory, - ThreadContext contextHolder - ) { - return newAutoQueueFixed( - name, - size, - initialQueueCapacity, - minQueueSize, - maxQueueSize, - frameSize, - targetedResponseTime, - threadFactory, - contextHolder, - null - ); - } - /** * Return a new executor that will automatically adjust the queue size based on queue throughput. 
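+ * <p>A hedged construction sketch (all sizes are illustrative, not tuning recommendations;
+ * {@code threadPool} is an assumed local):
+ * <pre>{@code
+ * OpenSearchThreadPoolExecutor executor = OpenSearchExecutors.newAutoQueueFixed(
+ *     "search", 8, 1000, 100, 2000, 2000, TimeValue.timeValueSeconds(1),
+ *     OpenSearchExecutors.daemonThreadFactory("search"), threadPool.getThreadContext());
+ * }</pre>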
* - * @param size number of fixed threads to use for executing tasks + * @param size number of fixed threads to use for executing tasks * @param initialQueueCapacity initial size of the executor queue - * @param minQueueSize minimum queue size that the queue can be adjusted to - * @param maxQueueSize maximum queue size that the queue can be adjusted to - * @param frameSize number of tasks during which stats are collected before adjusting queue size + * @param minQueueSize minimum queue size that the queue can be adjusted to + * @param maxQueueSize maximum queue size that the queue can be adjusted to + * @param frameSize number of tasks during which stats are collected before adjusting queue size */ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( String name, @@ -218,8 +190,7 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( int frameSize, TimeValue targetedResponseTime, ThreadFactory threadFactory, - ThreadContext contextHolder, - AtomicReference runnableTaskListener + ThreadContext contextHolder ) { if (initialQueueCapacity <= 0) { throw new IllegalArgumentException( @@ -230,17 +201,6 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( ConcurrentCollections.newBlockingQueue(), initialQueueCapacity ); - - Function runnableWrapper; - if (runnableTaskListener != null) { - runnableWrapper = (runnable) -> { - TaskAwareRunnable taskAwareRunnable = new TaskAwareRunnable(contextHolder, runnable, runnableTaskListener); - return new TimedRunnable(taskAwareRunnable); - }; - } else { - runnableWrapper = TimedRunnable::new; - } - return new QueueResizingOpenSearchThreadPoolExecutor( name, size, @@ -250,7 +210,7 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( queue, minQueueSize, maxQueueSize, - runnableWrapper, + TimedRunnable::new, frameSize, targetedResponseTime, threadFactory, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 35d7d925ce106..d844a8f158ea4 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -66,7 +66,6 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -135,23 +134,16 @@ public StoredContext stashContext() { * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. * Otherwise when context is stash, it should be empty. 
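+ * A hedged illustration of the behaviour described above (header value invented for the example):
+ * <pre>{@code
+ * threadContext.putHeader(Task.X_OPAQUE_ID, "user-supplied-id");
+ * try (ThreadContext.StoredContext restore = threadContext.stashContext()) {
+ *     // X-Opaque-ID survives the stash; other request headers do not
+ *     assert "user-supplied-id".equals(threadContext.getHeader(Task.X_OPAQUE_ID));
+ * }
+ * }</pre>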
*/ - - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; - if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - threadContextStruct = threadContextStruct.putHeaders( + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( MapBuilder.newMapBuilder() .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) .immutableMap() ); + threadLocal.set(threadContextStruct); + } else { + threadLocal.set(DEFAULT_CONTEXT); } - - if (context.transientHeaders.containsKey(TASK_ID)) { - threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); - } - - threadLocal.set(threadContextStruct); - return () -> { // If the node and thus the threadLocal get closed while this task // is still executing, we don't want this runnable to fail with an diff --git a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java index 161450308b384..e281bbbfacba1 100644 --- a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java @@ -53,11 +53,11 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP * Creates a new AckClusterStatePublishResponseHandler * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond * @param ackListener the {@link org.opensearch.discovery.Discovery.AckListener} to notify for each response - * gotten from non master nodes + * gotten from non cluster-manager nodes */ public AckClusterStatePublishResponseHandler(Set publishingToNodes, Discovery.AckListener ackListener) { - // Don't count the master as acknowledged, because it's not done yet - // otherwise we might end up with all the nodes but the master holding the latest cluster state + // Don't count the cluster-manager as acknowledged, because it's not done yet + // otherwise we might end up with all the nodes but the cluster-manager holding the latest cluster state super(publishingToNodes); this.ackListener = ackListener; } diff --git a/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java b/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java index c0cd390b66f78..6ee01d055be04 100644 --- a/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java @@ -41,8 +41,8 @@ import java.util.concurrent.TimeUnit; /** - * Handles responses obtained when publishing a new cluster state from master to all non master nodes. - * Allows to await a reply from all non master nodes, up to a timeout + * Handles responses obtained when publishing a new cluster state from cluster-manager to all non cluster-manager nodes. 
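+ * A hedged usage sketch (local variables assumed; method names as declared in this class):
+ * <pre>{@code
+ * handler.onResponse(respondingNode);   // or handler.onFailure(node, exception)
+ * boolean result = handler.awaitAllNodes(TimeValue.timeValueSeconds(30));
+ * // blocks until every non cluster-manager node has replied or the timeout elapsed
+ * }</pre>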
+ * Allows to await a reply from all non cluster-manager nodes, up to a timeout */ public class BlockingClusterStatePublishResponseHandler { @@ -62,7 +62,7 @@ public BlockingClusterStatePublishResponseHandler(Set publishingT } /** - * Called for each response obtained from non master nodes + * Called for each response obtained from non cluster-manager nodes * * @param node the node that replied to the publish event */ @@ -73,7 +73,7 @@ public void onResponse(DiscoveryNode node) { } /** - * Called for each failure obtained from non master nodes + * Called for each failure obtained from non cluster-manager nodes * @param node the node that replied to the publish event */ public void onFailure(DiscoveryNode node, Exception e) { @@ -85,7 +85,7 @@ public void onFailure(DiscoveryNode node, Exception e) { } /** - * Allows to wait for all non master nodes to reply to the publish event up to a timeout + * Allows to wait for all non cluster-manager nodes to reply to the publish event up to a timeout * @param timeout the timeout * @return true if the timeout expired or not, false otherwise */ diff --git a/server/src/main/java/org/opensearch/discovery/Discovery.java b/server/src/main/java/org/opensearch/discovery/Discovery.java index ac5028f6dfc51..25d136d8a2563 100644 --- a/server/src/main/java/org/opensearch/discovery/Discovery.java +++ b/server/src/main/java/org/opensearch/discovery/Discovery.java @@ -37,7 +37,7 @@ /** * A pluggable module allowing to implement discovery of other nodes, publishing of the cluster - * state to all nodes, electing a master of the cluster that raises cluster state change + * state to all nodes, electing a cluster-manager of the cluster that raises cluster state change * events. */ public interface Discovery extends LifecycleComponent, ClusterStatePublisher { diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 427615da7e4d0..af3d07a1b12d5 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -119,7 +119,7 @@ public DiscoveryModule( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, - MasterService masterService, + MasterService clusterManagerService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, @@ -195,7 +195,7 @@ public DiscoveryModule( transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, gatewayMetaState::getPersistedState, seedHostsProvider, clusterApplier, diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index fe669e7b6d073..c174016925696 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -208,7 +208,7 @@ private DiscoveryNode getLocalNode() { * Invoked on receipt of a PeersResponse from a node that believes it's an active leader, which this node should therefore try and join. * Note that invocations of this method are not synchronised. By the time it is called we may have been deactivated. */ - protected abstract void onActiveMasterFound(DiscoveryNode masterNode, long term); + protected abstract void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term); /** * Invoked when the set of found peers changes. 
Note that invocations of this method are not fully synchronised, so we only guarantee @@ -449,7 +449,7 @@ public void handleResponse(PeersResponse response) { if (response.getMasterNode().equals(Optional.of(discoveryNode))) { // Must not hold lock here to avoid deadlock assert holdsLock() == false : "PeerFinder mutex is held in error"; - onActiveMasterFound(discoveryNode, response.getTerm()); + onActiveClusterManagerFound(discoveryNode, response.getTerm()); } } diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index cb431a6a5d0de..dd4819f5804ac 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -92,13 +92,13 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc assert DiscoveryNode.isDataNode(env.settings()) == false; if (DiscoveryNode.isMasterNode(env.settings()) == false) { - processNoMasterNoDataNode(terminal, dataPaths, env); + processNoClusterManagerNoDataNode(terminal, dataPaths, env); } else { - processMasterNoDataNode(terminal, dataPaths, env); + processClusterManagerNoDataNode(terminal, dataPaths, env); } } - private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processNoClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -126,7 +126,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi outputVerboseInformation(terminal, indexPaths, indexUUIDs, metadata); - terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size())); + terminal.println(noClusterManagerMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size())); outputHowToSeeVerboseInformation(terminal); terminal.println("Node is being re-purposed as no-cluster-manager and no-data. 
Clean-up of index data will be performed."); @@ -140,7 +140,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); } - private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -205,7 +205,7 @@ private Set indexUUIDsFor(Set indexPaths) { return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); } - static String noMasterMessage(int indexes, int shards, int indexMetadata) { + static String noClusterManagerMessage(int indexes, int shards, int indexMetadata) { return "Found " + indexes + " indices (" + shards + " shards and " + indexMetadata + " index meta data) to clean up"; } diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index 3baa5bfb9e410..25a1096919939 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -261,7 +261,7 @@ private IndexMetadata stripAliases(IndexMetadata indexMetadata) { } /** - * Allocates the detected list of dangling indices by sending them to the master node + * Allocates the detected list of dangling indices by sending them to the cluster-manager node * for allocation, provided auto-import is enabled via the * {@link #AUTO_IMPORT_DANGLING_INDICES_SETTING} setting. * @param metadata the current cluster metadata, used to filter out dangling indices that cannot be allocated diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 3081c4da8f7a7..0ca70f37afa83 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -90,12 +90,12 @@ * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and - * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster. + * non-stale state, and cluster-manager-ineligible nodes receive the real cluster state from the elected cluster-manager after joining the cluster. */ public class GatewayMetaState implements Closeable { /** - * Fake node ID for a voting configuration written by a master-ineligible data node to indicate that its on-disk state is potentially + * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially * stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. 
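+ * <p>A hedged sketch of the check this enables (constant and accessor names assumed for
+ * illustration): a configuration containing the fake id must not be treated as electable.
+ * <pre>{@code
+ * boolean usableForElection = coordinationMetadata.getLastAcceptedConfiguration()
+ *     .getNodeIds().contains(STALE_STATE_CONFIG_NODE_ID) == false;
+ * }</pre>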
*/ @@ -502,8 +502,8 @@ static class LucenePersistedState implements PersistedState { // (2) the index is currently empty since it was opened with IndexWriterConfig.OpenMode.CREATE // In the common case it's actually sufficient to commit() the existing state and not do any indexing. For instance, - // this is true if there's only one data path on this master node, and the commit we just loaded was already written out - // by this version of OpenSearch. TODO TBD should we avoid indexing when possible? + // this is true if there's only one data path on this cluster-manager node, and the commit we just loaded was already written + // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayService.java b/server/src/main/java/org/opensearch/gateway/GatewayService.java index 47347cea50e27..1a0efbcdf5bfb 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayService.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayService.java @@ -132,8 +132,8 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final int expectedNodes; private final int recoverAfterDataNodes; private final int expectedDataNodes; - private final int recoverAfterMasterNodes; - private final int expectedMasterNodes; + private final int recoverAfterClusterManagerNodes; + private final int expectedClusterManagerNodes; private final Runnable recoveryRunnable; @@ -155,22 +155,22 @@ public GatewayService( // allow to control a delay of when indices will get created this.expectedNodes = EXPECTED_NODES_SETTING.get(settings); this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings); - this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(settings); + this.expectedClusterManagerNodes = EXPECTED_MASTER_NODES_SETTING.get(settings); if (RECOVER_AFTER_TIME_SETTING.exists(settings)) { recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings); - } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) { + } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedClusterManagerNodes >= 0) { recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET; } else { recoverAfterTime = null; } this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(settings); this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings); - // default the recover after master nodes to the minimum master nodes in the discovery + // default the recover after cluster-manager nodes to the minimum cluster-manager nodes in the discovery if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(settings)) { - recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings); + recoverAfterClusterManagerNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings); } else { - recoverAfterMasterNodes = -1; + recoverAfterClusterManagerNodes = -1; } if (discovery instanceof Coordinator) { @@ -216,7 +216,7 @@ public void clusterChanged(final ClusterChangedEvent event) { final DiscoveryNodes nodes = state.nodes(); if (state.nodes().getMasterNodeId() == null) { - logger.debug("not recovering from gateway, no master elected yet"); + logger.debug("not recovering from gateway, no cluster-manager elected yet"); } else if (recoverAfterNodes != -1 && (nodes.getMasterAndDataNodes().size()) < 
recoverAfterNodes) { logger.debug( "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", @@ -229,16 +229,16 @@ public void clusterChanged(final ClusterChangedEvent event) { nodes.getDataNodes().size(), recoverAfterDataNodes ); - } else if (recoverAfterMasterNodes != -1 && nodes.getMasterNodes().size() < recoverAfterMasterNodes) { + } else if (recoverAfterClusterManagerNodes != -1 && nodes.getMasterNodes().size() < recoverAfterClusterManagerNodes) { logger.debug( "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", nodes.getMasterNodes().size(), - recoverAfterMasterNodes + recoverAfterClusterManagerNodes ); } else { boolean enforceRecoverAfterTime; String reason; - if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) { + if (expectedNodes == -1 && expectedClusterManagerNodes == -1 && expectedDataNodes == -1) { // no expected is set, honor the setting if they are there enforceRecoverAfterTime = true; reason = "recover_after_time was set to [" + recoverAfterTime + "]"; @@ -252,10 +252,14 @@ public void clusterChanged(final ClusterChangedEvent event) { } else if (expectedDataNodes != -1 && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected... enforceRecoverAfterTime = true; reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]"; - } else if (expectedMasterNodes != -1 && (nodes.getMasterNodes().size() < expectedMasterNodes)) { + } else if (expectedClusterManagerNodes != -1 && (nodes.getMasterNodes().size() < expectedClusterManagerNodes)) { // does not meet the expected... enforceRecoverAfterTime = true; - reason = "expecting [" + expectedMasterNodes + "] master nodes, but only have [" + nodes.getMasterNodes().size() + "]"; + reason = "expecting [" + + expectedClusterManagerNodes + + "] cluster-manager nodes, but only have [" + + nodes.getMasterNodes().size() + + "]"; } } performStateRecovery(enforceRecoverAfterTime, reason); @@ -333,7 +337,7 @@ public void clusterStateProcessed(final String source, final ClusterState oldSta @Override public void onNoLongerMaster(String source) { - logger.debug("stepped down as master before recovering state [{}]", source); + logger.debug("stepped down as cluster-manager before recovering state [{}]", source); resetRecoveredFlags(); } diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index c8ace3d218864..4c29bc6f2692f 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -103,9 +103,9 @@ public LocalAllocateDangledIndices( public void allocateDangled(Collection indices, ActionListener listener) { ClusterState clusterState = clusterService.state(); - DiscoveryNode masterNode = clusterState.nodes().getMasterNode(); - if (masterNode == null) { - listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request")); + DiscoveryNode clusterManagerNode = clusterState.nodes().getMasterNode(); + if (clusterManagerNode == null) { + listener.onFailure(new MasterNotDiscoveredException("no cluster-manager to send allocate dangled request")); return; } AllocateDangledRequest request = new AllocateDangledRequest( @@ -113,7 +113,7 @@ public void allocateDangled(Collection indices, ActionListener(listener, 
AllocateDangledResponse::new, ThreadPool.Names.SAME) diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index aa69417af1897..8ba9c47902115 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -46,6 +46,7 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; import org.opensearch.node.Node; @@ -530,6 +531,7 @@ public final class IndexSettings { private final String nodeName; private final Settings nodeSettings; private final int numberOfShards; + private final ReplicationType replicationType; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -681,6 +683,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); + replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -915,6 +918,13 @@ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null); } + /** + * Returns true if segment replication is enabled on the index. + */ + public boolean isSegRepEnabled() { + return ReplicationType.SEGMENT.equals(replicationType); + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index bcafddd6d5816..df8eb8f38cfcb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -455,21 +455,23 @@ private static void innerParseObject( private static void nested(ParseContext context, ObjectMapper.Nested nested) { ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); + Version indexVersion = context.indexSettings().getIndexVersionCreated(); if (nested.isIncludeInParent()) { - addFields(nestedDoc, parentDoc); + addFields(indexVersion, nestedDoc, parentDoc); } if (nested.isIncludeInRoot()) { ParseContext.Document rootDoc = context.rootDoc(); - // don't add it twice, if its included in parent, and we are handling the master doc... + // don't add it twice, if it's included in parent, and we are handling the root doc...
if (!nested.isIncludeInParent() || parentDoc != rootDoc) { - addFields(nestedDoc, rootDoc); + addFields(indexVersion, nestedDoc, rootDoc); } } } - private static void addFields(ParseContext.Document nestedDoc, ParseContext.Document rootDoc) { + private static void addFields(Version indexVersion, ParseContext.Document nestedDoc, ParseContext.Document rootDoc) { + String nestedPathFieldName = NestedPathFieldMapper.name(indexVersion); for (IndexableField field : nestedDoc.getFields()) { - if (!field.name().equals(TypeFieldMapper.NAME)) { + if (field.name().equals(nestedPathFieldName) == false) { rootDoc.add(field); } } @@ -498,7 +500,7 @@ private static ParseContext nestedContext(ParseContext context, ObjectMapper map // the type of the nested doc starts with __, so we can identify that its a nested one in filters // note, we don't prefix it with the type of the doc since it allows us to execute a nested query // across types (for example, with similar nested objects) - nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + nestedDoc.add(NestedPathFieldMapper.field(context.indexSettings().getIndexVersionCreated(), mapper.nestedTypePath())); return context; } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 819df4a6f396e..33c6ff2bc4391 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -94,7 +94,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { */ public enum MergeReason { /** - * Pre-flight check before sending a mapping update to the master + * Pre-flight check before sending a mapping update to the cluster-manager */ MAPPING_UPDATE_PREFLIGHT, /** @@ -303,7 +303,7 @@ public boolean updateMapping(final IndexMetadata currentIndexMetadata, final Ind } // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same - // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the + // mapping, in this case, we send to the cluster-manager to refresh its own version of the mappings (to conform with the // merge version of it, which it does when refreshing the mappings), and warn log it. if (documentMapper().mappingSource().equals(incomingMappingSource) == false) { logger.debug( diff --git a/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java new file mode 100644 index 0000000000000..f420897ca187f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.lookup.SearchLookup; + +import java.util.Collections; + +public class NestedPathFieldMapper extends MetadataFieldMapper { + // OpenSearch version 2.0 removed types; this name is used for bwc + public static final String LEGACY_NAME = "_type"; + public static final String NAME = "_nested_path"; + + public static class Defaults { + public static final FieldType FIELD_TYPE = new FieldType(); + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + } + + /** private ctor; using SINGLETON to control BWC */ + private NestedPathFieldMapper(String name) { + super(new NestedPathFieldType(name)); + } + + /** returns the field name */ + public static String name(Version version) { + if (version.before(Version.V_2_0_0)) { + return LEGACY_NAME; + } + return NAME; + } + + @Override + protected String contentType() { + return NAME; + } + + private static final NestedPathFieldMapper LEGACY_INSTANCE = new NestedPathFieldMapper(LEGACY_NAME); + private static final NestedPathFieldMapper INSTANCE = new NestedPathFieldMapper(NAME); + + public static final TypeParser PARSER = new FixedTypeParser( + c -> c.indexVersionCreated().before(Version.V_2_0_0) ? LEGACY_INSTANCE : INSTANCE + ); + + /** helper method to create a lucene field based on the opensearch version */ + public static Field field(Version version, String path) { + return new Field(name(version), path, Defaults.FIELD_TYPE); + } + + /** helper method to create a query based on the opensearch version */ + public static Query filter(Version version, String path) { + return new TermQuery(new Term(name(version), new BytesRef(path))); + } + + /** field type for the NestedPath field */ + public static final class NestedPathFieldType extends StringFieldType { + private NestedPathFieldType(String name) { + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return NAME; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("Cannot run exists() query against the nested field path"); + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index a9923d7c6d756..d3c2e7f1e5372 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -32,11 +32,9 @@ package org.opensearch.index.mapper; -import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchParseException; +import
org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.Nullable; import org.opensearch.common.collect.CopyOnWriteHashMap; @@ -388,8 +386,7 @@ protected static void parseProperties(ObjectMapper.Builder objBuilder, Map appliedClusterStateVersion) { - // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode + // check that the cluster-manager does not fabricate new in-sync entries out of thin air once we are in primary mode assert !primaryMode || inSyncAllocationIds.stream().allMatch(inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync) - : "update from master in primary mode contains in-sync ids " + : "update from cluster-manager in primary mode contains in-sync ids " + inSyncAllocationIds + " that have no matching entries in " + checkpoints; - // remove entries which don't exist on master + // remove entries which don't exist on cluster-manager Set initializingAllocationIds = routingTable.getAllInitializingShards() .stream() .map(ShardRouting::allocationId) @@ -1197,7 +1197,7 @@ public synchronized void updateFromMaster( for (String initializingId : initializingAllocationIds) { if (checkpoints.containsKey(initializingId) == false) { final boolean inSync = inSyncAllocationIds.contains(initializingId); - assert inSync == false : "update from master in primary mode has " + assert inSync == false : "update from cluster-manager in primary mode has " + initializingId + " as in-sync but it does not exist locally"; final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -1475,7 +1475,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0); throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId); } - final Runnable runAfter = getMasterUpdateOperationFromCurrentState(); + final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState(); primaryMode = true; // capture current state to possibly replay missed cluster state update appliedClusterStateVersion = primaryContext.clusterStateVersion(); @@ -1541,7 +1541,7 @@ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener } } - private Runnable getMasterUpdateOperationFromCurrentState() { + private Runnable getClusterManagerUpdateOperationFromCurrentState() { assert primaryMode == false; final long lastAppliedClusterStateVersion = appliedClusterStateVersion; final Set inSyncAllocationIds = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index 69f283a53ca79..c07798202144b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -161,7 +161,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {} default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) {} /** - * Called on the Master node only before the {@link IndexService} instances is created to simulate an index creation. + * Called on the cluster-manager node only before the {@link IndexService} instance is created to simulate an index creation.
* This happens right before the index and it's metadata is registered in the cluster state */ default void beforeIndexAddedToCluster(Index index, Settings indexSettings) {} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f2630ad05b488..7a12952316c67 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -516,7 +516,7 @@ public void updateShardState( assert currentRouting.isRelocationTarget() == false || currentRouting.primary() == false || replicationTracker.isPrimaryMode() - : "a primary relocation is completed by the master, but primary mode is not active " + currentRouting; + : "a primary relocation is completed by the cluster-manager, but primary mode is not active " + currentRouting; changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); } else if (currentRouting.primary() @@ -539,7 +539,7 @@ public void updateShardState( if (newRouting.primary()) { if (newPrimaryTerm == pendingPrimaryTerm) { if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { - // the master started a recovering primary, activate primary mode. + // the cluster-manager started a recovering primary, activate primary mode. replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); } @@ -549,10 +549,10 @@ public void updateShardState( * in one state causing it's term to be incremented. Note that if both current shard state and new * shard state are initializing, we could replace the current shard and reinitialize it. It is however * possible that this shard is being started. This can happen if: - * 1) Shard is post recovery and sends shard started to the master + * 1) Shard is post recovery and sends shard started to the cluster-manager * 2) Node gets disconnected and rejoins - * 3) Master assigns the shard back to the node - * 4) Master processes the shard started and starts the shard + * 3) Cluster-manager assigns the shard back to the node + * 4) Cluster-manager processes the shard started and starts the shard * 5) The node process the cluster state where the shard is both started and primary term is incremented. * * We could fail the shard in that case, but this will cause it to be removed from the insync allocations list @@ -757,7 +757,7 @@ private void verifyRelocatingState() { throw new IndexShardNotStartedException(shardId, state); } /* - * If the master cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible + * If the cluster-manager cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible * that we concurrently end up here and therefore have to protect that we do not mark the shard as relocated when its shard routing * says otherwise.
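+ * A hedged sketch of the guard being described (exception construction assumed):
+ * <pre>{@code
+ * if (shardRouting.relocating() == false) {
+ *     throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED,
+ *         ": shard is no longer relocating " + shardRouting);
+ * }
+ * }</pre>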
*/ @@ -3398,7 +3398,7 @@ private void innerAcquireReplicaOperationPermit( final IndexShardState shardState = state(); // only roll translog and update primary term if shard has made it past recovery // Having a new primary term here means that the old primary failed and that there is a new primary, which again - // means that the master will fail this shard as all initializing shards are failed when a primary is selected + // means that the cluster-manager will fail this shard as all initializing shards are failed when a primary is selected // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint if (shardState != IndexShardState.POST_RECOVERY && shardState != IndexShardState.STARTED) { throw new IndexShardNotStartedException(shardId, shardState); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 387f77a839d35..6eb7a29984e1c 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -561,7 +561,7 @@ private void restore( final StepListener indexIdListener = new StepListener<>(); // If the index UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name if (indexId.getId().equals(IndexMetadata.INDEX_UUID_NA_VALUE)) { - // BwC path, running against an old version master that did not add the IndexId to the recovery source + // BwC path, running against an old version cluster-manager that did not add the IndexId to the recovery source repository.getRepositoryData( ActionListener.map(indexIdListener, repositoryData -> repositoryData.resolveIndexId(indexId.getName())) ); diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 9a7b91f020e36..5e59908e741ba 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -57,6 +57,7 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.mapper.RangeType; @@ -184,6 +185,7 @@ private static Map initBuiltInMetadataMa builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(DataStreamFieldMapper.NAME, DataStreamFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); + builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); builtInMetadataMappers.put(VersionFieldMapper.NAME, VersionFieldMapper.PARSER); builtInMetadataMappers.put(SeqNoFieldMapper.NAME, SeqNoFieldMapper.PARSER); // _field_names must be added last so that it has a chance to see all the other mappers diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 9463b51ca3792..858cd238ad700 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -226,7 
+226,7 @@ public synchronized void applyClusterState(final ClusterChangedEvent event) { final ClusterState state = event.state(); // we need to clean the shards and indices we have on this node, since we - // are going to recover them again once state persistence is disabled (no master / not recovered) + // are going to recover them again once state persistence is disabled (no cluster-manager / not recovered) // TODO: feels hacky, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks? if (state.blocks().disableStatePersistence()) { for (AllocatedIndex indexService : indicesService) { @@ -244,7 +244,7 @@ public synchronized void applyClusterState(final ClusterChangedEvent event) { failMissingShards(state); - removeShards(state); // removes any local shards that doesn't match what the master expects + removeShards(state); // removes any local shards that don't match what the cluster-manager expects updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache @@ -267,17 +267,21 @@ private void updateFailedShardsCache(final ClusterState state) { return; } - DiscoveryNode masterNode = state.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = state.nodes().getMasterNode(); - // remove items from cache which are not in our routing table anymore and resend failures that have not executed on master yet + // remove items from cache which are not in our routing table anymore and + // resend failures that have not executed on cluster-manager yet for (Iterator> iterator = failedShardsCache.entrySet().iterator(); iterator.hasNext();) { ShardRouting failedShardRouting = iterator.next().getValue(); ShardRouting matchedRouting = localRoutingNode.getByShardId(failedShardRouting.shardId()); if (matchedRouting == null || matchedRouting.isSameAllocation(failedShardRouting) == false) { iterator.remove(); } else { - if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? - String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure"; + // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? + if (clusterManagerNode != null) { + String message = "cluster-manager " + + clusterManagerNode + + " has not removed previously failed shard. resending shard failure"; logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message); shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER, state); } @@ -401,7 +405,7 @@ private void removeIndices(final ClusterChangedEvent event) { } /** - * Notifies master about shards that don't exist but are supposed to be active on this node. + * Notifies cluster-manager about shards that don't exist but are supposed to be active on this node.
* * @param state new cluster state */ @@ -415,7 +419,7 @@ private void failMissingShards(final ClusterState state) { if (shardRouting.initializing() == false && failedShardsCache.containsKey(shardId) == false && indicesService.getShardOrNull(shardId) == null) { - // the master thinks we are active, but we don't have this shard at all, mark it as failed + // the cluster-manager thinks we are active, but we don't have this shard at all, mark it as failed sendFailShard( shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", @@ -664,12 +668,12 @@ private void updateShard( final IndexShardState state = shard.state(); if (shardRouting.initializing() && (state == IndexShardState.STARTED || state == IndexShardState.POST_RECOVERY)) { - // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting - // for master to confirm a shard started message (either master failover, or a cluster event before - // we managed to tell the master we started), mark us as started + // the cluster-manager thinks we are initializing, but we are already started or on POST_RECOVERY and waiting + // for cluster-manager to confirm a shard started message (either cluster-manager failover, or a cluster event before + // we managed to tell the cluster-manager we started), mark us as started if (logger.isTraceEnabled()) { logger.trace( - "{} master marked shard as initializing, but shard has state [{}], resending shard started to {}", + "{} cluster-manager marked shard as initializing, but shard has state [{}], resending shard started to {}", shardRouting.shardId(), state, nodes.getMasterNode() diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index f56b2f98f0f6e..23ce1b277aeeb 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.plugins.MapperPlugin; import java.util.Collections; @@ -50,6 +51,7 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; + private final Map metadataMapperParsersPre20; private final Function> fieldFilter; public MapperRegistry( @@ -59,6 +61,9 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); + Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers); + tempPre20.remove(NestedPathFieldMapper.NAME); + this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20); this.fieldFilter = fieldFilter; } @@ -75,7 +80,7 @@ public Map getMapperParsers() { * returned map uses the name of the field as a key. */ public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return metadataMapperParsers; + return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? 
metadataMapperParsers : metadataMapperParsersPre20; } /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 127127f5feace..8d9eab24f6027 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -159,7 +159,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) - // and we want to give the master time to remove a faulty node + // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 394b093059385..6dbbf21eb9360 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -242,7 +242,7 @@ public void cancel(String reason) { * fail the recovery and call listener * * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the master of the shard failure + * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure */ public void fail(RecoveryFailedException e, boolean sendShardFailure) { if (finished.compareAndSet(false, true)) { diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java new file mode 100644 index 0000000000000..98d68d67ba5e3 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +/** + * Enumerates the types of replication strategies supported by OpenSearch. 
+ * For more information, see https://github.com/opensearch-project/OpenSearch/issues/1694 + */ +public enum ReplicationType { + + DOCUMENT, + SEGMENT; + + public static ReplicationType parseString(String replicationType) { + try { + return ReplicationType.valueOf(replicationType); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Could not parse ReplicationType for [" + replicationType + "]"); + } catch (NullPointerException npe) { + // return a default value for null input + return DOCUMENT; + } + } +} diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index d385b2e6aa74e..538a8c871cb5f 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -203,8 +203,8 @@ private StoreFilesMetadata listStoreMetadata(NodeRequest request) throws IOExcep return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); } // note that this may fail if it can't get access to the shard lock. Since we check above there is an active shard, this means: - // 1) a shard is being constructed, which means the master will not use a copy of this replica - // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the master may not + // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica + // 2) A shard is shutting down and has not cleared its content within lock timeout. In this case the cluster-manager may not // reuse local resources.
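The new ReplicationType enum above can be exercised end to end. A short usage sketch (the demo harness is hypothetical; only the enum comes from this change): valueOf accepts exact constant names, null input falls back to DOCUMENT, and anything else is rejected with an IllegalArgumentException.

import org.opensearch.indices.replication.common.ReplicationType;

// Hypothetical demo harness, not part of this change.
public class ReplicationTypeDemo {
    public static void main(String[] args) {
        System.out.println(ReplicationType.parseString("SEGMENT")); // SEGMENT
        System.out.println(ReplicationType.parseString(null));      // DOCUMENT (default for null input)
        try {
            ReplicationType.parseString("LOGICAL");                  // no such constant
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}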
final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot( shardPath.resolveIndex(), diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c929c7c013b13..c1152afd6fe44 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -37,8 +37,6 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.opensearch.index.IndexingPressureService; -import org.opensearch.tasks.TaskResourceTrackingService; -import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -215,7 +213,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -327,7 +324,6 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; - private final AtomicReference runnableTaskListener; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -437,8 +433,7 @@ protected Node( final List> executorBuilders = pluginsService.getExecutorBuilders(settings); - runnableTaskListener = new AtomicReference<>(); - final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); + final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -1062,11 +1057,6 @@ public Node start() throws NodeValidationException { TransportService transportService = injector.getInstance(TransportService.class); transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); - - TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class); - transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); - runnableTaskListener.set(taskResourceTrackingService); - transportService.start(); assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) @@ -1192,7 +1182,7 @@ private Node stop() { // stop any changes happening as a result of cluster state changes injector.getInstance(IndicesClusterStateService.class).stop(); // close discovery early to not react to pings anymore. - // This can confuse other nodes and delay things - mostly if we're the master and we're running tests. + // This can confuse other nodes and delay things - mostly if we're the cluster manager and we're running tests. 
injector.getInstance(Discovery.class).stop(); // we close indices first, so operations won't be allowed on it injector.getInstance(ClusterService.class).stop(); @@ -1468,7 +1458,7 @@ protected ClusterInfoService newClusterInfoService( ) { final InternalClusterInfoService service = new InternalClusterInfoService(settings, clusterService, threadPool, client); if (DiscoveryNode.isMasterNode(settings)) { - // listen for state changes (this node starts/stops being the elected master, or new nodes are added) + // listen for state changes (this node starts/stops being the elected cluster manager, or new nodes are added) clusterService.addListener(service); } return service; @@ -1500,5 +1490,4 @@ DiscoveryNode getNode() { return localNode.get(); } } - } diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index eaa623b53ac1c..e6a9ae673211f 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -60,7 +60,7 @@ import java.util.Objects; /** - * Component that runs only on the master node and is responsible for assigning running tasks to nodes + * Component that runs only on the cluster-manager node and is responsible for assigning running tasks to nodes */ public class PersistentTasksClusterService implements ClusterStateListener, Closeable { @@ -114,7 +114,7 @@ public void close() { } /** - * Creates a new persistent task on master node + * Creates a new persistent task on cluster-manager node * * @param taskId the task's id * @param taskName the task's name diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java index 4a12208d675e9..0a88204c7cfe9 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java @@ -87,7 +87,7 @@ public PersistentTasksNodeService( @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // wait until the gateway has recovered from disk, otherwise if the only master restarts + // wait until the gateway has recovered from disk, otherwise if the only cluster-manager restarts // we start cancelling all local tasks before cluster has a chance to recover. 
return; } @@ -103,18 +103,18 @@ public void clusterChanged(ClusterChangedEvent event) { // NULL STARTED Remove locally, Mark as PENDING_CANCEL, Cancel // NULL COMPLETED Remove locally - // Master states: + // Cluster-manager states: // NULL - doesn't exist in the cluster state // STARTED - exist in the cluster state // Local state: // NULL - we don't have task registered locally in runningTasks - // STARTED - registered in TaskManager, requires master notification when finishes - // PENDING_CANCEL - registered in TaskManager, doesn't require master notification when finishes - // COMPLETED - not registered in TaskManager, notified, waiting for master to remove it from CS so we can remove locally + // STARTED - registered in TaskManager, requires cluster-manager notification when it finishes + // PENDING_CANCEL - registered in TaskManager, doesn't require cluster-manager notification when it finishes + // COMPLETED - not registered in TaskManager, notified, waiting for cluster-manager to remove it from CS so we can remove locally // When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered, - // If the task was STARTED, the master notification is also triggered (this is handled by unregisterTask() method, which is + // If the task was STARTED, the cluster-manager notification is also triggered (this is handled by unregisterTask() method, which is // triggered by PersistentTaskListener if (Objects.equals(tasks, previousTasks) == false || event.nodesChanged()) { @@ -162,7 +162,7 @@ public void clusterChanged(ClusterChangedEvent event) { ); runningTasks.remove(id); } else { - // task is running locally, but master doesn't know about it - that means that the persistent task was removed + // task is running locally, but cluster-manager doesn't know about it - that means that the persistent task was removed // cancel the task without notifying master logger.trace( "Found unregistered persistent task [{}] with id [{}] and allocation id [{}] - cancelling", @@ -286,7 +286,7 @@ public void onFailure(Exception notificationException) { } /** - * Unregisters and then cancels the locally running task using the task manager. No notification to master will be send upon + * Unregisters and then cancels the locally running task using the task manager. No notification to cluster-manager will be sent upon * cancellation. */ private void cancelTask(Long allocationId) { diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java index a52b623a7a843..c3d78bb614200 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java @@ -54,7 +54,7 @@ /** * This service is used by persistent tasks and allocated persistent tasks to communicate changes - * to the master node so that the master can update the cluster state and can track of the states + * to the cluster-manager node so that the cluster-manager can update the cluster state and can keep track of the states * of the persistent tasks. */ public class PersistentTasksService { @@ -74,7 +74,7 @@ public PersistentTasksService(ClusterService clusterService, ThreadPool threadPo } /** - * Notifies the master node to create new persistent task and to assign it to a node. + * Notifies the cluster-manager node to create a new persistent task and to assign it to a node.
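The state table above maps the cluster-manager's view of a task against the node's local registration state and prescribes an action for each pair. A toy reconciliation function covering just the two rows quoted in this hunk (the names are illustrative, not the OpenSearch API):

// Toy model of the reconciliation table above; only the two rows visible in
// this hunk (cluster-manager state NULL) are encoded.
enum LocalState { NULL, STARTED, PENDING_CANCEL, COMPLETED }

final class TaskReconciler {
    static String onClusterManagerStateNull(LocalState local) {
        switch (local) {
            case STARTED:
                return "remove locally, mark as PENDING_CANCEL, cancel";
            case COMPLETED:
                return "remove locally";
            default:
                return "no action";
        }
    }

    public static void main(String[] args) {
        System.out.println(onClusterManagerStateNull(LocalState.STARTED));
        System.out.println(onClusterManagerStateNull(LocalState.COMPLETED));
    }
}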
*/ public void sendStartRequest( final String taskId, @@ -89,7 +89,7 @@ public void sendStartRequest( } /** - * Notifies the master node about the completion of a persistent task. + * Notifies the cluster-manager node about the completion of a persistent task. *

    * When {@code failure} is {@code null}, the persistent task is considered as successfully completed. */ @@ -118,7 +118,7 @@ void sendCancelRequest(final long taskId, final String reason, final ActionListe } /** - * Notifies the master node that the state of a persistent task has changed. + * Notifies the cluster-manager node that the state of a persistent task has changed. *

    * Persistent task implementers shouldn't call this method directly and use * {@link AllocatedPersistentTask#updatePersistentTaskState} instead @@ -138,7 +138,7 @@ void sendUpdateStateRequest( } /** - * Notifies the master node to remove a persistent task from the cluster state + * Notifies the cluster-manager node to remove a persistent task from the cluster state */ public void sendRemoveRequest(final String taskId, final ActionListener> listener) { RemovePersistentTaskAction.Request request = new RemovePersistentTaskAction.Request(taskId); diff --git a/server/src/main/java/org/opensearch/persistent/package-info.java b/server/src/main/java/org/opensearch/persistent/package-info.java index 00260b56d2ddb..3eff441642c90 100644 --- a/server/src/main/java/org/opensearch/persistent/package-info.java +++ b/server/src/main/java/org/opensearch/persistent/package-info.java @@ -32,12 +32,12 @@ * In order to be resilient to node restarts, the persistent tasks are using the cluster state instead of a transport service to send * requests and responses. The execution is done in six phases: *

    - * 1. The coordinating node sends an ordinary transport request to the master node to start a new persistent task. This task is handled + * 1. The coordinating node sends an ordinary transport request to the cluster-manager node to start a new persistent task. This task is handled * by the {@link org.opensearch.persistent.PersistentTasksService}, which is using * {@link org.opensearch.persistent.PersistentTasksClusterService} to update cluster state with the record about running persistent * task. *

    - * 2. The master node updates the {@link org.opensearch.persistent.PersistentTasksCustomMetadata} in the cluster state to indicate + * 2. The cluster-manager node updates the {@link org.opensearch.persistent.PersistentTasksCustomMetadata} in the cluster state to indicate * that there is a new persistent task running in the system. *

    * 3. The {@link org.opensearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index e7c5804f458a0..b521da8453bb2 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -140,7 +140,7 @@ public RepositoriesService( /** * Registers new repository in the cluster *

- * This method can be only called on the master node. It tries to create a new repository on the master + * This method can only be called on the cluster-manager node. It tries to create a new repository on the cluster-manager * and if it was successful it adds new repository to cluster metadata. * * @param request register repository request @@ -172,7 +172,7 @@ public void registerRepository(final PutRepositoryRequest request, final ActionL registrationListener = listener; } - // Trying to create the new repository on master to make sure it works + // Trying to create the new repository on cluster-manager to make sure it works try { closeRepository(createRepository(newRepositoryMetadata, typesRegistry)); } catch (Exception e) { @@ -235,7 +235,7 @@ public void onFailure(String source, Exception e) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { - // repository is created on both master and data nodes + // repository is created on both cluster-manager and data nodes return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); } } @@ -245,7 +245,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { /** * Unregisters repository in the cluster *

- * This method can be only called on the master node. It removes repository information from cluster metadata. + * This method can only be called on the cluster-manager node. It removes repository information from cluster metadata. * * @param request unregister repository request * @param listener unregister repository listener @@ -290,7 +290,7 @@ public ClusterState execute(ClusterState currentState) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { - // repository was created on both master and data nodes + // repository was created on both cluster-manager and data nodes return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); } } @@ -457,7 +457,7 @@ public void getRepositoryData(final String repositoryName, final ActionListener< /** * Returns registered repository *

    - * This method is called only on the master node + * This method is called only on the cluster-manager node * * @param repositoryName repository name * @return registered repository diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index c8907393824c2..18ad02bab48f9 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -68,7 +68,7 @@ *

      *
    • Data nodes call {@link Repository#snapshotShard} * for each shard
    - * • When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures
    + * • When all shard calls return cluster-manager calls {@link #finalizeSnapshot} with possible list of failures
    *
    */ public interface Repository extends LifecycleComponent { @@ -134,7 +134,7 @@ default Repository create(RepositoryMetadata metadata, Function - * This method is called on master after all shards are snapshotted. + * This method is called on cluster-manager after all shards are snapshotted. * * @param shardGenerations updated shard generations * @param repositoryStateId the unique id identifying the state of the repository when the snapshot began @@ -197,7 +197,7 @@ default RepositoryStats stats() { } /** - * Verifies repository on the master node and returns the verification token. + * Verifies repository on the cluster-manager node and returns the verification token. *

    * If the verification token is not null, it's passed to all data nodes for verification. If it's null - no * additional verification is required diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 7d6cdef76198f..d95612e31ca38 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -318,7 +318,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp *

  • All repositories that are read-only, i.e. for which {@link #isReadOnly()} returns {@code true} because there are no * guarantees that another cluster is not writing to the repository at the same time
  • The node finds itself in a mixed-version cluster containing nodes older than
  - * {@link RepositoryMetadata#REPO_GEN_IN_CS_VERSION} where the master node does not update the value of
  + * {@link RepositoryMetadata#REPO_GEN_IN_CS_VERSION} where the cluster-manager node does not update the value of
  * {@link RepositoryMetadata#generation()} when writing a new {@code index-N} blob
  • The value of {@link RepositoryMetadata#generation()} for this repository is {@link RepositoryData#UNKNOWN_REPO_GEN} * indicating that no consistent repository generation is tracked in the cluster state yet.
  @@ -726,8 +726,8 @@ public void deleteSnapshots( protected void doRun() throws Exception { final Map rootBlobs = blobContainer().listBlobs(); final RepositoryData repositoryData = safeRepositoryData(repositoryStateId, rootBlobs); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. + // Cache the indices that were found before writing out the new index-N blob so that a stuck cluster-manager will never + // delete an index that was created by another cluster-manager node after writing this index-N blob. final Map foundIndices = blobStore().blobContainer(indicesPath()).children(); doDeleteShardSnapshots( snapshotIds,

    * *

    The blob store is written to and read from by cluster-manager-eligible nodes and data nodes. All metadata related to a snapshot's - * scope and health is written by the master node.

    + * scope and health is written by the cluster-manager node.

    *

    The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of * the shard's segment files to the repository as well as metadata about all the segment files that the repository stores for the shard.

    @@ -131,19 +131,19 @@ *

    Writing Updated RepositoryData to the Repository

    * *

    Writing an updated {@link org.opensearch.repositories.RepositoryData} to a blob store repository is an operation that uses - * the cluster state to ensure that a specific {@code index-N} blob is never accidentally overwritten in a master failover scenario. + * the cluster state to ensure that a specific {@code index-N} blob is never accidentally overwritten in a cluster-manager failover scenario. * The specific steps to writing a new {@code index-N} blob and thus making changes from a snapshot-create or delete operation visible - * to read operations on the repository are as follows and all run on the master node:

    + * to read operations on the repository are as follows and all run on the cluster-manager node:

    * *
      *
    1. Write an updated value of {@link org.opensearch.cluster.metadata.RepositoryMetadata} for the repository that has the same * {@link org.opensearch.cluster.metadata.RepositoryMetadata#generation()} as the existing entry and has a value of * {@link org.opensearch.cluster.metadata.RepositoryMetadata#pendingGeneration()} one greater than the {@code pendingGeneration} of the * existing entry.
    - * 2. On the same master node, after the cluster state has been updated in the first step, write the new {@code index-N} blob and
    + * 2. On the same cluster-manager node, after the cluster state has been updated in the first step, write the new {@code index-N} blob and
    * also update the contents of the {@code index.latest} blob. Note that updating the index.latest blob is done on a best effort
    - * basis and that there is a chance for a stuck master-node to overwrite the contents of the {@code index.latest} blob after a newer
    - * {@code index-N} has been written by another master node. This is acceptable since the contents of {@code index.latest} are not used
    + * basis and that there is a chance for a stuck cluster-manager node to overwrite the contents of the {@code index.latest} blob after a newer
    + * {@code index-N} has been written by another cluster-manager node. This is acceptable since the contents of {@code index.latest} are not used
    * during normal operation of the repository and must only be correct for purposes of mounting the contents of a
    * {@link org.opensearch.repositories.blobstore.BlobStoreRepository} as a read-only url repository.
    3. After the write has finished, set the value of {@code RepositoriesState.State#generation} to the value used for @@ -152,7 +152,7 @@ * last valid {@code index-N} blob in the repository.
    *
    - * If either of the last two steps in the above fails or master fails over to a new node at any point, then a subsequent operation
    + * If either of the last two steps in the above fails or cluster-manager fails over to a new node at any point, then a subsequent operation
    * trying to write a new {@code index-N} blob will never use the same value of {@code N} used by a previous attempt. It will always start * over at the first of the above three steps, incrementing the {@code pendingGeneration} generation before attempting a write, thus * ensuring no overwriting of a {@code index-N} blob ever to occur. The use of the cluster state to track the latest repository generation @@ -208,7 +208,7 @@ * *
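The protocol above reads naturally as a tiny state machine: pendingGeneration is bumped before any write attempt, and generation only catches up once the index-N blob is safely written, so a failed or stuck attempt can never have its value of N reused. A self-contained sketch of that rule (illustrative only, not the repository code):

// Illustrative model of the index-N safety rule described above.
final class RepoGenerations {
    long generation = 5;        // last index-N known to be fully written
    long pendingGeneration = 5; // highest N any attempt has ever claimed

    long claimNextGeneration() {
        // Step 1: claim N by bumping pendingGeneration in the cluster state.
        pendingGeneration += 1;
        return pendingGeneration;
    }

    void commit(long n) {
        // Step 3: only after index-N is written does generation catch up.
        generation = n;
    }

    public static void main(String[] args) {
        RepoGenerations repo = new RepoGenerations();
        long n1 = repo.claimNextGeneration(); // 6, attempt fails after this point
        long n2 = repo.claimNextGeneration(); // 7, the retry never reuses 6
        repo.commit(n2);
        System.out.println(n1 + " " + n2 + " " + repo.generation); // 6 7 7
    }
}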

    Finalizing the Snapshot

    *
    - * After all primaries have finished writing the necessary segment files to the blob store in the previous step, the master node moves on
    + * After all primaries have finished writing the necessary segment files to the blob store in the previous step, the cluster-manager node moves on
    * to finalizing the snapshot by invoking {@link org.opensearch.repositories.Repository#finalizeSnapshot}. This method executes the * following actions in order:

    *
      @@ -222,7 +222,7 @@ * *

      Deleting a Snapshot

      *
      - * Deleting a snapshot is an operation that is exclusively executed on the master node that runs through the following sequence of
      + * Deleting a snapshot is an operation that is exclusively executed on the cluster-manager node that runs through the following sequence of
      * action when {@link org.opensearch.repositories.blobstore.BlobStoreRepository#deleteSnapshots} is invoked:

      * *
        diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java index e57246265bb66..51417733ebe61 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java @@ -118,7 +118,7 @@ public FsRepository( if (location.isEmpty()) { logger.warn( "the repository location is missing, it should point to a shared file system location" - + " that is available on all master and data nodes" + + " that is available on all cluster-manager and data nodes" ); throw new RepositoryException(metadata.name(), "missing location"); } diff --git a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java index 536972aa5a5d7..eb5fcccee3868 100644 --- a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java @@ -58,6 +58,7 @@ public abstract class AbstractRestChannel implements RestChannel { private final String filterPath; private final boolean pretty; private final boolean human; + private final String acceptHeader; private BytesStreamOutput bytesOut; @@ -71,7 +72,8 @@ public abstract class AbstractRestChannel implements RestChannel { protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; - this.format = request.param("format", request.header("Accept")); + this.format = request.param("format"); + this.acceptHeader = request.header("Accept"); this.filterPath = request.param("filter_path", null); this.pretty = request.paramAsBoolean("pretty", false); this.human = request.paramAsBoolean("human", false); @@ -112,7 +114,11 @@ public XContentBuilder newBuilder( boolean useFiltering ) throws IOException { if (responseContentType == null) { - responseContentType = XContentType.fromMediaTypeOrFormat(format); + // TODO should format vs acceptHeader always be the same, do we allow overriding? + responseContentType = XContentType.fromFormat(format); + if (responseContentType == null) { + responseContentType = XContentType.fromMediaType(acceptHeader); + } } // try to determine the response content type from the media type or the format query string parameter, with the format parameter // taking precedence over the Accept header diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java index 4ff519e81f9cd..44e2ace0f7cf4 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -187,7 +187,7 @@ static RestResponse buildRestResponse( public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. // We can't remove this logic yet to support mixed clusters. We should be able to remove this logic here - // in when 8.0 becomes the new version in the master branch. + // when 8.0 becomes the new version in the main branch.
final boolean namesProvided = request.hasParam("name"); final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name"); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 1b70603edf6e1..d6a620316f489 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -211,7 +211,7 @@ private void sendGetSettingsRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener listener ) { @@ -219,7 +219,7 @@ private void sendGetSettingsRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); request.names(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()); client.admin().indices().getSettings(request, listener); @@ -229,7 +229,7 @@ private void sendClusterStateRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener listener ) { @@ -238,7 +238,7 @@ private void sendClusterStateRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); client.admin().cluster().state(request, listener); } @@ -247,7 +247,7 @@ private void sendClusterHealthRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener listener ) { @@ -256,7 +256,7 @@ private void sendClusterHealthRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); client.admin().cluster().health(request, listener); } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java index 4bcb16c741ecf..64103cba556eb 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java @@ -106,17 +106,17 @@ private Table buildTable(RestRequest request, ClusterStateResponse state) { DiscoveryNodes nodes = state.getState().nodes(); table.startRow(); - DiscoveryNode master = nodes.get(nodes.getMasterNodeId()); - if (master == null) { + DiscoveryNode clusterManager = nodes.get(nodes.getMasterNodeId()); + if (clusterManager == null) { table.addCell("-"); table.addCell("-"); table.addCell("-"); table.addCell("-"); } else { - table.addCell(master.getId()); - table.addCell(master.getHostName()); - table.addCell(master.getHostAddress()); - table.addCell(master.getName()); + table.addCell(clusterManager.getId()); + table.addCell(clusterManager.getHostName()); + table.addCell(clusterManager.getHostAddress()); + table.addCell(clusterManager.getName()); } table.endRow(); diff --git 
a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 3052a9736f9a3..820031f9e57e4 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -334,7 +334,7 @@ Table buildTable( ) { DiscoveryNodes nodes = state.getState().nodes(); - String masterId = nodes.getMasterNodeId(); + String clusterManagerId = nodes.getMasterNodeId(); Table table = getTableWithHeader(req); for (DiscoveryNode node : nodes) { @@ -424,7 +424,7 @@ Table buildTable( roles = node.getRoles().stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().collect(Collectors.joining()); } table.addCell(roles); - table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-"); + table.addCell(clusterManagerId == null ? "x" : clusterManagerId.equals(node.getId()) ? "*" : "-"); table.addCell(node.getName()); CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion(); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java index 69977cb73645a..542c428901475 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java @@ -64,13 +64,20 @@ public class RestTable { public static RestResponse buildResponse(Table table, RestChannel channel) throws Exception { RestRequest request = channel.request(); - XContentType xContentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept"))); + XContentType xContentType = getXContentType(request); if (xContentType != null) { return buildXContentBuilder(table, channel); } return buildTextPlainResponse(table, channel); } + private static XContentType getXContentType(RestRequest request) { + if (request.hasParam("format")) { + return XContentType.fromFormat(request.param("format")); + } + return XContentType.fromMediaType(request.header("Accept")); + } + public static RestResponse buildXContentBuilder(Table table, RestChannel channel) throws Exception { RestRequest request = channel.request(); XContentBuilder builder = channel.newBuilder(); diff --git a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java index 5b44b435042e5..2314cbd11dfdd 100644 --- a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java +++ b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java @@ -176,8 +176,8 @@ public void clusterChanged(ClusterChangedEvent event) { } } else if (event.previousState().nodes().isLocalNodeElectedMaster()) { - // TODO Maybe just clear out non-ongoing snapshot recoveries is the node is master eligible, so that we don't - // have to repopulate the data over and over in an unstable master situation? + // TODO Maybe just clear out non-ongoing snapshot recoveries if the node is cluster-manager eligible, so that we don't + // have to repopulate the data over and over in an unstable cluster-manager situation?
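The AbstractRestChannel and RestTable changes above replace a single merged value with a strict precedence rule: an explicit format query parameter wins, and the Accept header is only consulted as a fallback. A condensed sketch of that rule (the helper names mimic, but are not, the real XContentType API):

import java.util.Locale;
import java.util.Map;

// Condensed model of the negotiation introduced above: "format" wins, Accept is the fallback.
final class FormatNegotiation {
    static String resolve(Map<String, String> params, Map<String, String> headers) {
        String format = params.get("format");
        if (format != null) {
            return fromFormat(format);               // explicit ?format=... takes precedence
        }
        return fromMediaType(headers.get("Accept")); // otherwise honor the Accept header
    }

    static String fromFormat(String format) {
        switch (format.toLowerCase(Locale.ROOT)) {
            case "json": return "application/json";
            case "yaml": return "application/yaml";
            default: return null;                    // unknown format, plain-text fallback
        }
    }

    static String fromMediaType(String mediaType) {
        return (mediaType != null && mediaType.startsWith("application/")) ? mediaType : null;
    }

    public static void main(String[] args) {
        System.out.println(resolve(Map.of("format", "yaml"), Map.of("Accept", "application/json"))); // application/yaml
        System.out.println(resolve(Map.of(), Map.of("Accept", "application/json")));                 // application/json
    }
}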
synchronized (mutex) { // information only needed on current master knownSnapshotShards = ImmutableOpenMap.of(); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index e1b143b5f5274..4e9f13408e547 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -958,7 +958,7 @@ public void onFailure(final String source, final Exception e) { @Override public void onNoLongerMaster(String source) { - logger.debug("no longer master while processing restore state update [{}]", source); + logger.debug("no longer cluster-manager while processing restore state update [{}]", source); } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index b6c0b63efe3d3..800728077472d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -103,7 +103,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final Map> shardSnapshots = new HashMap<>(); - // A map of snapshots to the shardIds that we already reported to the master as failed + // A map of snapshots to the shardIds that we already reported to the cluster-manager as failed private final TransportRequestDeduplicator remoteFailedRequestDeduplicator = new TransportRequestDeduplicator<>(); @@ -148,9 +148,9 @@ public void clusterChanged(ClusterChangedEvent event) { } } - String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); + String previousClusterManagerNodeId = event.previousState().nodes().getMasterNodeId(); String currentMasterNodeId = event.state().nodes().getMasterNodeId(); - if (currentMasterNodeId != null && currentMasterNodeId.equals(previousMasterNodeId) == false) { + if (currentMasterNodeId != null && currentMasterNodeId.equals(previousClusterManagerNodeId) == false) { syncShardStatsOnNewMaster(event); } @@ -251,7 +251,7 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); if (snapshotStatus == null) { // due to CS batching we might have missed the INIT state and straight went into ABORTED - // notify master that abort has completed by moving to FAILED + // notify cluster-manager that abort has completed by moving to FAILED if (shard.value.state() == ShardState.ABORTED && localNodeId.equals(shard.value.nodeId())) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } @@ -424,7 +424,7 @@ private static String getShardStateId(IndexShard indexShard, IndexCommit snapsho } /** - * Checks if any shards were processed that the new master doesn't know about + * Checks if any shards were processed that the new cluster-manager doesn't know about */ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); @@ -433,7 +433,7 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { } // Clear request deduplicator since we need to send all requests that were potentially not handled by the previous - // master again + // cluster-manager again remoteFailedRequestDeduplicator.clear(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if 
(snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { @@ -446,11 +446,11 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { if (masterShard != null && masterShard.state().completed() == false) { final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); final Stage stage = indexShardSnapshotStatus.getStage(); - // Master knows about the shard and thinks it has not completed + // cluster-manager knows about the shard and thinks it has not completed if (stage == Stage.DONE) { - // but we think the shard is done - we need to make new master know that the shard is done + // but we think the shard is done - we need to make new cluster-manager know that the shard is done logger.debug( - "[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "[{}] new cluster-manager thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId @@ -458,9 +458,9 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localShard.getValue().generation()); } else if (stage == Stage.FAILURE) { - // but we think the shard failed - we need to make new master know that the shard failed + // but we think the shard failed - we need to make new cluster-manager know that the shard failed logger.debug( - "[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "[{}] new cluster-manager thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId @@ -474,7 +474,7 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { } } - /** Notify the master node that the given shard has been successfully snapshotted **/ + /** Notify the cluster-manager node that the given shard has been successfully snapshotted **/ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, String generation) { assert generation != null; sendSnapshotShardUpdate( @@ -484,7 +484,7 @@ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardI ); } - /** Notify the master node that the given shard failed to be snapshotted **/ + /** Notify the cluster-manager node that the given shard failed to be snapshotted **/ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { sendSnapshotShardUpdate( snapshot, @@ -493,7 +493,7 @@ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId sh ); } - /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ + /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the cluster-manager node */ private void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) { remoteFailedRequestDeduplicator.executeOnce( new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 122c13337fa70..746cccef8e596 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -134,7 +134,7 @@ import static 
org.opensearch.cluster.SnapshotsInProgress.completed; /** - * Service responsible for creating snapshots. This service runs all the steps executed on the master node during snapshot creation and + * Service responsible for creating snapshots. This service runs all the steps executed on the cluster-manager node during snapshot creation and * deletion. * See package level documentation of {@link org.opensearch.snapshots} for details. */ @@ -303,8 +303,8 @@ public ClusterState execute(ClusterState currentState) { } SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous master that we can simply ignore and remove from the cluster state because we would clean it up from the - // cluster state anyway in #applyClusterState. + // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from + // the cluster state anyway in #applyClusterState. if (snapshots != null && snapshots.entries() .stream() @@ -452,7 +452,8 @@ public ClusterState execute(ClusterState currentState) { ); } // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous master that we can simply ignore and remove from the cluster state because we would clean it up from the + // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from + // the // cluster state anyway in #applyClusterState. if (concurrentOperationsAllowed == false && runningSnapshots.stream().anyMatch(entry -> entry.state() != State.INIT)) { throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); @@ -807,7 +808,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, repository); } } else { - // Extremely unlikely corner case of master failing over between between starting the clone and + // Extremely unlikely corner case of cluster-manager failing over between starting the clone and // starting shard clones. logger.warn("Did not find expected entry [{}] in the cluster state", cloneEntry); } @@ -986,8 +987,10 @@ protected void doRun() { ); } if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a master failover to an - // older version master node snapshot finalization (that assumes initializeSnapshot was called) produces a valid + // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager + // failover to an + // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a + // valid // snapshot.
repository.initializeSnapshot( snapshot.snapshot().getSnapshotId(), @@ -1116,11 +1119,14 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - // We are not longer a master - we shouldn't try to do any cleanup - // The new master will take care of it - logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId()); + // We are no longer a cluster-manager - we shouldn't try to do any cleanup + // The new cluster-manager will take care of it + logger.warn( + "[{}] failed to create snapshot - no longer a cluster-manager", + snapshot.snapshot().getSnapshotId() + ); userCreateSnapshotListener.onFailure( - new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization") + new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization") ); } @@ -1238,7 +1244,7 @@ private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, /** * Returns status of the currently running snapshots *

        - * This method is executed on master node + * This method is executed on cluster-manager node *

        * * @param snapshotsInProgress snapshots in progress in the cluster state @@ -1298,20 +1304,22 @@ public static List currentSnapshots( public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { - // We don't remove old master when master flips anymore. So, we need to check for change in master + // We don't remove old cluster-manager when cluster-manager flips anymore. So, we need to check for change in + // cluster-manager SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); - final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; + final boolean newClusterManager = event.previousState().nodes().isLocalNodeElectedMaster() == false; processExternalChanges( - newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), + newClusterManager || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) ); } else if (snapshotCompletionListeners.isEmpty() == false) { - // We have snapshot listeners but are not the master any more. Fail all waiting listeners except for those that already + // We have snapshot listeners but are not the cluster-manager any more. Fail all waiting listeners except for those that + // already // have their snapshots finalizing (those that are already finalizing will fail on their own from to update the cluster // state). for (Snapshot snapshot : new HashSet<>(snapshotCompletionListeners.keySet())) { if (endingSnapshots.add(snapshot)) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer cluster-manager")); } } } @@ -1326,7 +1334,7 @@ public void applyClusterState(ClusterChangedEvent event) { /** * Cleanup all snapshots found in the given cluster state that have no more work left: * 1. Completed snapshots - * 2. Snapshots in state INIT that a previous master of an older version failed to start + * 2. Snapshots in state INIT that a previous cluster-manager of an older version failed to start * 3. Snapshots in any other state that have all their shard tasks completed */ private void endCompletedSnapshots(ClusterState state) { @@ -1402,11 +1410,11 @@ private static boolean assertNoDanglingSnapshots(ClusterState state) { } /** - * Updates the state of in-progress snapshots in reaction to a change in the configuration of the cluster nodes (master fail-over or + * Updates the state of in-progress snapshots in reaction to a change in the configuration of the cluster nodes (cluster-manager fail-over or * disconnect of a data node that was executing a snapshot) or a routing change that started shards whose snapshot state is * {@link SnapshotsInProgress.ShardState#WAITING}. * - * @param changedNodes true iff either a master fail-over occurred or a data node that was doing snapshot work got removed from the + * @param changedNodes true iff either a cluster-manager fail-over occurred or a data node that was doing snapshot work got removed from the * cluster * @param startShards true iff any waiting shards were started due to a routing change */ @@ -1863,7 +1871,7 @@ private List>> endAndGetListe /** * Handles failure to finalize a snapshot. 
If the exception indicates that this node was unable to publish a cluster state and stopped - * being the master node, then fail all snapshot create and delete listeners executing on this node by delegating to + * being the cluster-manager node, then fail all snapshot create and delete listeners executing on this node by delegating to * {@link #failAllListenersOnMasterFailOver}. Otherwise, i.e. as a result of failing to write to the snapshot repository for some * reason, remove the snapshot's {@link SnapshotsInProgress.Entry} from the cluster state and move on with other queued snapshot * operations if there are any. @@ -1875,7 +1883,7 @@ private List>> endAndGetListe private void handleFinalizationFailure(Exception e, SnapshotsInProgress.Entry entry, RepositoryData repositoryData) { Snapshot snapshot = entry.snapshot(); if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { - // Failure due to not being master any more, don't try to remove snapshot from cluster state the next master + // Failure due to not being cluster-manager any more, don't try to remove snapshot from cluster state the next cluster-manager // will try ending this snapshot again logger.debug(() -> new ParameterizedMessage("[{}] failed to update cluster state during snapshot finalization", snapshot), e); failSnapshotCompletionListeners( @@ -2082,7 +2090,7 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - failure.addSuppressed(new SnapshotException(snapshot, "no longer master")); + failure.addSuppressed(new SnapshotException(snapshot, "no longer cluster-manager")); failSnapshotCompletionListeners(snapshot, failure); failAllListenersOnMasterFailOver(new NotMasterException(source)); if (listener != null) { @@ -2249,7 +2257,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { SnapshotsInProgress.of( snapshots.entries() .stream() - // remove init state snapshot we found from a previous master if there was one + // remove init state snapshot we found from a previous cluster-manager if there was one .filter(existing -> abortedDuringInit == false || existing.equals(snapshotEntry) == false) .map(existing -> { if (existing.equals(snapshotEntry)) { @@ -2297,8 +2305,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS ); }, e -> { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { - logger.warn("master failover before deleted snapshot could complete", e); - // Just pass the exception to the transport handler as is so it is retried on the new master + logger.warn("cluster-manager failover before deleted snapshot could complete", e); + // Just pass the exception to the transport handler as is so it is retried on the new cluster-manager listener.onFailure(e); } else { logger.warn("deleted snapshot failed", e); @@ -2588,7 +2596,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS */ private static boolean isWritingToRepository(SnapshotsInProgress.Entry entry) { if (entry.state().completed()) { - // Entry is writing to the repo because it's finalizing on master + // Entry is writing to the repo because it's finalizing on cluster-manager return true; } for (ObjectCursor value : entry.shards().values()) { @@ -2769,19 +2777,19 @@ protected void handleListeners(List> deleteListeners) { } /** - * Handle snapshot or delete failure due to not being master any more so we don't 
try to do run additional cluster state updates. - * The next master will try handling the missing operations. All we can do is fail all the listeners on this master node so that + * Handle snapshot or delete failure due to not being cluster-manager any more so we don't try to run additional cluster state updates. + * The next cluster-manager will try handling the missing operations. All we can do is fail all the listeners on this cluster-manager node so that * transport requests return and we don't leak listeners. * - * @param e exception that caused us to realize we are not master any longer + * @param e exception that caused us to realize we are not cluster-manager any longer */ private void failAllListenersOnMasterFailOver(Exception e) { - logger.debug("Failing all snapshot operation listeners because this node is not master any longer", e); + logger.debug("Failing all snapshot operation listeners because this node is not cluster-manager any longer", e); synchronized (currentlyFinalizing) { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { repositoryOperations.clear(); for (Snapshot snapshot : new HashSet<>(snapshotCompletionListeners.keySet())) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer cluster-manager")); } final Exception wrapped = new RepositoryException("_all", "Failed to update cluster state during repository operation", e); for (Iterator>> iterator = snapshotDeletionListeners.values().iterator(); iterator.hasNext();) { @@ -3213,7 +3221,7 @@ public boolean assertAllListenersResolved() { * * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the - * iteration as it was already applied before and likely just arrived on the master node again due to retries upstream. + * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. * If the update was not a noop, then it means that the shard it applied to is now available for another snapshot or clone operation * to be re-assigned if there is another snapshot operation that is waiting for the shard to become available. We therefore record the * fact that a task was executed by adding it to a collection of executed tasks. If a subsequent execution of the outer loop finds that @@ -3267,7 +3275,8 @@ public boolean assertAllListenersResolved() { updateSnapshotState, entry ); - assert false : "This should never happen, master will not submit a state update for a non-existing clone"; + assert false + : "This should never happen, cluster-manager will not submit a state update for a non-existing clone"; continue; } if (existing.state().completed()) { @@ -3810,8 +3819,8 @@ synchronized void addFinalization(SnapshotsInProgress.Entry entry, Metadata meta } /** - * Clear all state associated with running snapshots. To be used on master-failover if the current node stops - * being master. + * Clear all state associated with running snapshots. To be used on cluster-manager-failover if the current node stops + * being cluster-manager.
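The fail-over contract spelled out in these hunks — fail every pending snapshot and deletion listener so transport requests return and nothing leaks, then clear all running-snapshot state — can be illustrated with a small self-contained sketch. The class and method names below are hypothetical stand-ins, not the SnapshotsService internals; the sketch only mirrors the pattern of iterating a copied key set and failing each listener with a "no longer cluster-manager" style exception.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Consumer;

    // Minimal sketch: a registry of per-snapshot failure callbacks that is
    // drained wholesale when this node stops being the cluster-manager.
    final class ListenerRegistry<K> {
        private final Map<K, List<Consumer<Exception>>> listeners = new ConcurrentHashMap<>();

        void register(K key, Consumer<Exception> onFailure) {
            listeners.computeIfAbsent(key, k -> new ArrayList<>()).add(onFailure);
        }

        // Iterate over a copy of the key set (as the real code does with
        // new HashSet<>(snapshotCompletionListeners.keySet())) so removal
        // during iteration is safe, then fail every pending listener.
        void failAllOnFailOver(Exception cause) {
            for (K key : new ArrayList<>(listeners.keySet())) {
                List<Consumer<Exception>> pending = listeners.remove(key);
                if (pending != null) {
                    pending.forEach(l -> l.accept(cause));
                }
            }
        }
    }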
*/ synchronized void clear() { snapshotsToFinalize.clear(); diff --git a/server/src/main/java/org/opensearch/snapshots/package-info.java b/server/src/main/java/org/opensearch/snapshots/package-info.java index 82d7a0d88ff00..f43509cf671f9 100644 --- a/server/src/main/java/org/opensearch/snapshots/package-info.java +++ b/server/src/main/java/org/opensearch/snapshots/package-info.java @@ -30,13 +30,13 @@ * *

 * <h2>Preliminaries</h2>
 * <p>
- * There are two communication channels between all nodes and master in the snapshot functionality:
+ * There are two communication channels between all nodes and cluster-manager in the snapshot functionality:
 * <ul>
- * <li>The master updates the cluster state by adding, removing or altering the contents of its custom entry
+ * <li>The cluster-manager updates the cluster state by adding, removing or altering the contents of its custom entry
 * {@link org.opensearch.cluster.SnapshotsInProgress}. All nodes consume the state of the {@code SnapshotsInProgress} and will start or
 * abort relevant shard snapshot tasks accordingly.</li>
 * <li>Nodes that are executing shard snapshot tasks report either success or failure of their snapshot task by submitting a
- * {@link org.opensearch.snapshots.UpdateIndexShardSnapshotStatusRequest} to the master node that will update the
+ * {@link org.opensearch.snapshots.UpdateIndexShardSnapshotStatusRequest} to the cluster-manager node that will update the
 * snapshot's entry in the cluster state accordingly.</li>
 * </ul>
        * @@ -57,8 +57,8 @@ * the {@code SnapshotShardsService} will check if any local primary shards are to be snapshotted (signaled by the shard's snapshot state * being {@code INIT}). For those local primary shards found in state {@code INIT}) the snapshot process of writing the shard's data files * to the snapshot's {@link org.opensearch.repositories.Repository} is executed. Once the snapshot execution finishes for a shard an - * {@code UpdateIndexShardSnapshotStatusRequest} is sent to the master node signaling either status {@code SUCCESS} or {@code FAILED}. - * The master node will then update a shard's state in the snapshots {@code SnapshotsInProgress.Entry} whenever it receives such a + * {@code UpdateIndexShardSnapshotStatusRequest} is sent to the cluster-manager node signaling either status {@code SUCCESS} or {@code FAILED}. + * The cluster-manager node will then update a shard's state in the snapshots {@code SnapshotsInProgress.Entry} whenever it receives such a * {@code UpdateIndexShardSnapshotStatusRequest}. * *
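The second communication channel — status update requests flowing from data nodes to the cluster-manager — is deduplicated, which is also why the SnapshotShardsService hunks earlier clear their remoteFailedRequestDeduplicator when the cluster-manager changes. A generic sketch of the execute-once idea follows; the names are hypothetical and this is not the actual TransportRequestDeduplicator implementation.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.BiConsumer;

    // Equal requests share one in-flight send; later callers learn a
    // duplicate is already on the wire instead of sending again.
    final class RequestDeduplicator<T> {
        private final Map<T, Boolean> inFlight = new ConcurrentHashMap<>();

        void executeOnce(T request, Runnable onDuplicate, BiConsumer<T, Runnable> sender) {
            if (inFlight.putIfAbsent(request, Boolean.TRUE) == null) {
                // first caller performs the send; invoking the completion
                // callback frees the slot so a genuine retry can pass later
                sender.accept(request, () -> inFlight.remove(request));
            } else {
                onDuplicate.run();
            }
        }

        // Dropped wholesale on a cluster-manager change, so requests the old
        // cluster-manager may never have handled are sent again.
        void clear() {
            inFlight.clear();
        }
    }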
 * <li>If as a result of the received status update requests, all shards in the cluster state are in a completed state, i.e are marked as
@@ -82,12 +82,12 @@
 * <li>Aborting a snapshot starts by updating the state of the snapshot's {@code SnapshotsInProgress.Entry} to {@code ABORTED}.</li>
 *
 * <li>The snapshot's state change to {@code ABORTED} in cluster state is then picked up by the {@code SnapshotShardsService} on all nodes.
- * Those nodes that have shard snapshot actions for the snapshot assigned to them, will abort them and notify master about the shards
+ * Those nodes that have shard snapshot actions for the snapshot assigned to them, will abort them and notify cluster-manager about the shards
 * snapshot status accordingly. If the shard snapshot action completed or was in state {@code FINALIZE} when the abort was registered by
- * the {@code SnapshotShardsService}, then the shard's state will be reported to master as {@code SUCCESS}.
+ * the {@code SnapshotShardsService}, then the shard's state will be reported to cluster-manager as {@code SUCCESS}.
 * Otherwise, it will be reported as {@code FAILED}.</li>
 *
- * <li>Once all the shards are reported to master as either {@code SUCCESS} or {@code FAILED} the {@code SnapshotsService} on the master
+ * <li>Once all the shards are reported to cluster-manager as either {@code SUCCESS} or {@code FAILED} the {@code SnapshotsService} on the master
 * will finish the snapshot process as all shard's states are now completed and hence the snapshot can be completed as explained in point 4
 * of the snapshot creation section above.</li>
 *
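The abort rules in this hunk reduce to a small state mapping: work that already reached FINALIZE (or finished outright) is reported to the cluster-manager as SUCCESS, anything still running is failed. A compact sketch of that mapping, using an illustrative enum rather than the real SnapshotsInProgress.ShardState:

    // Stand-in for the shard snapshot states named in the documentation above.
    enum ShardState { INIT, STARTED, FINALIZE, ABORTED, SUCCESS, FAILED }

    final class AbortHandshake {
        // Terminal state a node reports after observing an abort for a shard.
        static ShardState terminalStateOnAbort(ShardState observed) {
            switch (observed) {
                case FINALIZE: // data already written, too late to abort
                case SUCCESS:  // action completed before the abort registered
                    return ShardState.SUCCESS;
                default:       // INIT/STARTED work is torn down and failed
                    return ShardState.FAILED;
            }
        }
    }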
      @@ -109,7 +109,7 @@ * *

 * <h2>Cloning a Snapshot</h2>
 * <p>
- * Cloning part of a snapshot is a process executed entirely on the master node. On a high level, the process of cloning a snapshot is
+ * Cloning part of a snapshot is a process executed entirely on the cluster-manager node. On a high level, the process of cloning a snapshot is
 * analogous to that of creating a snapshot from data in the cluster except that the source of data files is the snapshot repository
 * instead of the data nodes. It begins with cloning all shards and then finalizes the cloned snapshot the same way a normal snapshot would
 * be finalized. Concretely, it is executed as follows:
@@ -132,7 +132,7 @@
 * failures of the relevant indices.
 * <li>Once all shard counts are known and the health of all source indices data has been verified, we populate the
 * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the the relevant shard clone tasks.</li>
- * <li>After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, master executes them on its snapshot thread-pool
+ * <li>After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, cluster-manager executes them on its snapshot thread-pool
 * by invoking {@link org.opensearch.repositories.Repository#cloneShardSnapshot} for each shard that is to be cloned. Each completed
 * shard snapshot triggers a call to the {@link org.opensearch.snapshots.SnapshotsService#SHARD_STATE_EXECUTOR} which updates the
 * clone's {@code SnapshotsInProgress.Entry} to mark the shard clone operation completed.</li>
    5. @@ -151,7 +151,7 @@ * * If multiple snapshot creation jobs are started at the same time, the data-node operations of multiple snapshots may run in parallel * across different shards. If multiple snapshots want to snapshot a certain shard, then the shard snapshots for that shard will be - * executed one by one. This is enforced by the master node setting the shard's snapshot state to + * executed one by one. This is enforced by the cluster-manager node setting the shard's snapshot state to * {@link org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus#UNASSIGNED_QUEUED} for all but one snapshot. The order of * operations on a single shard is given by the order in which the snapshots were started. * As soon as all shards for a given snapshot have finished, it will be finalized as explained above. Finalization will happen one snapshot diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index a51af17ae8ea2..62453d08724ce 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,6 +32,8 @@ package org.opensearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; @@ -51,6 +53,8 @@ */ public class Task { + private static final Logger logger = LogManager.getLogger(Task.class); + /** * The request header to mark tasks with specific ids */ @@ -285,7 +289,7 @@ public void startThreadResourceTracking(long threadId, ResourceStatsType statsTy ); } } - threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); } /** @@ -332,17 +336,6 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); } - /** - * Individual tasks can override this if they want to support task resource tracking. We just need to make sure that - * the ThreadPool on which the task runs on have runnable wrapper similar to - * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newAutoQueueFixed} - * - * @return true if resource tracking is supported by the task - */ - public boolean supportsResourceTracking() { - return false; - } - /** * Report of the internal status of a task. 
These can vary wildly from task * to task because each task is implemented differently but we should try diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 37c10dfc0e6ab..1f6169768f245 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -89,9 +89,7 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - /** - * Rest headers that are copied to the task - */ + /** Rest headers that are copied to the task */ private final List taskHeaders; private final ThreadPool threadPool; @@ -105,7 +103,6 @@ public class TaskManager implements ClusterStateApplier { private final Map banedParents = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; - private final SetOnce taskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; @@ -128,10 +125,6 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS this.cancellationService.set(taskCancellationService); } - public void setTaskResourceTrackingService(TaskResourceTrackingService taskResourceTrackingService) { - this.taskResourceTrackingService.set(taskResourceTrackingService); - } - /** * Registers a task without parent task */ @@ -209,11 +202,6 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { */ public Task unregister(Task task) { logger.trace("unregister task for id: {}", task.getId()); - - if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { - taskResourceTrackingService.get().stopTracking(task); - } - if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { @@ -373,7 +361,6 @@ public int getBanCount() { * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. *

      * This method is called when a parent task that has children is cancelled. - * * @return a list of pending cancellable child tasks */ public List setBan(TaskId parentTaskId, String reason) { @@ -461,18 +448,6 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } - /** - * Takes actions when a task is registered and its execution starts - * - * @param task getting executed. - * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns - */ - public ThreadContext.StoredContext taskExecutionStarted(Task task) { - if (taskResourceTrackingService.get() == null) return () -> {}; - - return taskResourceTrackingService.get().startTracking(task); - } - private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java deleted file mode 100644 index 71b829e023385..0000000000000 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tasks; - -import com.sun.management.ThreadMXBean; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.ConcurrentMapLong; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.threadpool.ThreadPool; - -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; - -/** - * Service that helps track resource usage of tasks running on a node. 
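The measurement primitive behind the service being deleted here is worth pinning down: per-thread CPU time and allocated bytes come from the com.sun.management extension of ThreadMXBean, which is exactly why the removed class carried @SuppressForbidden. Below is a self-contained sketch of that probe; the class name is hypothetical, but the bean calls are the same ones the removed getResourceUsageMetricsForThread used.

    import java.lang.management.ManagementFactory;

    // Per-thread CPU and allocation sampling via the com.sun.management
    // extension of ThreadMXBean; both calls return -1 when unsupported
    // or disabled on the running JVM.
    public final class ThreadResourceProbe {
        private static final com.sun.management.ThreadMXBean BEAN =
            (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();

        public static long cpuNanos(long threadId) {
            return BEAN.getThreadCpuTime(threadId);
        }

        public static long allocatedBytes(long threadId) {
            return BEAN.getThreadAllocatedBytes(threadId);
        }

        public static void main(String[] args) {
            long id = Thread.currentThread().getId();
            System.out.println("cpu=" + cpuNanos(id) + "ns allocated=" + allocatedBytes(id) + "B");
        }
    }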
- */ -@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") -public class TaskResourceTrackingService implements RunnableTaskExecutionListener { - - private static final Logger logger = LogManager.getLogger(TaskManager.class); - - public static final Setting TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( - "task_resource_tracking.enabled", - true, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - public static final String TASK_ID = "TASK_ID"; - - private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - - private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - private final ThreadPool threadPool; - private volatile boolean taskResourceTrackingEnabled; - - @Inject - public TaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); - this.threadPool = threadPool; - clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); - } - - public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { - this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; - } - - public boolean isTaskResourceTrackingEnabled() { - return taskResourceTrackingEnabled; - } - - public boolean isTaskResourceTrackingSupported() { - return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); - } - - /** - * Executes logic only if task supports resource tracking and resource tracking setting is enabled. - *

      - * 1. Starts tracking the task in map of resourceAwareTasks. - * 2. Adds Task Id in thread context to make sure it's available while task is processed across multiple threads. - * - * @param task for which resources needs to be tracked - * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. - */ - public ThreadContext.StoredContext startTracking(Task task) { - if (task.supportsResourceTracking() == false - || isTaskResourceTrackingEnabled() == false - || isTaskResourceTrackingSupported() == false) { - return () -> {}; - } - - logger.debug("Starting resource tracking for task: {}", task.getId()); - resourceAwareTasks.put(task.getId(), task); - return addTaskIdToThreadContext(task); - } - - /** - * Stops tracking task registered earlier for tracking. - *

      - * It doesn't have feature enabled check to avoid any issues if setting was disable while the task was in progress. - *

      - * It's also responsible to stop tracking the current thread's resources against this task if not already done. - * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister - * happens before runnable finishes. - * - * @param task task which has finished and doesn't need resource tracking. - */ - public void stopTracking(Task task) { - logger.debug("Stopping resource tracking for task: {}", task.getId()); - try { - if (isCurrentThreadWorkingOnTask(task)) { - taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); - } - - List threadsWorkingOnTask = getThreadsWorkingOnTask(task); - if (threadsWorkingOnTask.size() > 0) { - logger.warn("No thread should be active when task finishes. Active threads: {}", threadsWorkingOnTask); - assert false : "No thread should be marked active when task finishes"; - } - } catch (Exception e) { - logger.warn("Failed while trying to mark the task execution on current thread completed.", e); - assert false; - } finally { - resourceAwareTasks.remove(task.getId()); - } - } - - /** - * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these - * and how much resources these have consumed till now. - * - * @param tasks for which resource stats needs to be refreshed. - */ - public void refreshResourceStats(Task... tasks) { - if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { - return; - } - - for (Task task : tasks) { - if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { - refreshResourceStats(task); - } - } - } - - private void refreshResourceStats(Task resourceAwareTask) { - try { - logger.debug("Refreshing resource stats for Task: {}", resourceAwareTask.getId()); - List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); - threadsWorkingOnTask.forEach( - threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) - ); - } catch (IllegalStateException e) { - logger.debug("Resource stats already updated."); - } - - } - - /** - * Called when a thread starts working on a task's runnable. - * - * @param taskId of the task for which runnable is starting - * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this - * thread - */ - @Override - public void taskExecutionStartedOnThread(long taskId, long threadId) { - try { - if (resourceAwareTasks.containsKey(taskId)) { - logger.debug("Task execution started on thread. Task: {}, Thread: {}", taskId, threadId); - - resourceAwareTasks.get(taskId) - .startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); - } - } catch (Exception e) { - logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); - assert false; - } - - } - - /** - * Called when a thread finishes working on a task's runnable. - * - * @param taskId of the task for which runnable is complete - * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread - */ - @Override - public void taskExecutionFinishedOnThread(long taskId, long threadId) { - try { - if (resourceAwareTasks.containsKey(taskId)) { - logger.debug("Task execution finished on thread. 
Task: {}, Thread: {}", taskId, threadId); - resourceAwareTasks.get(taskId) - .stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); - } - } catch (Exception e) { - logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); - assert false; - } - } - - public Map getResourceAwareTasks() { - return Collections.unmodifiableMap(resourceAwareTasks); - } - - private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { - ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( - ResourceStats.MEMORY, - threadMXBean.getThreadAllocatedBytes(threadId) - ); - ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); - return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; - } - - private boolean isCurrentThreadWorkingOnTask(Task task) { - long threadId = Thread.currentThread().getId(); - List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); - - for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { - if (threadResourceInfo.isActive()) { - return true; - } - } - return false; - } - - private List getThreadsWorkingOnTask(Task task) { - List activeThreads = new ArrayList<>(); - for (List threadResourceInfos : task.getResourceStats().values()) { - for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { - if (threadResourceInfo.isActive()) { - activeThreads.add(threadResourceInfo.getThreadId()); - } - } - } - return activeThreads; - } - - /** - * Adds Task Id in the ThreadContext. - *

      - * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext - * as well. - * - * @param task for which Task Id needs to be added in ThreadContext. - * @return StoredContext reference to restore the ThreadContext from which we created a new one. - * Caller can call context.restore() to get the existing ThreadContext back. - */ - private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { - ThreadContext threadContext = threadPool.getThreadContext(); - ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); - threadContext.putTransient(TASK_ID, task.getId()); - return storedContext; - } - -} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java index 9ee683e3928f6..8b45c38c8fb63 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -15,13 +15,11 @@ * for a specific stats type like worker_stats or response_stats etc., */ public class ThreadResourceInfo { - private final long threadId; private volatile boolean isActive = true; private final ResourceStatsType statsType; private final ResourceUsageInfo resourceUsageInfo; - public ThreadResourceInfo(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { - this.threadId = threadId; + public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { this.statsType = statsType; this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); } @@ -45,16 +43,12 @@ public ResourceStatsType getStatsType() { return statsType; } - public long getThreadId() { - return threadId; - } - public ResourceUsageInfo getResourceUsageInfo() { return resourceUsageInfo; } @Override public String toString() { - return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive + ", threadId=" + threadId; + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; } } diff --git a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java index 55b92c5d8bfcb..2bac5eba9fc28 100644 --- a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java @@ -48,7 +48,6 @@ import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicReference; /** * A builder for executors that automatically adjust the queue length as needed, depending on @@ -62,7 +61,6 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder maxQueueSizeSetting; private final Setting targetedResponseTimeSetting; private final Setting frameSizeSetting; - private final AtomicReference runnableTaskListener; AutoQueueAdjustingExecutorBuilder( final Settings settings, @@ -72,19 +70,6 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder runnableTaskListener ) { super(name); final String prefix = "thread_pool." 
+ name; @@ -199,7 +184,6 @@ public Iterator> settings() { Setting.Property.Deprecated, Setting.Property.Deprecated ); - this.runnableTaskListener = runnableTaskListener; } @Override @@ -246,8 +230,7 @@ ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings, final Threa frameSize, targetedResponseTime, threadFactory, - threadContext, - runnableTaskListener + threadContext ); // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type final ThreadPool.Info info = new ThreadPool.Info( diff --git a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java deleted file mode 100644 index 03cd66f80d044..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.threadpool; - -/** - * Listener for events when a runnable execution starts or finishes on a thread and is aware of the task for which the - * runnable is associated to. - */ -public interface RunnableTaskExecutionListener { - - /** - * Sends an update when ever a task's execution start on a thread - * - * @param taskId of task which has started - * @param threadId of thread which is executing the task - */ - void taskExecutionStartedOnThread(long taskId, long threadId); - - /** - * - * Sends an update when task execution finishes on a thread - * - * @param taskId of task which has finished - * @param threadId of thread which executed the task - */ - void taskExecutionFinishedOnThread(long taskId, long threadId); -} diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java deleted file mode 100644 index 183b9b2f4cf9a..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.threadpool; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.concurrent.WrappedRunnable; -import org.opensearch.tasks.TaskManager; - -import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; - -import static java.lang.Thread.currentThread; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -/** - * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to - * entities listening to the events. - * - * It's able to associate runnable with a task with the help of task Id available in thread context. 
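The association this deleted class performed — handing a task id to whatever thread picks up the runnable, via a transient TASK_ID entry in the ThreadContext — can be sketched with a plain ThreadLocal. This is an illustrative stand-in under that assumption, not the removed OpenSearch classes:

    // A task id visible to the current thread, restored to its previous
    // value when the returned handle closes (mirroring the deleted
    // StoredContext-based restore behaviour).
    final class TaskIdContext {
        private static final ThreadLocal<Long> TASK_ID = new ThreadLocal<>();

        static AutoCloseable startTracking(long taskId) {
            Long previous = TASK_ID.get();
            TASK_ID.set(taskId);
            return () -> {
                if (previous == null) TASK_ID.remove();
                else TASK_ID.set(previous);
            };
        }

        static Long currentTaskId() {
            return TASK_ID.get(); // null when no task is being tracked
        }
    }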
- */ -public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { - - private static final Logger logger = LogManager.getLogger(TaskManager.class); - - private final Runnable original; - private final ThreadContext threadContext; - private final AtomicReference runnableTaskListener; - - public TaskAwareRunnable( - final ThreadContext threadContext, - final Runnable original, - final AtomicReference runnableTaskListener - ) { - this.original = original; - this.threadContext = threadContext; - this.runnableTaskListener = runnableTaskListener; - } - - @Override - public void onFailure(Exception e) { - ExceptionsHelper.reThrowIfNotNull(e); - } - - @Override - public boolean isForceExecution() { - return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution(); - } - - @Override - public void onRejection(final Exception e) { - if (original instanceof AbstractRunnable) { - ((AbstractRunnable) original).onRejection(e); - } else { - ExceptionsHelper.reThrowIfNotNull(e); - } - } - - @Override - protected void doRun() throws Exception { - assert runnableTaskListener.get() != null : "Listener should be attached"; - Long taskId = threadContext.getTransient(TASK_ID); - if (Objects.nonNull(taskId)) { - runnableTaskListener.get().taskExecutionStartedOnThread(taskId, currentThread().getId()); - } else { - logger.debug("Task Id not available in thread context. Skipping update. Thread Info: {}", Thread.currentThread()); - } - try { - original.run(); - } finally { - if (Objects.nonNull(taskId)) { - runnableTaskListener.get().taskExecutionFinishedOnThread(taskId, currentThread().getId()); - } - } - } - - @Override - public Runnable unwrap() { - return original; - } -} diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 5e8f515f6c577..c2530ccee5588 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -68,7 +68,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -185,14 +184,6 @@ public Collection builders() { ); public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { - this(settings, null, customBuilders); - } - - public ThreadPool( - final Settings settings, - final AtomicReference runnableTaskListener, - final ExecutorBuilder... 
customBuilders - ) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -206,20 +197,11 @@ public ThreadPool( builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); builders.put( Names.SEARCH, - new AutoQueueAdjustingExecutorBuilder( - settings, - Names.SEARCH, - searchThreadPoolSize(allocatedProcessors), - 1000, - 1000, - 1000, - 2000, - runnableTaskListener - ) + new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000, 1000, 1000, 2000) ); builders.put( Names.SEARCH_THROTTLED, - new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200, runnableTaskListener) + new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200) ); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded diff --git a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java index 8ef42436546f1..61129565b23f3 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java @@ -100,7 +100,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); - // if we are not master eligible we don't need a dedicated channel to publish the state + // if we are not cluster-manager eligible we don't need a dedicated channel to publish the state builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); // if we are not a data-node we don't need any dedicated channels for recovery builder.addConnections(DiscoveryNode.isDataNode(settings) ? 
connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 73be6e5b601e9..dcb021531f0ac 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -37,7 +37,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -82,8 +81,6 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); - ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); - Releasable unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { @@ -102,7 +99,6 @@ public void processMessageReceived(Request request, TransportChannel channel) th unregisterTask = null; } finally { Releasables.close(unregisterTask); - contextToRestore.restore(); } } diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java index 4d33f071328c1..ba58bb37d8d48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java @@ -68,7 +68,7 @@ public void executeOnce(T request, ActionListener listener, BiConsumer> getEvents() { return Collections.unmodifiableList(new ArrayList<>(events)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java deleted file mode 100644 index 23877ac0b7395..0000000000000 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ /dev/null @@ -1,633 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.action.admin.cluster.node.tasks; - -import com.sun.management.ThreadMXBean; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; -import org.opensearch.action.support.nodes.BaseNodesRequest; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.tasks.CancellableTask; -import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; -import org.opensearch.tasks.TaskInfo; -import org.opensearch.test.tasks.MockTaskManager; -import org.opensearch.test.tasks.MockTaskManagerListener; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") -public class ResourceAwareTasksTests extends TaskManagerTestCase { - - private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - - public static class ResourceAwareNodeRequest extends BaseNodeRequest { - protected String requestName; - - public ResourceAwareNodeRequest() { - super(); - } - - public ResourceAwareNodeRequest(StreamInput in) throws IOException { - super(in); - requestName = in.readString(); - } - - public ResourceAwareNodeRequest(NodesRequest request) { - requestName = request.requestName; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(requestName); - } - - @Override - public String getDescription() { - return "ResourceAwareNodeRequest[" + requestName + "]"; - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { - @Override - public boolean shouldCancelChildrenOnCancellation() { - return false; - } - - @Override - public boolean supportsResourceTracking() { - return true; - } - }; - } - } - - public static class NodesRequest extends BaseNodesRequest { - private final String requestName; - - private NodesRequest(StreamInput in) throws IOException { - super(in); - requestName = in.readString(); - } - - public NodesRequest(String requestName, String... 
nodesIds) { - super(nodesIds); - this.requestName = requestName; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(requestName); - } - - @Override - public String getDescription() { - return "NodesRequest[" + requestName + "]"; - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { - @Override - public boolean shouldCancelChildrenOnCancellation() { - return true; - } - }; - } - } - - /** - * Simulates a task which executes work on search executor. - */ - class ResourceAwareNodesAction extends AbstractTestNodesAction { - private final TaskTestContext taskTestContext; - private final boolean blockForCancellation; - - ResourceAwareNodesAction( - String actionName, - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - boolean shouldBlock, - TaskTestContext taskTestContext - ) { - super(actionName, threadPool, clusterService, transportService, NodesRequest::new, ResourceAwareNodeRequest::new); - this.taskTestContext = taskTestContext; - this.blockForCancellation = shouldBlock; - } - - @Override - protected ResourceAwareNodeRequest newNodeRequest(NodesRequest request) { - return new ResourceAwareNodeRequest(request); - } - - @Override - protected NodeResponse nodeOperation(ResourceAwareNodeRequest request, Task task) { - assert task.supportsResourceTracking(); - - AtomicLong threadId = new AtomicLong(); - Future result = threadPool.executor(ThreadPool.Names.SEARCH).submit(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - ExceptionsHelper.reThrowIfNotNull(e); - } - - @Override - @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") - protected void doRun() { - taskTestContext.memoryConsumptionWhenExecutionStarts = threadMXBean.getThreadAllocatedBytes( - Thread.currentThread().getId() - ); - threadId.set(Thread.currentThread().getId()); - - if (taskTestContext.operationStartValidator != null) { - try { - taskTestContext.operationStartValidator.accept(threadId.get()); - } catch (AssertionError error) { - throw new RuntimeException(error); - } - } - - Object[] allocation1 = new Object[1000000]; // 4MB - - if (blockForCancellation) { - // Simulate a job that takes forever to finish - // Using periodic checks method to identify that the task was cancelled - try { - boolean taskCancelled = waitUntil(((CancellableTask) task)::isCancelled); - if (taskCancelled) { - throw new TaskCancelledException("Task Cancelled"); - } else { - fail("It should have thrown an exception"); - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - } - - Object[] allocation2 = new Object[1000000]; // 4MB - } - }); - - try { - result.get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e.getCause()); - } finally { - if (taskTestContext.operationFinishedValidator != null) { - taskTestContext.operationFinishedValidator.accept(threadId.get()); - } - } - - return new NodeResponse(clusterService.localNode()); - } - - @Override - protected NodeResponse nodeOperation(ResourceAwareNodeRequest request) { - throw new UnsupportedOperationException("the task parameter is required"); - } - } - - private TaskTestContext startResourceAwareNodesAction( - TestNode node, - boolean blockForCancellation, - TaskTestContext taskTestContext, - ActionListener listener - ) { - 
NodesRequest request = new NodesRequest("Test Request", node.getNodeId()); - - taskTestContext.requestCompleteLatch = new CountDownLatch(1); - - ResourceAwareNodesAction action = new ResourceAwareNodesAction( - "internal:resourceAction", - threadPool, - node.clusterService, - node.transportService, - blockForCancellation, - taskTestContext - ); - taskTestContext.mainTask = action.execute(request, listener); - return taskTestContext; - } - - private static class TaskTestContext { - private Task mainTask; - private CountDownLatch requestCompleteLatch; - private Consumer operationStartValidator; - private Consumer operationFinishedValidator; - private long memoryConsumptionWhenExecutionStarts; - } - - public void testBasicTaskResourceTracking() throws Exception { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // One thread is currently working on task but not finished - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); - assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); - assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); - }; - - taskTestContext.operationFinishedValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // Thread has finished working on the task's runnable - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - - long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations - long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); - - assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); - assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); - }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testTaskResourceTrackingDuringTaskCancellation() throws Exception { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = 
-        taskTestContext.operationStartValidator = threadId -> {
-            Task task = resourceTasks.values().stream().findAny().get();
-
-            // One thread is currently working on task but not finished
-            assertEquals(1, resourceTasks.size());
-            assertEquals(1, task.getResourceStats().size());
-            assertEquals(1, task.getResourceStats().get(threadId).size());
-            assertTrue(task.getResourceStats().get(threadId).get(0).isActive());
-            assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos());
-            assertEquals(0, task.getTotalResourceStats().getMemoryInBytes());
-        };
-
-        taskTestContext.operationFinishedValidator = threadId -> {
-            Task task = resourceTasks.values().stream().findAny().get();
-
-            // Thread has finished working on the task's runnable
-            assertEquals(1, resourceTasks.size());
-            assertEquals(1, task.getResourceStats().size());
-            assertEquals(1, task.getResourceStats().get(threadId).size());
-            assertFalse(task.getResourceStats().get(threadId).get(0).isActive());
-
-            // allocations are completed before the task is cancelled
-            long expectedArrayAllocationOverhead = 4012688; // Task's memory overhead due to array allocations
-            long taskCancellationOverhead = 30000; // Task cancellation overhead ~ 30Kb
-            long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes();
-
-            long expectedOverhead = expectedArrayAllocationOverhead + taskCancellationOverhead;
-            assertTrue(actualTaskMemoryOverhead - expectedOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts);
-            assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0);
-        };
-
-        startResourceAwareNodesAction(testNodes[0], true, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                responseReference.set(listTasksResponse);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                throwableReference.set(e);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        // Cancel main task
-        CancelTasksRequest request = new CancelTasksRequest();
-        request.setReason("Cancelling request to verify Task resource tracking behaviour");
-        request.setTaskId(new TaskId(testNodes[0].getNodeId(), taskTestContext.mainTask.getId()));
-        ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request);
-
-        // Waiting for whole request to complete and return successfully till client
-        taskTestContext.requestCompleteLatch.await();
-
-        assertEquals(0, resourceTasks.size());
-        assertNull(throwableReference.get());
-        assertNotNull(responseReference.get());
-        assertEquals(1, responseReference.get().failureCount());
-        assertEquals(TaskCancelledException.class, findActualException(responseReference.get().failures().get(0)).getClass());
-    }
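The cancellation that this deleted test drives directly through the transport action can also be issued through the regular client API; a hedged sketch (the node id, task id, and client wiring are placeholders):

    import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
    import org.opensearch.client.Client;
    import org.opensearch.tasks.TaskId;

    public final class CancelTaskExample {
        // Sends the same kind of cancel request the test builds by hand.
        static void cancel(Client client, String nodeId, long taskId) {
            CancelTasksRequest request = new CancelTasksRequest();
            request.setReason("Cancelling request to verify task behaviour");
            request.setTaskId(new TaskId(nodeId, taskId));
            client.admin().cluster().cancelTasks(request).actionGet();
        }
    }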
-
-    public void testTaskResourceTrackingDisabled() throws Exception {
-        setup(false, false);
-
-        final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
-        TaskTestContext taskTestContext = new TaskTestContext();
-
-        Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks();
-
-        taskTestContext.operationStartValidator = threadId -> { assertEquals(0, resourceTasks.size()); };
-
-        taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); };
-
-        startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                responseReference.set(listTasksResponse);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                throwableReference.set(e);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        // Waiting for whole request to complete and return successfully till client
-        taskTestContext.requestCompleteLatch.await();
-
-        assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get());
-    }
-
-    public void testTaskResourceTrackingDisabledWhileTaskInProgress() throws Exception {
-        setup(true, false);
-
-        final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
-        TaskTestContext taskTestContext = new TaskTestContext();
-
-        Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks();
-
-        taskTestContext.operationStartValidator = threadId -> {
-            Task task = resourceTasks.values().stream().findAny().get();
-            // One thread is currently working on task but not finished
-            assertEquals(1, resourceTasks.size());
-            assertEquals(1, task.getResourceStats().size());
-            assertEquals(1, task.getResourceStats().get(threadId).size());
-            assertTrue(task.getResourceStats().get(threadId).get(0).isActive());
-            assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos());
-            assertEquals(0, task.getTotalResourceStats().getMemoryInBytes());
-
-            testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(false);
-        };
-
-        taskTestContext.operationFinishedValidator = threadId -> {
-            Task task = resourceTasks.values().stream().findAny().get();
-            // Thread has finished working on the task's runnable
-            assertEquals(1, resourceTasks.size());
-            assertEquals(1, task.getResourceStats().size());
-            assertEquals(1, task.getResourceStats().get(threadId).size());
-            assertFalse(task.getResourceStats().get(threadId).get(0).isActive());
-
-            long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations
-            long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes();
-
-            assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts);
-            assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0);
-        };
-
-        startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                responseReference.set(listTasksResponse);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                throwableReference.set(e);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        // Waiting for whole request to complete and return successfully till client
-        taskTestContext.requestCompleteLatch.await();
-
-        assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get());
-    }
-
-    public void testTaskResourceTrackingEnabledWhileTaskInProgress() throws Exception {
-        setup(false, false);
-
-        final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
-        TaskTestContext taskTestContext = new TaskTestContext();
-
-        Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks();
-
-        taskTestContext.operationStartValidator = threadId -> {
-            assertEquals(0, resourceTasks.size());
-
-            testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(true);
-        };
-
-        taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); };
-
-        startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                responseReference.set(listTasksResponse);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                throwableReference.set(e);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        // Waiting for whole request to complete and return successfully till client
-        taskTestContext.requestCompleteLatch.await();
-
-        assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get());
-    }
-
-    public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException {
-        setup(true, false);
-
-        final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
-
-        TaskTestContext taskTestContext = new TaskTestContext();
-
-        Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks();
-
-        taskTestContext.operationStartValidator = threadId -> {
-            ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking(
-                testNodes[0].transportListTasksAction,
-                new ListTasksRequest().setActions("internal:resourceAction*").setDetailed(true)
-            );
-
-            TaskInfo taskInfo = listTasksResponse.getTasks().get(1);
-
-            assertNotNull(taskInfo.getResourceStats());
-            assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo());
-            assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getCpuTimeInNanos() > 0);
-            assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getMemoryInBytes() > 0);
-        };
-
-        startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                responseReference.set(listTasksResponse);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                throwableReference.set(e);
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        // Waiting for whole request to complete and return successfully till client
-        taskTestContext.requestCompleteLatch.await();
-
-        assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get());
-    }
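The on-demand refresh exercised above appears to hinge on ListTasksRequest.setDetailed(true), which makes the listing report fresh per-task resource usage instead of the periodically sampled values. A usage sketch against the public API (the action pattern and client wiring are placeholders):

    import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
    import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
    import org.opensearch.client.Client;

    public final class ListDetailedTasksExample {
        static ListTasksResponse listResourceActions(Client client) {
            ListTasksRequest request = new ListTasksRequest()
                .setActions("internal:resourceAction*") // same pattern the test polls
                .setDetailed(true);                     // requests detailed, refreshed stats
            return client.admin().cluster().listTasks(request).actionGet();
        }
    }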
-
-    public void testTaskIdPersistsInThreadContext() throws InterruptedException {
-        setup(true, true);
-
-        final List<Long> taskIdsAddedToThreadContext = new ArrayList<>();
-        final List<Long> taskIdsRemovedFromThreadContext = new ArrayList<>();
-        AtomicLong actualTaskIdInThreadContext = new AtomicLong(-1);
-        AtomicLong expectedTaskIdInThreadContext = new AtomicLong(-2);
-
-        ((MockTaskManager) testNodes[0].transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
-            @Override
-            public void waitForTaskCompletion(Task task) {}
-
-            @Override
-            public void taskExecutionStarted(Task task, Boolean closeableInvoked) {
-                if (closeableInvoked) {
-                    taskIdsRemovedFromThreadContext.add(task.getId());
-                } else {
-                    taskIdsAddedToThreadContext.add(task.getId());
-                }
-            }
-
-            @Override
-            public void onTaskRegistered(Task task) {}
-
-            @Override
-            public void onTaskUnregistered(Task task) {
-                if (task.getAction().equals("internal:resourceAction[n]")) {
-                    expectedTaskIdInThreadContext.set(task.getId());
-                    actualTaskIdInThreadContext.set(threadPool.getThreadContext().getTransient(TASK_ID));
-                }
-            }
-        });
-
-        TaskTestContext taskTestContext = new TaskTestContext();
-        startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() {
-            @Override
-            public void onResponse(NodesResponse listTasksResponse) {
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                taskTestContext.requestCompleteLatch.countDown();
-            }
-        });
-
-        taskTestContext.requestCompleteLatch.await();
-
-        assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get());
-        assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray()));
-    }
-
-    private void setup(boolean resourceTrackingEnabled, boolean useMockTaskManager) {
-        Settings settings = Settings.builder()
-            .put("task_resource_tracking.enabled", resourceTrackingEnabled)
-            .put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), useMockTaskManager)
-            .build();
-        setupTestNodes(settings);
-        connectNodes(testNodes[0]);
-
-        runnableTaskListener.set(testNodes[0].taskResourceTrackingService);
-    }
-
-    private Throwable findActualException(Exception e) {
-        Throwable throwable = e.getCause();
-        while (throwable.getCause() != null) {
-            throwable = throwable.getCause();
-        }
-        return throwable;
-    }
-
-    private void assertTasksRequestFinishedSuccessfully(int activeResourceTasks, NodesResponse nodesResponse, Throwable throwable) {
-        assertEquals(0, activeResourceTasks);
-        assertNull(throwable);
-        assertNotNull(nodesResponse);
-        assertEquals(0, nodesResponse.failureCount());
-    }
-
-}
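The thread-context assertions in the deleted test rely on transient headers, which are scoped to the current context and never serialized over the wire. A small sketch of that ThreadContext behaviour (the key name is illustrative):

    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.util.concurrent.ThreadContext;

    public final class TransientHeaderExample {
        public static void main(String[] args) {
            ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
            threadContext.putTransient("TASK_ID", 42L);
            Long taskId = threadContext.getTransient("TASK_ID");
            System.out.println("transient task id: " + taskId);

            // stashContext() opens a fresh scope; plain transients do not survive it.
            try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
                assert threadContext.getTransient("TASK_ID") == null;
            }
        }
    }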
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
index 51fc5d80f2de3..c8411b31e0709 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
@@ -59,10 +59,8 @@
 import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.tasks.TaskCancellationService;
 import org.opensearch.tasks.TaskManager;
-import org.opensearch.tasks.TaskResourceTrackingService;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.tasks.MockTaskManager;
-import org.opensearch.threadpool.RunnableTaskExecutionListener;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
@@ -76,7 +74,6 @@
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 
 import static java.util.Collections.emptyMap;
@@ -92,12 +89,10 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase {
     protected ThreadPool threadPool;
     protected TestNode[] testNodes;
     protected int nodesCount;
-    protected AtomicReference<RunnableTaskExecutionListener> runnableTaskListener;
 
     @Before
     public void setupThreadPool() {
-        runnableTaskListener = new AtomicReference<>();
-        threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener);
+        threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName());
     }
 
     public void setupTestNodes(Settings settings) {
@@ -230,22 +225,14 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool
             transportService.start();
             clusterService = createClusterService(threadPool, discoveryNode.get());
             clusterService.addStateApplier(transportService.getTaskManager());
-            taskResourceTrackingService = new TaskResourceTrackingService(settings, clusterService.getClusterSettings(), threadPool);
-            transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService);
             ActionFilters actionFilters = new ActionFilters(emptySet());
-            transportListTasksAction = new TransportListTasksAction(
-                clusterService,
-                transportService,
-                actionFilters,
-                taskResourceTrackingService
-            );
+            transportListTasksAction = new TransportListTasksAction(clusterService, transportService, actionFilters);
             transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters);
             transportService.acceptIncomingRequests();
         }
 
         public final ClusterService clusterService;
         public final TransportService transportService;
-        public final TaskResourceTrackingService taskResourceTrackingService;
         private final SetOnce<DiscoveryNode> discoveryNode = new SetOnce<>();
         public final TransportListTasksAction transportListTasksAction;
         public final TransportCancelTasksAction transportCancelTasksAction;
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
index 202f1b7dcb5b4..4b98870422ce8 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
@@ -91,7 +91,6 @@
 import static java.util.Collections.emptyMap;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.sameInstance;
-import static org.mockito.Answers.RETURNS_MOCKS;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyInt;
 import static org.mockito.Mockito.anyString;
@@ -225,7 +224,7 @@ public void setupAction() {
         remoteResponseHandler = ArgumentCaptor.forClass(TransportResponseHandler.class);
 
         // setup services that will be called by action
-        transportService = mock(TransportService.class, RETURNS_MOCKS);
+        transportService = mock(TransportService.class);
         clusterService = mock(ClusterService.class);
         localIngest = true;
         // setup nodes for local and remote
diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
index 43bee8c7bc6bd..744c833fa54e9 100644
--- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
+++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
@@ -125,7 +125,7 @@ public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterA
         }
 
         @Override
-        protected void waitForNewMasterAndRetry(
+        protected void waitForNewClusterManagerAndRetry(
             String actionName,
             ClusterStateObserver observer,
             TransportRequest request,
@@ -133,7 +133,7 @@ protected void waitForNewMasterAndRetry(
             Predicate<ClusterState> changePredicate
         ) {
             onBeforeWaitForNewMasterAndRetry.run();
-            super.waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate);
+            super.waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate);
             onAfterWaitForNewMasterAndRetry.run();
         }
     }
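The waitForNewClusterManagerAndRetry change follows the same shape as the rest of this PR's inclusive-naming renames: the cluster-manager spelling becomes the real implementation and, where an old entry point is kept for compatibility, it is reduced to a delegate. A schematic sketch of that pattern only; the names and signatures below are simplified, not the production ones:

    abstract class RetryableAction {
        protected void waitForNewClusterManagerAndRetry(String actionName) {
            // real retry logic lives here
        }

        /** @deprecated use {@link #waitForNewClusterManagerAndRetry(String)} */
        @Deprecated
        protected void waitForNewMasterAndRetry(String actionName) {
            waitForNewClusterManagerAndRetry(actionName);
        }
    }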
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
index 49ef48cd1e9c6..6bd2d1e70033a 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
@@ -50,6 +50,7 @@
 
 import java.util.HashSet;
 
+import static org.hamcrest.Matchers.is;
 import static org.opensearch.test.VersionUtils.allVersions;
 import static org.opensearch.test.VersionUtils.maxCompatibleVersion;
 import static org.opensearch.test.VersionUtils.randomCompatibleVersion;
@@ -198,4 +199,14 @@ public void testUpdatesNodeWithNewRoles() throws Exception {
         assertThat(result.resultingState.getNodes().get(actualNode.getId()).getRoles(), equalTo(actualNode.getRoles()));
     }
+
+    /**
+     * Validate isBecomeMasterTask() can identify "become cluster manager task" properly
+     */
+    public void testIsBecomeClusterManagerTask() {
+        JoinTaskExecutor.Task joinTaskOfMaster = JoinTaskExecutor.newBecomeMasterTask();
+        assertThat(joinTaskOfMaster.isBecomeMasterTask(), is(true));
+        JoinTaskExecutor.Task joinTaskOfClusterManager = JoinTaskExecutor.newBecomeClusterManagerTask();
+        assertThat(joinTaskOfClusterManager.isBecomeMasterTask(), is(true));
+    }
 }
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
index 69dc332e2bd29..b06799312d99a 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
@@ -509,7 +509,7 @@ public void testLeaderBehaviour() {
             CoordinationStateRejectedException cause = (CoordinationStateRejectedException) handler.transportException.getRootCause();
             assertThat(
                 cause.getMessage(),
-                equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the master")
+                equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the cluster-manager")
             );
         }
     }
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
index 3b309908a1df0..f00361160f2d7 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
@@ -329,17 +329,17 @@ public void testJoinWithHigherTermElectsLeader() {
             () -> new StatusInfo(HEALTHY, "healthy-info")
         );
         assertFalse(isLocalNodeElectedMaster());
-        assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId());
+        assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId());
         long newTerm = initialTerm + randomLongBetween(1, 10);
         SimpleFuture fut = joinNodeAsync(
             new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))
         );
         assertEquals(Coordinator.Mode.LEADER, coordinator.getMode());
-        assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId());
+        assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId());
         deterministicTaskQueue.runAllRunnableTasks();
         assertTrue(fut.isDone());
         assertTrue(isLocalNodeElectedMaster());
-        assertTrue(coordinator.getStateForMasterService().nodes().isLocalNodeElectedMaster());
+        assertTrue(coordinator.getStateForClusterManagerService().nodes().isLocalNodeElectedMaster());
     }
 
     public void testJoinWithHigherTermButBetterStateGetsRejected() {
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
index aff9e1cfe7a8c..bc36a57fed125 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
@@ -277,9 +277,9 @@ public void testDeltas() {
         DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA);
 
         if (masterA == null) {
-            assertThat(delta.previousMasterNode(), nullValue());
+            assertThat(delta.previousClusterManagerNode(), nullValue());
         } else {
-            assertThat(delta.previousMasterNode().getId(), equalTo(masterAId));
+            assertThat(delta.previousClusterManagerNode().getId(), equalTo(masterAId));
         }
         if (masterB == null) {
             assertThat(delta.newMasterNode(), nullValue());
diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
index b3c24ef55c3ba..04b4044864dbd 100644
--- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
@@ -298,12 +298,12 @@ public void testLocalNodeMasterListenerCallbacks() {
         AtomicBoolean isMaster = new AtomicBoolean();
         timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() {
             @Override
-            public void onMaster() {
+            public void onClusterManager() {
                 isMaster.set(true);
             }
 
             @Override
-            public void offMaster() {
+            public void offClusterManager() {
                 isMaster.set(false);
             }
         });
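ClusterApplierServiceTests now exercises the renamed LocalNodeMasterListener callbacks. A hedged sketch of a listener built against the renamed interface, assuming onClusterManager() and offClusterManager() are its only abstract callbacks as the test above suggests (the class name and field are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;

    import org.opensearch.cluster.LocalNodeMasterListener;

    public class ClusterManagerTracker implements LocalNodeMasterListener {
        private final AtomicBoolean isClusterManager = new AtomicBoolean();

        @Override
        public void onClusterManager() {
            isClusterManager.set(true); // local node was just elected
        }

        @Override
        public void offClusterManager() {
            isClusterManager.set(false); // local node stepped down
        }

        public boolean isClusterManager() {
            return isClusterManager.get();
        }
    }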
diff --git a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java
index e7873723bec22..8a872bc50aeb0 100644
--- a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java
+++ b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java
@@ -75,7 +75,7 @@ public void testSingleStringSetting() throws Exception {
         // hashes not yet published
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false));
         // publish
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager();
         ConsistentSettingsService consistentService = new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting));
         assertThat(consistentService.areAllConsistent(), is(true));
         // change value
@@ -83,7 +83,7 @@ public void testSingleStringSetting() throws Exception {
         assertThat(consistentService.areAllConsistent(), is(false));
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false));
         // publish change
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager();
         assertThat(consistentService.areAllConsistent(), is(true));
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true));
     }
@@ -108,7 +108,7 @@ public void testSingleAffixSetting() throws Exception {
             is(false)
         );
         // publish
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager();
         ConsistentSettingsService consistentService = new ConsistentSettingsService(
             settings,
             clusterService,
@@ -123,7 +123,7 @@ public void testSingleAffixSetting() throws Exception {
             is(false)
         );
         // publish change
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager();
         assertThat(consistentService.areAllConsistent(), is(true));
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true));
         // add value
@@ -136,7 +136,7 @@ public void testSingleAffixSetting() throws Exception {
             is(false)
         );
         // publish
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager();
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true));
         // remove value
         secureSettings = new MockSecureSettings();
@@ -173,7 +173,7 @@ public void testStringAndAffixSettings() throws Exception {
             is(false)
         );
         // publish only the simple string setting
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager();
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true));
         assertThat(
             new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(),
@@ -184,7 +184,7 @@ public void testStringAndAffixSettings() throws Exception {
             is(false)
         );
         // publish only the affix string setting
-        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster();
+        new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager();
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false));
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true));
         assertThat(
@@ -193,7 +193,7 @@ public void testStringAndAffixSettings() throws Exception {
         );
         // publish both settings
         new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)).newHashPublisher()
-            .onMaster();
+            .onClusterManager();
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true));
         assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true));
         assertThat(
diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java
new file mode 100644
index 0000000000000..1084f9c658db4
--- /dev/null
+++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.junit.BeforeClass;
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+public class FeatureFlagTests extends OpenSearchTestCase {
+
+    @SuppressForbidden(reason = "sets the feature flag")
+    @BeforeClass
+    public static void enableFeature() {
+        AccessController.doPrivileged((PrivilegedAction<String>) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true"));
+    }
+
+    public void testReplicationTypeFeatureFlag() {
+        String replicationTypeFlag = FeatureFlags.REPLICATION_TYPE;
+        assertNotNull(System.getProperty(replicationTypeFlag));
+        assertTrue(FeatureFlags.isEnabled(replicationTypeFlag));
+    }
+
+    public void testMissingFeatureFlag() {
+        String testFlag = "missingFeatureFlag";
+        assertNull(System.getProperty(testFlag));
+        assertFalse(FeatureFlags.isEnabled(testFlag));
+    }
+
+    public void testNonBooleanFeatureFlag() {
+        String javaVersionProperty = "java.version";
+        assertNotNull(System.getProperty(javaVersionProperty));
+        assertFalse(FeatureFlags.isEnabled(javaVersionProperty));
+    }
+}
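As the new test shows, FeatureFlags gates are plain JVM system properties, which is why the fixture can flip them with System.setProperty. A sketch of reading one (the flag name below is hypothetical; real flags are constants on FeatureFlags such as FeatureFlags.REPLICATION_TYPE):

    import org.opensearch.common.util.FeatureFlags;

    public final class FeatureFlagExample {
        public static void main(String[] args) {
            // Hypothetical flag name used purely for illustration.
            System.setProperty("opensearch.experimental.feature.example.enabled", "true");
            boolean enabled = FeatureFlags.isEnabled("opensearch.experimental.feature.example.enabled");
            System.out.println("feature enabled: " + enabled);
        }
    }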
diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java
index 64286e47b4966..9c70accaca3e4 100644
--- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java
+++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java
@@ -48,7 +48,6 @@
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.sameInstance;
-import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID;
 
 public class ThreadContextTests extends OpenSearchTestCase {
 
@@ -155,15 +154,6 @@ public void testNewContextWithClearedTransients() {
         assertEquals(1, threadContext.getResponseHeaders().get("baz").size());
     }
 
-    public void testStashContextWithPreservedTransients() {
-        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
-        threadContext.putTransient("foo", "bar");
-        threadContext.putTransient(TASK_ID, 1);
-        threadContext.stashContext();
-        assertNull(threadContext.getTransient("foo"));
-        assertEquals(1, (int) threadContext.getTransient(TASK_ID));
-    }
-
     public void testStashWithOrigin() {
         final String origin = randomAlphaOfLengthBetween(4, 16);
         final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
index 4d4fea5a41b82..978db14225f00 100644
--- a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
+++ b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
@@ -43,72 +43,80 @@ public class XContentTypeTests extends OpenSearchTestCase {
 
     public void testFromJson() throws Exception {
         String mediaType = "application/json";
         XContentType expectedXContentType = XContentType.JSON;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+    }
+
+    public void testFromNdJson() throws Exception {
+        String mediaType = "application/x-ndjson";
+        XContentType expectedXContentType = XContentType.JSON;
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
     }
 
     public void testFromJsonUppercase() throws Exception {
         String mediaType = "application/json".toUpperCase(Locale.ROOT);
         XContentType expectedXContentType = XContentType.JSON;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
     }
 
     public void testFromYaml() throws Exception {
         String mediaType = "application/yaml";
         XContentType expectedXContentType = XContentType.YAML;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
     }
 
     public void testFromSmile() throws Exception {
         String mediaType = "application/smile";
         XContentType expectedXContentType = XContentType.SMILE;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
     }
 
     public void testFromCbor() throws Exception {
         String mediaType = "application/cbor";
         XContentType expectedXContentType = XContentType.CBOR;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
    }
 
     public void testFromWildcard() throws Exception {
         String mediaType = "application/*";
         XContentType expectedXContentType = XContentType.JSON;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
     }
 
     public void testFromWildcardUppercase() throws Exception {
         String mediaType = "APPLICATION/*";
         XContentType expectedXContentType = XContentType.JSON;
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
-        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
+        assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
     }
 
     public void testFromRubbish() throws Exception {
-        assertThat(XContentType.fromMediaTypeOrFormat(null), nullValue());
-        assertThat(XContentType.fromMediaTypeOrFormat(""), nullValue());
-        assertThat(XContentType.fromMediaTypeOrFormat("text/plain"), nullValue());
-        assertThat(XContentType.fromMediaTypeOrFormat("gobbly;goop"), nullValue());
+        assertThat(XContentType.fromMediaType(null), nullValue());
+        assertThat(XContentType.fromMediaType(""), nullValue());
+        assertThat(XContentType.fromMediaType("text/plain"), nullValue());
+        assertThat(XContentType.fromMediaType("gobbly;goop"), nullValue());
     }
 
     public void testVersionedMediaType() throws Exception {
-        assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON));
-        assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML));
-        assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR));
-        assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE));
-
-        assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON));
-        assertThat(
-            XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json ;compatible-with=7;charset=utf-8"),
-            equalTo(XContentType.JSON)
-        );
+        assertThat(XContentType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON));
+        assertThat(XContentType.fromMediaType("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML));
+        assertThat(XContentType.fromMediaType("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR));
+        assertThat(XContentType.fromMediaType("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE));
+
+        assertThat(XContentType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON));
+
+        String mthv = "application/vnd.opensearch+json ;compatible-with=7;charset=utf-8";
+        assertThat(XContentType.fromMediaType(mthv), equalTo(XContentType.JSON));
+        assertThat(XContentType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(XContentType.JSON));
     }
 }
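The migration from fromMediaTypeOrFormat to fromMediaType is mechanical, but the tests above document the contract worth remembering: parameters such as charset and compatible-with are tolerated, matching is case-insensitive, and unknown types resolve to null rather than throwing. A usage sketch:

    import org.opensearch.common.xcontent.XContentType;

    public final class MediaTypeExample {
        public static void main(String[] args) {
            // All of these resolve to XContentType.JSON per the tests above.
            System.out.println(XContentType.fromMediaType("application/json"));
            System.out.println(XContentType.fromMediaType("application/x-ndjson"));
            System.out.println(XContentType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"));

            // Unknown types resolve to null rather than throwing.
            System.out.println(XContentType.fromMediaType("text/plain"));
        }
    }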
diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
index 6558f9d06c2f7..d6cafb3421f7d 100644
--- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
+++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
@@ -173,7 +173,7 @@ class TestPeerFinder extends PeerFinder {
         }
 
         @Override
-        protected void onActiveMasterFound(DiscoveryNode masterNode, long term) {
+        protected void onActiveClusterManagerFound(DiscoveryNode masterNode, long term) {
             assert holdsLock() == false : "PeerFinder lock held in error";
             assertThat(discoveredMasterNode, nullValue());
             assertFalse(discoveredMasterTerm.isPresent());
diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
index 9897ad1a3650b..7a346d4cf9fc5 100644
--- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
+++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
@@ -160,7 +160,7 @@ public void testCleanupAll() throws Exception {
         boolean hasClusterState = randomBoolean();
         createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState);
 
-        String messageText = NodeRepurposeCommand.noMasterMessage(1, environment.dataFiles().length * shardCount, 0);
+        String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, environment.dataFiles().length * shardCount, 0);
 
         Matcher<String> outputMatcher = allOf(
             containsString(messageText),
diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
index 0ad8dc3f138e0..659042c37d650 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
@@ -248,14 +248,14 @@ public void testNestedHaveIdAndTypeFields() throws Exception {
         assertNotNull(result.docs().get(0).getField(IdFieldMapper.NAME));
         assertEquals(Uid.encodeId("1"), result.docs().get(0).getField(IdFieldMapper.NAME).binaryValue());
         assertEquals(IdFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(IdFieldMapper.NAME).fieldType());
-        assertNotNull(result.docs().get(0).getField(TypeFieldMapper.NAME));
-        assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue());
+        assertNotNull(result.docs().get(0).getField(NestedPathFieldMapper.NAME));
+        assertEquals("foo", result.docs().get(0).getField(NestedPathFieldMapper.NAME).stringValue());
         assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString());
 
         // Root document:
         assertNotNull(result.docs().get(1).getField(IdFieldMapper.NAME));
         assertEquals(Uid.encodeId("1"), result.docs().get(1).getField(IdFieldMapper.NAME).binaryValue());
         assertEquals(IdFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(IdFieldMapper.NAME).fieldType());
-        assertNull(result.docs().get(1).getField(TypeFieldMapper.NAME));
+        assertNull(result.docs().get(1).getField(NestedPathFieldMapper.NAME));
         assertEquals("value2", result.docs().get(1).getField("baz").binaryValue().utf8ToString());
     }
 
diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java
index 7ffc22f92d839..92de2707078f3 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java
@@ -220,7 +220,7 @@ private static ObjectMapper createObjectMapper(String name) {
             ObjectMapper.Nested.NO,
             ObjectMapper.Dynamic.FALSE,
             emptyMap(),
-            Settings.EMPTY
+            SETTINGS
         );
     }
 
@@ -232,7 +232,7 @@ private static ObjectMapper createNestedObjectMapper(String name) {
             ObjectMapper.Nested.newNested(),
             ObjectMapper.Dynamic.FALSE,
             emptyMap(),
-            Settings.EMPTY
+            SETTINGS
         );
     }
 }
diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java
index fe3ce5da6c90a..245ba1404cb5c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java
@@ -149,7 +149,7 @@ public void testSingleNested() throws Exception {
         );
 
         assertThat(doc.docs().size(), equalTo(2));
-        assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+        assertThat(doc.docs().get(0).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath()));
         assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
         assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
 
@@ -180,10 +180,10 @@ public void testSingleNested() throws Exception {
         );
 
         assertThat(doc.docs().size(), equalTo(3));
-        assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+        assertThat(doc.docs().get(0).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath()));
         assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
         assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
-        assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+        assertThat(doc.docs().get(1).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath()));
         assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3"));
         assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4"));
 
diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java
new file mode 100644
index 0000000000000..6ad1d0f7f09b9
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.index.IndexableField;
+import org.opensearch.common.bytes.BytesArray;
+import org.opensearch.common.compress.CompressedXContent;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.test.OpenSearchSingleNodeTestCase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+/** tests for {@link org.opensearch.index.mapper.NestedPathFieldMapper} */
+public class NestedPathFieldMapperTests extends OpenSearchSingleNodeTestCase {
+
+    public void testDefaultConfig() throws IOException {
+        Settings indexSettings = Settings.EMPTY;
+        MapperService mapperService = createIndex("test", indexSettings).mapperService();
+        DocumentMapper mapper = mapperService.merge(
+            MapperService.SINGLE_MAPPING_NAME,
+            new CompressedXContent("{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{}}"),
+            MapperService.MergeReason.MAPPING_UPDATE
+        );
+        ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), XContentType.JSON));
+        assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(NestedPathFieldMapper.NAME)));
+    }
+
+    public void testUpdatesWithSameMappings() throws IOException {
+        Settings indexSettings = Settings.EMPTY;
+        MapperService mapperService = createIndex("test", indexSettings).mapperService();
+        DocumentMapper mapper = mapperService.merge(
+            MapperService.SINGLE_MAPPING_NAME,
+            new CompressedXContent("{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{}}"),
+            MapperService.MergeReason.MAPPING_UPDATE
+        );
+        mapper.merge(mapper.mapping(), MapperService.MergeReason.MAPPING_UPDATE);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
index c02df8168afee..7c9895a9e0642 100644
--- a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
+++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
@@ -47,6 +47,7 @@
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.mapper.NestedPathFieldMapper;
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.NestedQueryBuilder;
 import org.opensearch.index.query.QueryShardContext;
@@ -324,7 +325,7 @@ public void testNested() throws IOException {
 
         Query expectedChildQuery = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), Occur.MUST)
             // we automatically add a filter since the inner query might match non-nested docs
-            .add(new TermQuery(new Term("_type", "__nested1")), Occur.FILTER)
+            .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested1")), Occur.FILTER)
             .build();
         assertEquals(expectedChildQuery, query.getChildQuery());
 
@@ -352,7 +353,7 @@ public void testNested() throws IOException {
 
         // we need to add the filter again because of include_in_parent
         expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.MUST)
-            .add(new TermQuery(new Term("_type", "__nested2")), Occur.FILTER)
+            .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested2")), Occur.FILTER)
             .build();
         assertEquals(expectedChildQuery, query.getChildQuery());
 
@@ -367,7 +368,7 @@ public void testNested() throws IOException {
 
         // we need to add the filter again because of include_in_root
         expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.MUST)
-            .add(new TermQuery(new Term("_type", "__nested3")), Occur.FILTER)
+            .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested3")), Occur.FILTER)
             .build();
         assertEquals(expectedChildQuery, query.getChildQuery());
 
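With _type gone, nested-document identity rests on the _nested_path field alone, and the parent and child term filters pair with Lucene's block join exactly as the updated tests construct them. A hedged sketch of that query shape (index wiring omitted; the "parent" path value is illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.join.BitSetProducer;
    import org.apache.lucene.search.join.QueryBitSetProducer;
    import org.apache.lucene.search.join.ScoreMode;
    import org.apache.lucene.search.join.ToParentBlockJoinQuery;
    import org.opensearch.index.mapper.NestedPathFieldMapper;

    public final class NestedPathJoinExample {
        static Query childrenMatchingParent(Query childQuery) {
            // Parents are identified by their nested-path value; the block join
            // maps matching child docs back onto their enclosing parent doc.
            Query parentFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "parent"));
            BitSetProducer parentBitSet = new QueryBitSetProducer(parentFilter);
            return new ToParentBlockJoinQuery(childQuery, parentBitSet, ScoreMode.None);
        }
    }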
diff --git a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
index a8cd6c5411875..726e9f56f98c1 100644
--- a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
+++ b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
@@ -68,6 +68,7 @@
 import org.opensearch.index.fielddata.NoOrdinalsStringFieldDataTests;
 import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
 import org.opensearch.index.fielddata.plain.PagedBytesIndexFieldData;
+import org.opensearch.index.mapper.NestedPathFieldMapper;
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.NestedQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
@@ -103,14 +104,14 @@ public void testDuel() throws Exception {
             for (int j = 0; j < numChildren; ++j) {
                 Document doc = new Document();
                 doc.add(new StringField("f", TestUtil.randomSimpleString(random(), 2), Field.Store.NO));
-                doc.add(new StringField("__type", "child", Field.Store.NO));
+                doc.add(new StringField(NestedPathFieldMapper.NAME, "child", Field.Store.NO));
                 docs.add(doc);
             }
             if (randomBoolean()) {
                 docs.add(new Document());
             }
             Document parent = new Document();
-            parent.add(new StringField("__type", "parent", Field.Store.NO));
+            parent.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
             docs.add(parent);
             writer.addDocuments(docs);
             if (rarely()) { // we need to have a bit more segments than what RandomIndexWriter would do by default
@@ -149,8 +150,8 @@ private TopDocs getTopDocs(
         int n,
         boolean reverse
     ) throws IOException {
-        Query parentFilter = new TermQuery(new Term("__type", "parent"));
-        Query childFilter = new TermQuery(new Term("__type", "child"));
+        Query parentFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "parent"));
+        Query childFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "child"));
         SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse);
         Query query = new ConstantScoreQuery(parentFilter);
         Sort sort = new Sort(sortField);
@@ -172,7 +173,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "T", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "a", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -192,7 +193,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "T", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "b", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -211,7 +212,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "T", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "c", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -230,7 +231,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "F", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "d", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -250,7 +251,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "F", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "f", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -269,14 +270,14 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "T", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "g", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
 
         // This doc will not be included, because it doesn't have nested docs
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "h", Field.Store.NO));
         writer.addDocument(document);
 
@@ -294,7 +295,7 @@ public void testNestedSorting() throws Exception {
         document.add(new StringField("filter_1", "F", Field.Store.NO));
         docs.add(document);
         document = new Document();
-        document.add(new StringField("__type", "parent", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO));
         document.add(new StringField("field1", "i", Field.Store.NO));
         docs.add(document);
         writer.addDocuments(docs);
@@ -316,7 +317,7 @@ public void testNestedSorting() throws Exception {
         reader = OpenSearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
         IndexSearcher searcher = new IndexSearcher(reader);
         PagedBytesIndexFieldData indexFieldData = getForField("field2");
-        Query parentFilter = new TermQuery(new Term("__type", "parent"));
+        Query parentFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "parent"));
         Query childFilter = Queries.not(parentFilter);
         BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(
             indexFieldData,
@@ -472,53 +473,52 @@ public void testMultiLevelNestedSorting() throws IOException {
         List<Document> book = new ArrayList<>();
         Document document = new Document();
         document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO));
         document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO));
         document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 743));
         document.add(new IntPoint("chapters.paragraphs.word_count", 743));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.title", "chapter 3", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO));
         document.add(new IntPoint("chapters.read_time_seconds", 400));
         document.add(new NumericDocValuesField("chapters.read_time_seconds", 400));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO));
         document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO));
         document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 234));
         document.add(new IntPoint("chapters.paragraphs.word_count", 234));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.title", "chapter 2", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO));
         document.add(new IntPoint("chapters.read_time_seconds", 200));
         document.add(new NumericDocValuesField("chapters.read_time_seconds", 200));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.paragraphs.header", "Paragraph 2", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO));
         document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO));
         document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 478));
         document.add(new IntPoint("chapters.paragraphs.word_count", 478));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO));
         document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO));
         document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 849));
         document.add(new IntPoint("chapters.paragraphs.word_count", 849));
         book.add(document);
         document = new Document();
         document.add(new TextField("chapters.title", "chapter 1", Field.Store.NO));
-        document.add(new StringField("_type", "__chapters", Field.Store.NO));
+        document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO));
         document.add(new IntPoint("chapters.read_time_seconds", 1400));
         document.add(new NumericDocValuesField("chapters.read_time_seconds", 1400));
         book.add(document);
         document = new Document();
         document.add(new StringField("genre", "science fiction", Field.Store.NO));
-        document.add(new StringField("_type", "_doc", Field.Store.NO));
         document.add(new StringField("_id", "1", Field.Store.YES));
         document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0));
         book.add(document);
@@ -528,20 +528,19 @@ public void testMultiLevelNestedSorting() throws IOException {
= new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 76)); document.add(new IntPoint("chapters.paragraphs.word_count", 76)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "chapter 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 20)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 20)); book.add(document); document = new Document(); document.add(new StringField("genre", "romance", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "2", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -551,20 +550,19 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "A bad dream", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 976)); document.add(new IntPoint("chapters.paragraphs.word_count", 976)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "The beginning of the end", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 1200)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 1200)); book.add(document); document = new Document(); document.add(new StringField("genre", "horror", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "3", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -574,47 +572,46 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "macaroni", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 180)); document.add(new IntPoint("chapters.paragraphs.word_count", 180)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "hamburger", Field.Store.NO)); - 
document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 150)); document.add(new IntPoint("chapters.paragraphs.word_count", 150)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "tosti", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 120)); document.add(new IntPoint("chapters.paragraphs.word_count", 120)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "easy meals", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 800)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 800)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 87)); document.add(new IntPoint("chapters.paragraphs.word_count", 87)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 10)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 10)); book.add(document); document = new Document(); document.add(new StringField("genre", "cooking", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "4", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -624,7 +621,6 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new StringField("genre", "unknown", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "5", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java index b39ff0c9b97b3..04dcea210640c 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java @@ -52,9 +52,9 @@ import 
diff --git a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java index b39ff0c9b97b3..04dcea210640c 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java @@ -52,9 +52,9 @@ import
org.opensearch.cluster.routing.OperationRouting; import org.opensearch.common.settings.Settings; import org.opensearch.index.mapper.IdFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.test.OpenSearchTestCase; @@ -88,7 +88,7 @@ public void testSplitOnID() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -142,7 +142,7 @@ public void testSplitOnRouting() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -215,7 +215,7 @@ public void testSplitOnIdOrRouting() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -258,7 +258,7 @@ public void testSplitOnRoutingPartitioned() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) );
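The four hunks in this file are mechanical: only the marker field's name changes, while the tests keep their synthetic "__nested" value, since shard splitting presumably only needs a consistent way to tell nested children from root documents. A condensed sketch of the per-document fixture each test now builds (the id and shard values are illustrative):

    List<IndexableField> nestedDoc = Arrays.asList(
        new StringField(IdFieldMapper.NAME, Uid.encodeId("42"), Field.Store.YES),
        // presence of the nested-path field is what marks this doc as nested
        new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES),
        new SortedNumericDocValuesField("shard_id", 3)
    );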
diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index 8123f044798bd..afcc6aa006500 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -41,6 +41,7 @@ import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MetadataFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; @@ -94,6 +95,7 @@ public Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() { IndexFieldMapper.NAME, DataStreamFieldMapper.NAME, SourceFieldMapper.NAME, + NestedPathFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME }; @@ -101,11 +103,7 @@ public Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.CURRENT.minimumIndexCompatibilityVersion(), - Version.CURRENT - ); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty());
assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = module.getMapperRegistry() @@ -116,6 +114,14 @@ public void testBuiltinMappers() { assertEquals(EXPECTED_METADATA_FIELDS[i++], field); } } + { + Version version = VersionUtils.randomVersionBetween( + random(), + Version.V_1_0_0, + VersionUtils.getPreviousVersion(Version.V_2_0_0) + ); + assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + } } public void testBuiltinWithPlugins() { diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 8dd156dfcd0d2..da984084321e1 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -567,7 +568,13 @@ public void testIsMetadataField() { final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - assertTrue(indicesService.isMetadataField(randVersion, builtIn)); + if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { + continue; // nested field mapper does not exist prior to 2.0 + } + assertTrue( + "Expected " + builtIn + " to be a metadata field for version " + randVersion, + indicesService.isMetadataField(randVersion, builtIn) + ); } }
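Both test files now encode the same compatibility rule: _nested_path is a built-in metadata field from 2.0.0 onward, so older index versions see one fewer metadata mapper. A condensed restatement of what the assertions above check, assuming the registry keys its parsers by field name:

    IndicesModule module = new IndicesModule(Collections.emptyList());
    Version post20 = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
    Version pre20 = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, VersionUtils.getPreviousVersion(Version.V_2_0_0));
    // registered for 2.x-compatible indices...
    assertTrue(module.getMapperRegistry().getMetadataMapperParsers(post20).containsKey(NestedPathFieldMapper.NAME));
    // ...but absent for 1.x indices, which therefore report one fewer parser
    assertFalse(module.getMapperRegistry().getMetadataMapperParsers(pre20).containsKey(NestedPathFieldMapper.NAME));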
diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 4e6d9b25409a8..1ea7f006cf482 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -325,7 +325,7 @@ public void testErrorToAndFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); - Map<String, String> params = Collections.singletonMap("format", xContentType.mediaType()); + Map<String, String> params = Collections.singletonMap("format", xContentType.format()); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); RestChannel channel = detailed ?
new DetailedExceptionRestChannel(request) : new SimpleExceptionRestChannel(request);
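The substitution here tracks an XContentType API split: mediaType() returns a full media type, while format() returns the short name that the ?format= request parameter expects. A sketch of the difference for JSON (comment values are assumptions based on the usual media-type mapping):

    XContentType xContentType = XContentType.JSON;
    // mediaType() yields a full media type (for JSON, "application/json", possibly with a charset parameter);
    // format() yields the short name the ?format= request parameter expects:
    String format = xContentType.format(); // "json"
    Map<String, String> params = Collections.singletonMap("format", format);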
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 8ab0cc0023346..65ce02333bae0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -57,9 +57,9 @@ import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.script.MockScriptEngine; @@ -343,15 +343,15 @@ public void testResetRootDocId() throws Exception { // 1 segment with, 1 root document, with 3 nested sub docs Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.FIELD_TYPE)); @@ -365,7 +365,7 @@ public void testResetRootDocId() throws Exception { // 1 document, with 1 nested subdoc document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.FIELD_TYPE)); @@ -376,7 +376,7 @@ public void testResetRootDocId() throws Exception { // and 1 document, with 1 nested subdoc document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field",
NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.FIELD_TYPE)); @@ -613,13 +613,13 @@ public void testPreGetChildLeafCollectors() throws IOException { List<Document> documents = new ArrayList<>(); Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a1"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b1"))); documents.add(document); @@ -633,13 +633,13 @@ public void testPreGetChildLeafCollectors() throws IOException { document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a2"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b2"))); documents.add(document); @@ -653,13 +653,13 @@ public void testPreGetChildLeafCollectors() throws IOException { document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a3"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new
SortedDocValuesField("value", new BytesRef("b3"))); documents.add(document); @@ -863,7 +863,7 @@ public static CheckedConsumer<RandomIndexWriter, IOException> buildResellerData( } Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(p)), IdFieldMapper.Defaults.FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); document.add(new SortedNumericDocValuesField("product_id", p)); documents.add(document); @@ -891,7 +891,7 @@ private static double[] generateDocuments(List<Document> documents, int numNeste for (int nested = 0; nested < numNestedDocs; nested++) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(id)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__" + path, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, path, NestedPathFieldMapper.Defaults.FIELD_TYPE)); long value = randomNonNegativeLong() % 10000; document.add(new SortedNumericDocValuesField(fieldName, value)); documents.add(document); @@ -906,7 +906,7 @@ private List<Document> generateBook(String id, String[] authors, int[] numPages) for (int numPage : numPages) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_chapters", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_chapters", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("num_pages", numPage)); documents.add(document); }
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java index cf0e31bc63467..61df6d01aef64 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java @@ -42,9 +42,9 @@ import org.apache.lucene.store.Directory; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -133,7 +133,7 @@ public void testMaxFromParentDocs() throws IOException { document.add( new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE) ); - document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, NESTED_OBJECT, NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); expectedNestedDocs++; } @@ -193,7 +193,7 @@ public void testFieldAlias() throws IOException { document.add( new Field(IdFieldMapper.NAME,
Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE) ); - document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, NESTED_OBJECT, NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); } Document document = new Document(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java index 9a9a03e715644..678bc2fc6f536 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java @@ -60,11 +60,11 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.Aggregation; @@ -551,7 +551,7 @@ private List<Document> generateDocsWithNested(String id, int value, int[] nested for (int nestedValue : nestedValues) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_object", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("nested_value", nestedValue)); documents.add(document); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index a9e819e7cbaf2..cb47bf6cba6a9 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -66,11 +66,11 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -1464,7 +1464,7 @@ private List<Document> generateDocsWithNested(String id, int value, int[] nested for (int nestedValue : nestedValues) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); +
document.add(new Field(NestedPathFieldMapper.NAME, "nested_object", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("nested_value", nestedValue)); documents.add(document); } diff --git a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java index 44d48e9073e23..bcf458c5028cd 100644 --- a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java @@ -63,8 +63,8 @@ import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -325,7 +325,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new FieldSortBuilder("fieldName").setNestedPath("path") .setNestedFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value")); diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index c14deb6add083..87adbd9532665 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -48,7 +48,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.TypeFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -552,7 +552,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedPath("path") .setNestedFilter(QueryBuilders.matchAllQuery());
diff --git a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java index c1e430abbe3d2..53e15c1c094ab 100644 --- a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java @@ -43,7 +43,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import
org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; -import org.opensearch.index.mapper.TypeFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -344,7 +344,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedPath("path") .setNestedFilter(QueryBuilders.matchAllQuery()); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 5f303bc774930..a896aab0f70c9 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -198,7 +198,6 @@ import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1739,8 +1738,6 @@ public void onFailure(final Exception e) { final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); - transportService.getTaskManager() - .setTaskResourceTrackingService(new TaskResourceTrackingService(settings, clusterSettings, threadPool)); repositoriesService = new RepositoriesService( settings, clusterService, diff --git a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index ab49109eb8247..0f09b0de34206 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -40,7 +40,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.FakeTcpChannel; @@ -60,7 +59,6 @@ import java.util.Set; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -69,12 +67,10 @@ public class TaskManagerTests extends OpenSearchTestCase { private ThreadPool threadPool; - private AtomicReference<RunnableTaskExecutionListener> runnableTaskListener; @Before public void setupThreadPool() { - runnableTaskListener = new AtomicReference<>(); - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); + threadPool = new
TestThreadPool(TransportTasksActionTests.class.getSimpleName()); } @After
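With task resource tracking removed from these tests, thread pools no longer need a listener reference threaded through their constructors. The before/after of the test setup, condensed from the hunk above:

    @Before
    public void setupThreadPool() {
        // previously: new TestThreadPool(name, runnableTaskListener), with an
        // AtomicReference<RunnableTaskExecutionListener> field held by the test class
        threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName());
    }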
diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java deleted file mode 100644 index 8ba23c5d3219c..0000000000000 --- a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tasks; - -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; -import org.opensearch.action.search.SearchTask; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.util.HashMap; -import java.util.concurrent.atomic.AtomicReference; - -import static org.opensearch.tasks.ResourceStats.MEMORY; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { - - private ThreadPool threadPool; - private TaskResourceTrackingService taskResourceTrackingService; - - @Before - public void setup() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), new AtomicReference<>()); - taskResourceTrackingService = new TaskResourceTrackingService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - } - - @After - public void terminateThreadPool() { - terminate(threadPool); - } - - public void testThreadContextUpdateOnTrackingStart() { - taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - - Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); - - String key = "KEY"; - String value = "VALUE"; - - // Prepare thread context - threadPool.getThreadContext().putHeader(key, value); - threadPool.getThreadContext().putTransient(key, value); - threadPool.getThreadContext().addResponseHeader(key, value); - - ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); - - // All headers should be preserved and Task Id should also be included in thread context - verifyThreadContextFixedHeaders(key, value); - assertEquals((long) threadPool.getThreadContext().getTransient(TASK_ID), task.getId()); - - storedContext.restore(); - - // Post restore only task id should be removed from the thread context - verifyThreadContextFixedHeaders(key, value); - assertNull(threadPool.getThreadContext().getTransient(TASK_ID)); - } - - public void testStopTrackingHandlesCurrentActiveThread() { - taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); - ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); - long threadId = Thread.currentThread().getId(); - taskResourceTrackingService.taskExecutionStartedOnThread(task.getId(), threadId); - - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); -
assertEquals(0, task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue()); - - taskResourceTrackingService.stopTracking(task); - - // Makes sure stop tracking marks the current active thread inactive and refreshes the resource stats before returning. - assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - assertTrue(task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue() > 0); - } - - private void verifyThreadContextFixedHeaders(String key, String value) { - assertEquals(threadPool.getThreadContext().getHeader(key), value); - assertEquals(threadPool.getThreadContext().getTransient(key), value); - assertEquals(threadPool.getThreadContext().getResponseHeaders().get(key).get(0), value); - } - -} diff --git a/settings.gradle b/settings.gradle index 183a5ec8d1ae1..52e1e16fc1c01 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.9" + id "com.gradle.enterprise" version "3.10" } rootProject.name = "OpenSearch" diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index 291eee501c4df..2f1e18058d544 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -149,8 +149,8 @@ static class ClusterNode { void reboot() { if (localNode.isMasterNode() == false && rarely()) { - // master-ineligible nodes can't be trusted to persist the cluster state properly, but will not lose the fact that they - // were bootstrapped + // cluster-manager-ineligible nodes can't be trusted to persist the cluster state properly, + // but will not lose the fact that they were bootstrapped final CoordinationMetadata.VotingConfiguration votingConfiguration = persistedState.getLastAcceptedState() .getLastAcceptedConfiguration() .isEmpty() diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 9624a9d3d0554..f2b68b6fdaca0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -136,7 +136,7 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase { * Convert the entity from a {@link Response} into a map of maps. */ public static Map<String, Object> entityAsMap(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -154,7 +154,7 @@ public static Map<String, Object> entityAsMap(Response response) throws IOExcept * Convert the entity from a {@link Response} into a list of maps.
*/ public static List<Object> entityAsList(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -1082,7 +1082,7 @@ protected static Map<String, Object> getAsMap(final String endpoint) throws IOEx } protected static Map<String, Object> responseAsMap(Response response) throws IOException { - XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); Map<String, Object> responseEntity = XContentHelper.convertToMap( entityContentType.xContent(), response.getEntity().getContent(),
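All call sites in the REST test framework move from fromMediaTypeOrFormat(...) to the stricter fromMediaType(...). As the old name suggests, the removed method also accepted bare format names like "json"; the replacement parses only proper media types, which is what a Content-Type header carries anyway. The recurring pattern, condensed:

    // Content-Type is always a full media type such as "application/json", so the strict parser suffices.
    XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue());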
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 4e8799b9a618e..8fc0554e2b31e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -66,7 +66,7 @@ public ClientYamlTestResponse(Response response) throws IOException { this.response = response; if (response.getEntity() != null) { String contentType = response.getHeader("Content-Type"); - this.bodyContentType = XContentType.fromMediaTypeOrFormat(contentType); + this.bodyContentType = XContentType.fromMediaType(contentType); try { byte[] bytes = EntityUtils.toByteArray(response.getEntity()); // skip parsing if we got text back (e.g.
if we called _cat apis) diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java index 0ff1b36d1f5ae..473511825ef60 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java @@ -57,7 +57,7 @@ public class ObjectPath { public static ObjectPath createFromResponse(Response response) throws IOException { byte[] bytes = EntityUtils.toByteArray(response.getEntity()); String contentType = response.getHeader("Content-Type"); - XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType); + XContentType xContentType = XContentType.fromMediaType(contentType); return ObjectPath.createFromXContent(xContentType.xContent(), new BytesArray(bytes)); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java index 677ec7a0a6600..e60871f67ea54 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java @@ -39,7 +39,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskManager; @@ -128,21 +127,6 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { super.waitForTaskCompletion(task, untilInNanos); } - @Override - public ThreadContext.StoredContext taskExecutionStarted(Task task) { - for (MockTaskManagerListener listener : listeners) { - listener.taskExecutionStarted(task, false); - } - - ThreadContext.StoredContext storedContext = super.taskExecutionStarted(task); - return () -> { - for (MockTaskManagerListener listener : listeners) { - listener.taskExecutionStarted(task, true); - } - storedContext.restore(); - }; - } - public void addListener(MockTaskManagerListener listener) { listeners.add(listener); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java index f15f878995aa2..eb8361ac552fc 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java @@ -43,7 +43,4 @@ public interface MockTaskManagerListener { void onTaskUnregistered(Task task); void waitForTaskCompletion(Task task); - - void taskExecutionStarted(Task task, Boolean closeableInvoked); - } diff --git a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java index 2d97d5bffee01..5f8611d99f0a0 100644 --- a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java @@ -40,7 +40,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicReference; public class TestThreadPool extends ThreadPool { @@ -48,29 +47,12 @@ public class TestThreadPool extends
ThreadPool { private volatile boolean returnRejectingExecutor = false; private volatile ThreadPoolExecutor rejectingExecutor; - public TestThreadPool( - String name, - AtomicReference<RunnableTaskExecutionListener> runnableTaskListener, - ExecutorBuilder<?>... customBuilders - ) { - this(name, Settings.EMPTY, runnableTaskListener, customBuilders); - } - public TestThreadPool(String name, ExecutorBuilder<?>... customBuilders) { this(name, Settings.EMPTY, customBuilders); } public TestThreadPool(String name, Settings settings, ExecutorBuilder<?>... customBuilders) { - this(name, settings, null, customBuilders); - } - - public TestThreadPool( - String name, - Settings settings, - AtomicReference<RunnableTaskExecutionListener> runnableTaskListener, - ExecutorBuilder<?>... customBuilders - ) - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), runnableTaskListener, customBuilders); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); } @Override diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index fc4564ce55df7..f995e18d0f2df 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -486,7 +486,7 @@ public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { int lineNumber = between(1, 10000); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); @@ -553,7 +553,7 @@ public void testMultipleValidationErrors() { { DoSection doSection = new DoSection(new XContentLocation(thirdLineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); doSections.add(doSection); } @@ -593,7 +593,7 @@ public void testAddingDoWithNodeSelectorWithSkip() { SkipSection skipSection = new SkipSection(null, singletonList("node_selector"), null); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); createTestSuite(skipSection, doSection).validate(); }
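The three hunks in this last file carry the project-wide master-to-cluster-manager renaming into the test framework's NodeSelector constants; only the constant's name changes, not the selection behavior. Usage, condensed from the tests above:

    ApiCallSection apiCall = new ApiCallSection("test");
    // previously: NodeSelector.SKIP_DEDICATED_MASTERS
    apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS);
    doSection.setApiCallSection(apiCall);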