diff --git a/CHANGELOG.md b/CHANGELOG.md index 209eb8a77d7a0..c96bed52c5dc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) ### Fixed diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..888fa886c3c5e 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -181,18 +181,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node - .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. 
- * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. - */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 1eb22a6bef3b5..257373cf1a2c6 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -53,12 +53,6 @@ public class LegacyESVersion extends Version { public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final 
LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 17ece23f819a2..34eb52803b312 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -49,7 +49,6 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; -import org.opensearch.search.SearchException; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.transport.TcpTransport; @@ -317,10 +316,6 @@ public void writeTo(StreamOutput out) throws IOException { public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -1569,13 +1564,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b721c8f005974 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -114,16 +114,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import 
org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 381eca2dc716f..ceff8dcbc4b55 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -153,12 +153,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - isPipelineResolved = in.readBoolean(); - } + finalPipeline = in.readOptionalString(); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -639,7 +635,7 @@ public void resolveRouting(Metadata metadata) { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.fromId(7050099))) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id until all nodes " + "are on version 7.5.0 or higher" ); @@ -671,12 +667,8 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java index 2440a1802912b..f36ca0e7d7379 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java @@ -31,7 +31,6 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -94,34 +93,14 @@ public SimulateDocumentBaseResult(Exception failure) { * Read from a stream. 
*/ public SimulateDocumentBaseResult(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - failure = in.readException(); - ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); - } else { - if (in.readBoolean()) { - ingestDocument = null; - failure = in.readException(); - } else { - ingestDocument = new WriteableIngestDocument(in); - failure = null; - } - } + failure = in.readException(); + ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeException(failure); - out.writeOptionalWriteable(ingestDocument); - } else { - if (failure == null) { - out.writeBoolean(false); - ingestDocument.writeTo(out); - } else { - out.writeBoolean(true); - out.writeException(failure); - } - } + out.writeException(failure); + out.writeOptionalWriteable(ingestDocument); } public IngestDocument getIngestDocument() { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 2cf9d66fee2bd..291aa88a3fb3e 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -114,7 +114,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return LegacyESVersion.V_7_4_0; + return LegacyESVersion.fromId(7040099); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 489c6125f7d13..25e545f6e2292 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -314,11 +314,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } else { - this.failedNodeIds = Collections.emptySet(); - } + this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public void writeTo(StreamOutput out) throws IOException { @@ -330,9 +326,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeCollection(failedNodeIds, StreamOutput::writeString); - } + out.writeCollection(failedNodeIds, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c7f4804755d4..00daea147f16f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; @@ 
-1122,8 +1121,7 @@ public int getTranslogRetentionTotalFiles() { } private static boolean shouldDisableTranslogRetention(Settings settings) { - return INDEX_SOFT_DELETES_SETTING.get(settings) - && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_4_0); + return INDEX_SOFT_DELETES_SETTING.get(settings); } /** diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index e11b22e9296cf..d50585ae0aebf 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -67,7 +67,7 @@ public class VectorGeoShapeQueryProcessor { public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.fromId(7050099))) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); } // wrap geoQuery as a ConstantScoreQuery diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 9605ba424bfb0..ef606ce35b84f 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.index.query.functionscore; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -123,22 +122,14 @@ public ScriptScoreQueryBuilder(QueryBuilder query, Script script) { public ScriptScoreQueryBuilder(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script = new Script(in); - } else { - script = in.readNamedWriteable(ScriptScoreFunctionBuilder.class).getScript(); - } + script = new Script(in); minScore = in.readOptionalFloat(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(query); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script.writeTo(out); - } else { - out.writeNamedWriteable(new ScriptScoreFunctionBuilder(script)); - } + script.writeTo(out); out.writeOptionalFloat(minScore); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 55d95381923b3..cf18527523bbb 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -223,7 +223,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Whether there should be a peer recovery retention lease (PRRL) for every tracked shard copy. 
Always true on indices created from - * {@link LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an + * {@code LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an * earlier version if we recently did a rolling upgrade and * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} has not yet completed. Is only permitted * to change from false to true; can be removed once support for pre-PRRL indices is no longer needed. @@ -996,9 +996,7 @@ public ReplicationTracker( this.routingTable = null; this.replicationGroup = null; this.hasAllPeerRecoveryRetentionLeases = indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0) - || (indexSettings.isSoftDeleteEnabled() - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_4_0) - && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); + || (indexSettings.isSoftDeleteEnabled() && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; @@ -1126,7 +1124,7 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { /** * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@link LegacyESVersion#V_7_4_0}, in which case the missing leases should be created + * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. */ private void addPeerRecoveryRetentionLeaseForSolePrimary() { @@ -1528,7 +1526,7 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { /** * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@link LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
*/ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d05f7c34f80ce..52ecc5bc66607 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -52,7 +52,6 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -3187,7 +3186,7 @@ public RetentionLease addPeerRecoveryRetentionLease( ) { assert assertPrimaryMode(); // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs: - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_4_0) || indexSettings.isSoftDeleteEnabled() == false; + assert indexSettings.isSoftDeleteEnabled() == false; return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index a7334fba15664..446fb78958db4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -57,11 +55,7 @@ final class RecoveryFinalizeRecoveryRequest extends RecoveryTransportRequest { recoveryId = in.readLong(); shardId = new ShardId(in); globalCheckpoint = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - trimAboveSeqNo = in.readZLong(); - } else { - trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + trimAboveSeqNo = in.readZLong(); } RecoveryFinalizeRecoveryRequest( @@ -100,9 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeZLong(globalCheckpoint); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeZLong(trimAboveSeqNo); - } + out.writeZLong(trimAboveSeqNo); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index bdacb0b724884..68979fa4b69bc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.shard.ShardId; @@ -62,9 +61,6 @@ class RecoveryPrepareForTranslogOperationsRequest 
extends RecoveryTransportReque recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - in.readBoolean(); // was fileBasedRecovery - } } public long recoveryId() { @@ -85,8 +81,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeBoolean(true); // was fileBasedRecovery - } } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 665e79722770e..8c55a242d34f3 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -720,7 +720,7 @@ void createRetentionLease(final long startingSeqNo, ActionListener addRetentionLeaseStep = new StepListener<>(); final long estimatedGlobalCheckpoint = startingSeqNo - 1; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 57208ab029bf4..de2ee1b8512b4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -430,9 +429,7 @@ public Translog(StreamInput in) throws IOException { recovered = in.readVInt(); total = in.readVInt(); totalOnStart = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - totalLocal = in.readVInt(); - } + totalLocal = in.readVInt(); } @Override @@ -441,9 +438,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(recovered); out.writeVInt(total); out.writeVInt(totalOnStart); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeVInt(totalLocal); - } + out.writeVInt(totalLocal); } public synchronized void reset() { diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index b49cdcd127962..d469b6dec5c3c 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -253,20 +253,14 @@ public StoreFilesMetadata( public StoreFilesMetadata(StreamInput in) throws IOException { this.shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } else { - this.peerRecoveryRetentionLeases = Collections.emptyList(); - } + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); } @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); metadataSnapshot.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeList(peerRecoveryRetentionLeases); - } + out.writeList(peerRecoveryRetentionLeases); } public ShardId shardId() { 
diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index aaa021a0e8b93..a6a649fa2cd44 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -52,7 +52,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -95,11 +94,6 @@ public void getRepositoryData(ActionListener listener) { in.getRepositoryData(listener); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - in.initializeSnapshot(snapshotId, indices, metadata); - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index a16e0e8d441bc..1826fe1aa51da 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -129,19 +128,6 @@ default Repository create(RepositoryMetadata metadata, Function listener); - /** - * Starts snapshotting process - * - * @param snapshotId snapshot id - * @param indices list of indices to be snapshotted - * @param metadata cluster metadata - * - * @deprecated this method is only used when taking snapshots in a mixed version cluster where a cluster-manager node older than - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. - */ - @Deprecated - void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata); - /** * Finalizes snapshotting process *
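For context on the two hunks above: with initializeSnapshot removed from the Repository contract here (and its implementation removed from BlobStoreRepository in the next file), snapshot creation always flows through SnapshotsService#createSnapshot / #executeSnapshot. The caller-facing admin API is unchanged; the following is a minimal sketch, assuming an org.opensearch.client.Client is available (the class, repository and snapshot names are illustrative, not part of the patch):

    import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
    import org.opensearch.client.Client;

    final class CreateSnapshotSketch {
        // Hypothetical helper: waitForCompletion=true ends up in SnapshotsService#executeSnapshot,
        // waitForCompletion=false in SnapshotsService#createSnapshot, now unconditionally
        // (TransportCreateSnapshotAction no longer branches on the minimum node version).
        static CreateSnapshotResponse takeSnapshot(Client client, String repository, String snapshot) {
            return client.admin()
                .cluster()
                .prepareCreateSnapshot(repository, snapshot)
                .setWaitForCompletion(true) // block until the snapshot finishes
                .get();
        }
    }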

diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c36d92abcf498..bf06191bdc8d3 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.opensearch.repositories.RepositoryVerificationException; import org.opensearch.repositories.ShardGenerations; import org.opensearch.snapshots.AbortedSnapshotException; -import org.opensearch.snapshots.SnapshotCreationException; import org.opensearch.snapshots.SnapshotException; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -713,21 +712,6 @@ public RepositoryStats stats() { return new RepositoryStats(store.stats()); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata clusterMetadata) { - try { - // Write Global Metadata - GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress); - - // write the index metadata for each index in the snapshot - for (IndexId index : indices) { - INDEX_METADATA_FORMAT.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), compress); - } - } catch (IOException ex) { - throw new SnapshotCreationException(metadata.name(), snapshotId, ex); - } - } - @Override public void deleteSnapshots( Collection snapshotIds, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index aacd386cd4bd7..b13a63ef77f6a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -163,23 +163,9 @@ * *

 * <p>Creating a snapshot in the repository happens in the three steps described in detail below.</p>
 *
- * <h3>Initializing a Snapshot in the Repository (Mixed Version Clusters only)</h3>
- *
- * <p>In mixed version clusters that contain a node older than
- * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
- * call to {@link org.opensearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
- * following actions:</p>
- * <ol>
- * <li>Verify that no snapshot by the requested name exists.</li>
- * <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
- * <li>Write the metadata for each index to a blob in that index's directory at
- * {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
- * </ol>
- * TODO: Remove this section once BwC logic it references is removed
- *
  * <h3>Writing Shard Data (Segments)</h3>
  *
- * <p>Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
+ * <p>The snapshot process writes the actual shard data
  * to the repository by invoking {@link org.opensearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
  * for the shards in the current snapshot. It is implemented as follows:

* diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java index e7b1da91aba8f..bd2c11cf71ff1 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.document; -import org.opensearch.LegacyESVersion; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.ActiveShardCount; @@ -128,7 +127,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { + if (request.params().get("op_type") == null) { // default to op_type create request.params().put("op_type", "create"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index d8526e684f391..6ca64c2186cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.StreamInput; @@ -103,15 +102,6 @@ public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); - if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 33c09e04bd4b0..501f1af63b3d9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -114,11 +113,7 @@ public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException { format = in.readOptionalString(); gapPolicy = GapPolicy.readFrom(in); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -128,9 +123,7 @@ protected 
void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(format); gapPolicy.writeTo(out); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 70652e7ddce44..7b20a796b8134 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; @@ -106,11 +105,7 @@ public MovFnPipelineAggregator(StreamInput in) throws IOException { gapPolicy = BucketHelpers.GapPolicy.readFrom(in); bucketsPath = in.readString(); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -120,9 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { gapPolicy.writeTo(out); out.writeString(bucketsPath); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 4f672c9813d64..e53c2889f88e6 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -90,7 +90,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.repositories.IndexId; @@ -142,12 +141,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - /** - * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, Metadata)} to write snapshot metadata - * when starting a snapshot. - */ - public static final Version NO_REPO_INITIALIZE_VERSION = LegacyESVersion.V_7_5_0; - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; @@ -156,7 +149,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.V_7_5_0; + public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; @@ -244,144 +237,6 @@ public SnapshotsService( } } - /** - * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of - * the snapshot. 
- * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot completion listener - */ - public void executeSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - createSnapshotLegacy( - request, - ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) - ); - } - - /** - * Initializes the snapshotting process. - *

- * This method is used by clients to start snapshot. It makes sure that there is no snapshots are currently running and - * creates a snapshot record in cluster state metadata. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot creation listener - */ - public void createSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - final String repositoryName = request.repository(); - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); - validate(repositoryName, snapshotName); - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot - Repository repository = repositoriesService.repository(request.repository()); - final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); - clusterService.submitStateUpdateTask("create_snapshot [" + snapshotName + ']', new ClusterStateUpdateTask() { - - private List indices; - - private SnapshotsInProgress.Entry newEntry; - - @Override - public ClusterState execute(ClusterState currentState) { - validate(repositoryName, snapshotName, currentState); - SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } - final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); - if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.hasCleanupInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" - ); - } - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the cluster state anyway in #applyClusterState. 
- if (snapshots != null - && snapshots.entries() - .stream() - .anyMatch( - entry -> (entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false) == false - )) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } - // Store newSnapshot here to be processed in clusterStateProcessed - indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request)); - - final List dataStreams = indexNameExpressionResolver.dataStreamNames( - currentState, - request.indicesOptions(), - request.indices() - ); - - logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); - newEntry = new SnapshotsInProgress.Entry( - new Snapshot(repositoryName, snapshotId), - request.includeGlobalState(), - request.partial(), - State.INIT, - Collections.emptyList(), // We'll resolve the list of indices when moving to the STARTED state in #beginSnapshot - dataStreams, - threadPool.absoluteTimeInMillis(), - RepositoryData.UNKNOWN_REPO_GEN, - ImmutableOpenMap.of(), - userMeta, - Version.CURRENT - ); - initializingSnapshots.add(newEntry.snapshot()); - snapshots = SnapshotsInProgress.of(Collections.singletonList(newEntry)); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); - if (newEntry != null) { - initializingSnapshots.remove(newEntry.snapshot()); - } - newEntry = null; - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { - if (newEntry != null) { - final Snapshot current = newEntry.snapshot(); - assert initializingSnapshots.contains(current); - assert indices != null; - beginSnapshot(newState, newEntry, request.partial(), indices, repository, new ActionListener() { - @Override - public void onResponse(final Snapshot snapshot) { - initializingSnapshots.remove(snapshot); - listener.onResponse(snapshot); - } - - @Override - public void onFailure(final Exception e) { - initializingSnapshots.remove(current); - listener.onFailure(e); - } - }); - } - } - - @Override - public TimeValue timeout() { - return request.clusterManagerNodeTimeout(); - } - }); - } - /** * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of * the snapshot. @@ -946,227 +801,6 @@ private static void validate(final String repositoryName, final String snapshotN } } - /** - * Starts snapshot. - *

- * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param clusterState cluster state - * @param snapshot snapshot meta data - * @param partial allow partial snapshots - * @param userCreateSnapshotListener listener - */ - private void beginSnapshot( - final ClusterState clusterState, - final SnapshotsInProgress.Entry snapshot, - final boolean partial, - final List indices, - final Repository repository, - final ActionListener userCreateSnapshotListener - ) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - - boolean hadAbortedInitializations; - - @Override - protected void doRun() { - assert initializingSnapshots.contains(snapshot.snapshot()); - if (repository.isReadOnly()) { - throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); - } - final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); - final StepListener repositoryDataListener = new StepListener<>(); - repository.getRepositoryData(repositoryDataListener); - repositoryDataListener.whenComplete(repositoryData -> { - // check if the snapshot name already exists in the repository - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException( - repository.getMetadata().name(), - snapshotName, - "snapshot with the same name already exists" - ); - } - if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager - // failover to an - // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a - // valid - // snapshot. 
-                        repository.initializeSnapshot(
-                            snapshot.snapshot().getSnapshotId(),
-                            snapshot.indices(),
-                            metadataForSnapshot(snapshot, clusterState.metadata())
-                        );
-                    }
-
-                    logger.info("snapshot [{}] started", snapshot.snapshot());
-                    final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null);
-                    if (indices.isEmpty()) {
-                        // No indices in this snapshot - we are done
-                        userCreateSnapshotListener.onResponse(snapshot.snapshot());
-                        endSnapshot(
-                            SnapshotsInProgress.startedEntry(
-                                snapshot.snapshot(),
-                                snapshot.includeGlobalState(),
-                                snapshot.partial(),
-                                Collections.emptyList(),
-                                Collections.emptyList(),
-                                threadPool.absoluteTimeInMillis(),
-                                repositoryData.getGenId(),
-                                ImmutableOpenMap.of(),
-                                snapshot.userMetadata(),
-                                version
-                            ),
-                            clusterState.metadata(),
-                            repositoryData
-                        );
-                        return;
-                    }
-                    clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() {
-
-                        @Override
-                        public ClusterState execute(ClusterState currentState) {
-                            SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
-                            List<SnapshotsInProgress.Entry> entries = new ArrayList<>();
-                            for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
-                                if (entry.snapshot().equals(snapshot.snapshot()) == false) {
-                                    entries.add(entry);
-                                    continue;
-                                }
-
-                                if (entry.state() == State.ABORTED) {
-                                    entries.add(entry);
-                                    assert entry.shards().isEmpty();
-                                    hadAbortedInitializations = true;
-                                } else {
-                                    final List<IndexId> indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap());
-                                    // Replace the snapshot that was just initialized
-                                    ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards = shards(
-                                        snapshots,
-                                        currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY),
-                                        currentState.metadata(),
-                                        currentState.routingTable(),
-                                        indexIds,
-                                        useShardGenerations(version),
-                                        repositoryData,
-                                        entry.repository()
-                                    );
-                                    if (!partial) {
-                                        Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(
-                                            shards,
-                                            currentState.metadata()
-                                        );
-                                        Set<String> missing = indicesWithMissingShards.v1();
-                                        Set<String> closed = indicesWithMissingShards.v2();
-                                        if (missing.isEmpty() == false || closed.isEmpty() == false) {
-                                            final StringBuilder failureMessage = new StringBuilder();
-                                            if (missing.isEmpty() == false) {
-                                                failureMessage.append("Indices don't have primary shards ");
-                                                failureMessage.append(missing);
-                                            }
-                                            if (closed.isEmpty() == false) {
-                                                if (failureMessage.length() > 0) {
-                                                    failureMessage.append("; ");
-                                                }
-                                                failureMessage.append("Indices are closed ");
-                                                failureMessage.append(closed);
-                                            }
-                                            entries.add(
-                                                new SnapshotsInProgress.Entry(
-                                                    entry,
-                                                    State.FAILED,
-                                                    indexIds,
-                                                    repositoryData.getGenId(),
-                                                    shards,
-                                                    version,
-                                                    failureMessage.toString()
-                                                )
-                                            );
-                                            continue;
-                                        }
-                                    }
-                                    entries.add(
-                                        new SnapshotsInProgress.Entry(
-                                            entry,
-                                            State.STARTED,
-                                            indexIds,
-                                            repositoryData.getGenId(),
-                                            shards,
-                                            version,
-                                            null
-                                        )
-                                    );
-                                }
-                            }
-                            return ClusterState.builder(currentState)
-                                .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries)))
-                                .build();
-                        }
-
-                        @Override
-                        public void onFailure(String source, Exception e) {
-                            logger.warn(
-                                () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()),
-                                e
-                            );
-                            removeFailedSnapshotFromClusterState(
-                                snapshot.snapshot(),
-                                e,
-                                null,
-                                new CleanupAfterErrorListener(userCreateSnapshotListener, e)
-                            );
-                        }
-
-                        @Override
-                        public void onNoLongerClusterManager(String source) {
-                            // We are not longer a cluster-manager - we shouldn't try to do any cleanup
-                            // The new cluster-manager will take care of it
-                            logger.warn(
-                                "[{}] failed to create snapshot - no longer a cluster-manager",
-                                snapshot.snapshot().getSnapshotId()
-                            );
-                            userCreateSnapshotListener.onFailure(
-                                new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization")
-                            );
-                        }
-
-                        @Override
-                        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                            // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
-                            // for processing. If client wants to wait for the snapshot completion, it can register snapshot
-                            // completion listener in this method. For the snapshot completion to work properly, the snapshot
-                            // should still exist when listener is registered.
-                            userCreateSnapshotListener.onResponse(snapshot.snapshot());
-
-                            if (hadAbortedInitializations) {
-                                final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE);
-                                assert snapshotsInProgress != null;
-                                final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot());
-                                assert entry != null;
-                                endSnapshot(entry, newState.metadata(), repositoryData);
-                            } else {
-                                endCompletedSnapshots(newState);
-                            }
-                        }
-                    });
-                }, this::onFailure);
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
-                removeFailedSnapshotFromClusterState(
-                    snapshot.snapshot(),
-                    e,
-                    null,
-                    new CleanupAfterErrorListener(userCreateSnapshotListener, e)
-                );
-            }
-        });
-    }
-
     private static class CleanupAfterErrorListener {

         private final ActionListener<Snapshot> userCreateSnapshotListener;
diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
index f588767d5336d..f8f512e5aefc6 100644
--- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
+++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java
@@ -31,7 +31,6 @@ package org.opensearch.index.query;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.geo.ShapeRelation;
 import org.opensearch.common.geo.builders.ShapeBuilder;
 import org.opensearch.test.geo.RandomShapeGenerator;
@@ -73,21 +72,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) {
             }
         }
         if (randomBoolean()) {
-            QueryShardContext context = createShardContext();
-            if (context.indexVersionCreated().onOrAfter(LegacyESVersion.V_7_5_0)) { // CONTAINS is only supported from version 7.5
-                if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
-                } else {
-                    builder.relation(
-                        randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS)
-                    );
-                }
+            if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
+                builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
             } else {
-                if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS));
-                } else {
-                    builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN));
-                }
+                builder.relation(
+                    randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS)
+                );
             }
         }

diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 6a8999a205be2..da44643de98a5 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -246,11 +246,6 @@ public void getRepositoryData(ActionListener<RepositoryData> listener) {
             listener.onResponse(null);
         }

-        @Override
-        public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {
-
-        }
-
         @Override
         public void finalizeSnapshot(
             ShardGenerations shardGenerations,
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
index 5ed85fedc8cea..2a85fffa8699a 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java
@@ -55,7 +55,6 @@ import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -116,9 +115,6 @@ public void getRepositoryData(ActionListener<RepositoryData> listener) {
         );
     }

-    @Override
-    public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {}
-
     @Override
     public void finalizeSnapshot(
         ShardGenerations shardGenerations,
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index f2b68b6fdaca0..bbf7763551bcd 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -531,8 +531,8 @@ protected boolean waitForAllSnapshotsWiped() {

     private void wipeCluster() throws Exception {
         // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
-        if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced
-                                                                                                                       // in version 7.4
+        if (nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced
+                                                            // in version 7.4
             if (preserveSLMPoliciesUponCompletion() == false) {
                 // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
                 deleteAllSLMPolicies();
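The wipeCluster() hunk above drops the LegacyESVersion.V_7_4_0 clause and keys the SLM cleanup off Version.V_1_0_0 alone. As a minimal sketch, not part of this patch: code outside the tree that still needs the old 7.4.0 cutoff once the named constants are gone could derive it from the raw version id instead, assuming Version.fromId(...) keeps resolving legacy 7.x ids; the SlmVersionGate class and inLegacySlmWindow(...) names below are illustrative only.

import org.opensearch.Version;

public final class SlmVersionGate {

    // 7.4.0 expressed as a raw legacy version id; assumed layout: major * 1000000 + minor * 10000 + revision * 100 + 99.
    private static final Version LEGACY_7_4_0 = Version.fromId(7040099);

    private SlmVersionGate() {}

    // True when a node version falls inside the window where SLM shipped upstream: [7.4.0, 1.0.0).
    public static boolean inLegacySlmWindow(Version nodeVersion) {
        return nodeVersion.onOrAfter(LEGACY_7_4_0) && nodeVersion.before(Version.V_1_0_0);
    }
}

This keeps the same comparison the test base class performs while avoiding a compile-time dependency on a removed constant.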