
remove backcompat handling of 6.2.x versions (elastic#42044)
relates to refactoring initiative elastic#41164.
talevy authored and Gurkan Kaymak committed May 27, 2019
1 parent be0124e commit 54687e0
Showing 20 changed files with 58 additions and 203 deletions.
@@ -1006,7 +1006,7 @@ private enum ElasticsearchExceptionHandle {
         UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class,
                 org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED),
         TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class,
-                MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_6_2_0),
+                MultiBucketConsumerService.TooManyBucketsException::new, 149, UNKNOWN_VERSION_ADDED),
         COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class,
                 org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0),
         SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class,
server/src/main/java/org/elasticsearch/Version.java (20 changes: 0 additions & 20 deletions)
@@ -48,16 +48,6 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST);
     // The below version is missing from the 7.3 JAR
     private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1);
-    public static final int V_6_2_0_ID = 6020099;
-    public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1);
-    public static final int V_6_2_1_ID = 6020199;
-    public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1);
-    public static final int V_6_2_2_ID = 6020299;
-    public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1);
-    public static final int V_6_2_3_ID = 6020399;
-    public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1);
-    public static final int V_6_2_4_ID = 6020499;
-    public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1);
     public static final int V_6_3_0_ID = 6030099;
     public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
     public static final int V_6_3_1_ID = 6030199;
@@ -176,16 +166,6 @@ public static Version fromId(int id) {
                 return V_6_3_1;
             case V_6_3_0_ID:
                 return V_6_3_0;
-            case V_6_2_4_ID:
-                return V_6_2_4;
-            case V_6_2_3_ID:
-                return V_6_2_3;
-            case V_6_2_2_ID:
-                return V_6_2_2;
-            case V_6_2_1_ID:
-                return V_6_2_1;
-            case V_6_2_0_ID:
-                return V_6_2_0;
             case V_EMPTY_ID:
                 return V_EMPTY;
             default:
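Note: the deleted V_6_2_x constants follow the numeric version-ID convention visible in the surrounding code, where an ID such as 6020099 appears to pack major, minor, and revision digits plus a two-digit build suffix. A minimal sketch of that assumed encoding (the helper class below is illustrative only, not part of this commit):

// Hypothetical helper: decodes an ID assumed to be laid out as
// major * 1_000_000 + minor * 10_000 + revision * 100 + build suffix.
final class VersionIdSketch {

    static String describe(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        return major + "." + minor + "." + revision + " (build suffix " + build + ")";
    }

    public static void main(String[] args) {
        System.out.println(describe(6020099)); // prints: 6.2.0 (build suffix 99)
        System.out.println(describe(6030099)); // prints: 6.3.0 (build suffix 99)
    }
}

Under this reading, removing the 6.2.x constants also removes their cases from Version.fromId, so those IDs now fall through to the default branch like any other unknown version.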
server/src/main/java/org/elasticsearch/index/store/Store.java (19 changes: 2 additions & 17 deletions)
@@ -1550,23 +1550,8 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long
         final IndexCommit lastIndexCommitCommit = existingCommits.get(existingCommits.size() - 1);
         final String translogUUID = lastIndexCommitCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY);
         final IndexCommit startingIndexCommit;
-        // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog
-        // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit.
-        // To avoid this issue, we only select index commits whose translog are fully retained.
-        if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) {
-            final List<IndexCommit> recoverableCommits = new ArrayList<>();
-            for (IndexCommit commit : existingCommits) {
-                if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) {
-                    recoverableCommits.add(commit);
-                }
-            }
-            assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " +
-                "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]";
-            startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint);
-        } else {
-            // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint.
-            startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint);
-        }
+        // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint.
+        startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint);

         if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) {
             throw new IllegalStateException("starting commit translog uuid ["
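With the pre-6.2 fallback gone, trimUnsafeCommits always delegates to CombinedDeletionPolicy.findSafeCommitPoint over all existing commits. Conceptually, a safe commit is the newest index commit whose max_seq_no is at or below the last synced global checkpoint; the sketch below illustrates only that selection idea and is not the actual CombinedDeletionPolicy implementation:

import java.util.List;

// Simplified stand-in for an index commit: only what the sketch needs.
final class CommitInfo {
    final long maxSeqNo;

    CommitInfo(long maxSeqNo) {
        this.maxSeqNo = maxSeqNo;
    }
}

final class SafeCommitSketch {

    // Walk the commits from newest to oldest and return the first one whose
    // max_seq_no does not exceed the global checkpoint; if none qualifies,
    // fall back to the oldest commit.
    static CommitInfo findSafeCommit(List<CommitInfo> commits, long globalCheckpoint) {
        for (int i = commits.size() - 1; i >= 0; i--) {
            if (commits.get(i).maxSeqNo <= globalCheckpoint) {
                return commits.get(i);
            }
        }
        return commits.get(0);
    }
}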
@@ -594,10 +594,6 @@ static final class PreSyncedFlushResponse extends TransportResponse {
             this.existingSyncId = existingSyncId;
         }

-        boolean includeNumDocs(Version version) {
-            return version.onOrAfter(Version.V_6_2_2);
-        }
-
         boolean includeExistingSyncId(Version version) {
             return version.onOrAfter(Version.V_6_3_0);
         }
@@ -606,11 +602,7 @@ boolean includeExistingSyncId(Version version) {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             commitId = new Engine.CommitId(in);
-            if (includeNumDocs(in.getVersion())) {
-                numDocs = in.readInt();
-            } else {
-                numDocs = UNKNOWN_NUM_DOCS;
-            }
+            numDocs = in.readInt();
             if (includeExistingSyncId(in.getVersion())) {
                 existingSyncId = in.readOptionalString();
             }
@@ -620,9 +612,7 @@ public void readFrom(StreamInput in) throws IOException {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             commitId.writeTo(out);
-            if (includeNumDocs(out.getVersion())) {
-                out.writeInt(numDocs);
-            }
+            out.writeInt(numDocs);
             if (includeExistingSyncId(out.getVersion())) {
                 out.writeOptionalString(existingSyncId);
             }
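The change above, and several of the hunks that follow, drop the same version-gated wire-serialization pattern: a field is written only when the peer's wire version is new enough, and the read side must apply the identical guard so the stream stays aligned. A hypothetical sketch of that pattern using the Elasticsearch stream abstractions (ExampleResponse and its numDocs field are invented for illustration, not code from this commit):

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Invented message type with one field that only newer wire versions carry.
final class ExampleResponse implements Writeable {
    private static final int UNKNOWN_NUM_DOCS = -1;
    private final int numDocs;

    ExampleResponse(int numDocs) {
        this.numDocs = numDocs;
    }

    ExampleResponse(StreamInput in) throws IOException {
        // Only consume the field if the sender was new enough to have written it.
        if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
            numDocs = in.readInt();
        } else {
            numDocs = UNKNOWN_NUM_DOCS;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Mirror the read side exactly; an asymmetric guard corrupts the stream.
        if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
            out.writeInt(numDocs);
        }
    }
}

Once 6.2.x nodes can no longer appear on the wire for a 7.x cluster, these guards are always true, which is why the commit can delete them file by file.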
@@ -106,22 +106,14 @@ public SliceBuilder(String field, int id, int max) {

     public SliceBuilder(StreamInput in) throws IOException {
         String field = in.readString();
-        if ("_uid".equals(field) && in.getVersion().before(Version.V_6_3_0)) {
-            // This is safe because _id and _uid are handled the same way in #toFilter
-            field = IdFieldMapper.NAME;
-        }
         this.field = field;
         this.id = in.readVInt();
         this.max = in.readVInt();
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (IdFieldMapper.NAME.equals(field) && out.getVersion().before(Version.V_6_3_0)) {
-            out.writeString("_uid");
-        } else {
-            out.writeString(field);
-        }
+        out.writeString(field);
         out.writeVInt(id);
         out.writeVInt(max);
     }
server/src/test/java/org/elasticsearch/BuildTests.java (5 changes: 0 additions & 5 deletions)
@@ -199,29 +199,24 @@ public void testSerializationBWC() throws IOException {
             randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6)));

         final List<Version> versions = Version.getDeclaredVersions(Version.class);
-        final Version pre63Version = randomFrom(versions.stream().filter(v -> v.before(Version.V_6_3_0)).collect(Collectors.toList()));
         final Version post63Pre67Version = randomFrom(versions.stream()
             .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList()));
         final Version post67Pre70Version = randomFrom(versions.stream()
             .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList()));
         final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList()));

-        final WriteableBuild pre63 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, pre63Version);
         final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version);
         final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version);
         final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version);

-        assertThat(pre63.build.flavor(), equalTo(Build.Flavor.OSS));
         assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor()));
         assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor()));
         assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor()));

-        assertThat(pre63.build.type(), equalTo(Build.Type.UNKNOWN));
         assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR));
         assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type()));
         assertThat(post70.build.type(), equalTo(dockerBuild.build.type()));

-        assertThat(pre63.build.getQualifiedVersion(), equalTo(pre63Version.toString()));
         assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString()));
         assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString()));
         assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion()));
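The remaining assertions still exercise serialization at randomly chosen wire versions via copyWriteable. Under the hood that amounts to writing with an output stream pinned to the target version and reading back with an input stream pinned to the same version; a minimal sketch under that assumption (ignoring the named-writeable registry that the real test helper also wires in):

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

final class WireRoundTripSketch {

    // Serialize a writeable as an older (or newer) node would receive it,
    // then deserialize it again at that same wire version.
    static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader, Version wireVersion) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(wireVersion);
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                in.setVersion(wireVersion);
                return reader.read(in);
            }
        }
    }
}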
@@ -366,7 +366,7 @@ public void testCircuitBreakingException() throws IOException {
     }

     public void testTooManyBucketsException() throws IOException {
-        Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.CURRENT);
+        Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT);
         MultiBucketConsumerService.TooManyBucketsException ex =
             serialize(new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", 100), version);
         assertEquals("Too many buckets", ex.getMessage());
@@ -37,7 +37,6 @@

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;

 import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion;
@@ -189,23 +188,16 @@ public void testCacheFilterReader() throws Exception {
     }

     public void testLuceneVersionOnUnknownVersions() {
-        List<Version> allVersions = VersionUtils.allVersions();
-
-        // should have the same Lucene version as the latest 6.x version
-        Version version = Version.fromString("6.88.50");
-        assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion,
-            version.luceneVersion);
-
         // between two known versions, should use the lucene version of the previous version
-        version = Version.fromString("6.2.50");
-        assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion);
+        Version version = VersionUtils.getPreviousVersion(Version.CURRENT);
+        assertEquals(Version.fromId(version.id + 100).luceneVersion, version.luceneVersion);

         // too old version, major should be the oldest supported lucene version minus 1
         version = Version.fromString("5.2.1");
         assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major);

         // future version, should be the same version as today
-        version = Version.fromString("8.77.1");
+        version = Version.fromId(Version.CURRENT.id + 100);
         assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion);
     }
 }
@@ -75,8 +75,8 @@ public void testThatInstancesAreCachedAndReused() {
             PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT)));

         // Same Lucene version should be cached:
-        assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1),
-            PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2));
+        assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.0")),
+            PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.1")));
     }

     public void testThatAnalyzersAreUsedInMapping() throws IOException {
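The updated assertion leans on pre-built analyzers being cached per Lucene version rather than per Elasticsearch version, so two ES releases that resolve to the same Lucene version share a single analyzer instance. A rough sketch of that caching idea (the class below is illustrative, not the PreBuiltAnalyzers code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;

final class LuceneVersionedAnalyzerCache {

    // Keyed by Lucene version: ES versions that bundle the same Lucene release
    // resolve to the same cached Analyzer instance.
    private final Map<org.apache.lucene.util.Version, Analyzer> cache = new ConcurrentHashMap<>();

    Analyzer get(Version esVersion, Function<org.apache.lucene.util.Version, Analyzer> factory) {
        return cache.computeIfAbsent(esVersion.luceneVersion, factory);
    }
}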
@@ -53,7 +53,6 @@
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
-import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.Rewriteable;
@@ -63,6 +62,7 @@
 import org.elasticsearch.search.internal.AliasFilter;
 import org.elasticsearch.search.internal.ShardSearchRequest;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -455,21 +455,6 @@ public void testToFilterDeprecationMessage() throws IOException {
         }
     }

-    public void testSerializationBackcompat() throws IOException {
-        SliceBuilder sliceBuilder = new SliceBuilder(1, 5);
-        assertEquals(IdFieldMapper.NAME, sliceBuilder.getField());
-
-        SliceBuilder copy62 = copyWriteable(sliceBuilder,
-            new NamedWriteableRegistry(Collections.emptyList()),
-            SliceBuilder::new, Version.V_6_2_0);
-        assertEquals(sliceBuilder, copy62);
-
-        SliceBuilder copy63 = copyWriteable(copy62,
-            new NamedWriteableRegistry(Collections.emptyList()),
-            SliceBuilder::new, Version.V_6_3_0);
-        assertEquals(sliceBuilder, copy63);
-    }
-
     public void testToFilterWithRouting() throws IOException {
         Directory dir = new RAMDirectory();
         try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
@@ -489,15 +474,14 @@ public void testToFilterWithRouting() throws IOException {
         when(clusterService.operationRouting()).thenReturn(routing);
         when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
         try (IndexReader reader = DirectoryReader.open(dir)) {
-            QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0);
+            Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT);
+            QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0);
             SliceBuilder builder = new SliceBuilder("field", 6, 10);
             String[] routings = new String[] { "foo" };
-            Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT);
+            Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version);
             assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
-            query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT);
+            query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version);
             assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
-            query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0);
-            assertEquals(new DocValuesSliceQuery("field", 1, 2), query);
         }
     }
 }
@@ -92,11 +92,7 @@ public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             jobId = in.readString();
             update = new JobUpdate(in);
-            if (in.getVersion().onOrAfter(Version.V_6_2_2)) {
-                isInternal = in.readBoolean();
-            } else {
-                isInternal = false;
-            }
+            isInternal = in.readBoolean();
             if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) {
                 in.readBoolean(); // was waitForAck
             }
@@ -107,9 +103,7 @@ public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(jobId);
             update.writeTo(out);
-            if (out.getVersion().onOrAfter(Version.V_6_2_2)) {
-                out.writeBoolean(isInternal);
-            }
+            out.writeBoolean(isInternal);
             if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) {
                 out.writeBoolean(false); // was waitForAck
             }
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.Version;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.support.tasks.BaseTasksResponse;
@@ -121,10 +120,8 @@ public Request(StreamInput in) throws IOException {
             if (in.readBoolean()) {
                 detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new);
             }
-            if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
-                filter = in.readOptionalWriteable(MlFilter::new);
-                updateScheduledEvents = in.readBoolean();
-            }
+            filter = in.readOptionalWriteable(MlFilter::new);
+            updateScheduledEvents = in.readBoolean();
         }

         @Override
@@ -136,10 +133,8 @@ public void writeTo(StreamOutput out) throws IOException {
             if (hasDetectorUpdates) {
                 out.writeList(detectorUpdates);
             }
-            if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
-                out.writeOptionalWriteable(filter);
-                out.writeBoolean(updateScheduledEvents);
-            }
+            out.writeOptionalWriteable(filter);
+            out.writeBoolean(updateScheduledEvents);
         }

         public Request(String jobId, ModelPlotConfig modelPlotConfig, List<JobUpdate.DetectorUpdate> detectorUpdates, MlFilter filter,
@@ -222,11 +222,7 @@ public DatafeedConfig(StreamInput in) throws IOException {
         }
         this.scrollSize = in.readOptionalVInt();
         this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new);
-        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
-            this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString));
-        } else {
-            this.headers = Collections.emptyMap();
-        }
+        this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString));
         if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
             delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new);
         } else {
@@ -432,9 +428,7 @@ public void writeTo(StreamOutput out) throws IOException {
         }
         out.writeOptionalVInt(scrollSize);
         out.writeOptionalWriteable(chunkingConfig);
-        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
-            out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
-        }
+        out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
         if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
             out.writeOptionalWriteable(delayedDataCheckConfig);
         }