From 43e07c0c886223c65d26a4e3bdb7815ff15abe7d Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Fri, 10 Jun 2016 09:56:57 -0400 Subject: [PATCH] Better handling of an empty shard's segments_N file When trying to restore a snapshot of an index created in a previous version of Elasticsearch, it is possible that empty shards in the snapshot have a segments_N file that has an unsupported Lucene version and a missing checksum. This leads to issues with restoring the snapshot. This commit handles this special case by avoiding a restore of a shard that has no data, since there is nothing to restore anyway. Closes #18707 --- .../index/shard/CommitPoint.java | 7 - .../BlobStoreIndexShardRepository.java | 32 +++- .../BlobStoreIndexShardSnapshot.java | 165 +++++++++++------- .../org/elasticsearch/index/store/Store.java | 31 +--- .../index/store/StoreFileMetaData.java | 17 +- .../recovery/RecoveryFileChunkRequest.java | 13 +- .../blobstore/BlobStoreFormat.java | 1 + .../bwcompat/RestoreBackwardsCompatIT.java | 40 ++++- .../snapshots/blobstore/FileInfoTests.java | 12 +- .../bwc/missing-checksum-repo-2.3.4.zip | Bin 0 -> 11371 bytes 10 files changed, 188 insertions(+), 130 deletions(-) create mode 100644 core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java index 916cf563fb8e6..9082fc072dadb 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java @@ -62,13 +62,6 @@ public long length() { public String checksum() { return checksum; } - - public boolean isSame(StoreFileMetaData md) { - if (checksum == null || md.checksum() == null) { - return false; - } - return length == md.length() && checksum.equals(md.checksum()); - } } public static enum Type { diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index e0032fe503b4c..94337ecdbc516 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -23,6 +23,8 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -49,7 +51,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -458,7 +459,9 @@ protected Tuple buildBlobStoreIndexShardS } if (latest >= 0) { try { - return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest); + final BlobStoreIndexShardSnapshots shardSnapshots = + indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)); + return new Tuple<>(shardSnapshots, 
latest);
 } catch (IOException e) {
 logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest);
 }
@@ -503,10 +506,8 @@ private class SnapshotContext extends Context {
 */
 public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
 super(snapshotId, Version.CURRENT, shardId);
- IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
- store = indexService.getShardOrNull(shardId.id()).store();
 this.snapshotStatus = snapshotStatus;
-
+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
 }
 /**
@@ -788,8 +789,8 @@ private class RestoreContext extends Context {
 */
 public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
 super(snapshotId, version, shardId, snapshotShardId);
- store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
 this.recoveryState = recoveryState;
+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
 }
 /**
@@ -800,6 +801,25 @@ public void restore() throws IOException {
 try {
 logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
 BlobStoreIndexShardSnapshot snapshot = loadSnapshot();
+
+ if (snapshot.indexFiles().size() == 1
+ && snapshot.indexFiles().get(0).physicalName().startsWith("segments_")
+ && snapshot.indexFiles().get(0).hasUnknownChecksum()) {
+ // If the shard has no documents, its snapshot will contain only a single segments_N file.
+ // If we are restoring a snapshot created by a previous supported version, it is still
+ // possible that in that version, an empty shard's segments_N file carries an unsupported
+ // Lucene version and no checksum, because the Lucene version to assign to segments_N is
+ // not known until some data has been written. Even though the index itself is perfectly
+ // fine to restore, such a segments_N file would cause exceptions to be thrown during the
+ // restore. Since there is no data to restore from an empty shard anyway, we just create
+ // the empty shard here and then exit.
+ IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+ .setCommitOnClose(true));
+ writer.close();
+ return;
+ }
+
 SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
 final Store.MetadataSnapshot recoveryTargetMetadata;
 try {
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index 60b7ec2112e91..5bb0f728bc14e 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.Strings;
@@ -50,6 +49,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
 * Information about snapshotted file
 */
 public static class FileInfo {
+ private static final String UNKNOWN_CHECKSUM = "_na_";
+
 private final String name;
 private final ByteSizeValue partSize;
 private final long partBytes;
@@ -207,27 +208,43 @@ public boolean isSame(StoreFileMetaData md) {
 * @return true if the file in a store and this file have the same checksum and length
 */
 public boolean isSame(FileInfo fileInfo) {
- if (numberOfParts != fileInfo.numberOfParts) return false;
- if (partBytes != fileInfo.partBytes) return false;
- if (!name.equals(fileInfo.name)) return false;
+ if (numberOfParts != fileInfo.numberOfParts) {
+ return false;
+ }
+ if (partBytes != fileInfo.partBytes) {
+ return false;
+ }
+ if (!name.equals(fileInfo.name)) {
+ return false;
+ }
 if (partSize != null) {
- if (!partSize.equals(fileInfo.partSize)) return false;
+ if (!partSize.equals(fileInfo.partSize)) {
+ return false;
+ }
 } else {
- if (fileInfo.partSize != null) return false;
+ if (fileInfo.partSize != null) {
+ return false;
+ }
 }
 return metadata.isSame(fileInfo.metadata);
 }
- static final class Fields {
- static final String NAME = "name";
- static final String PHYSICAL_NAME = "physical_name";
- static final String LENGTH = "length";
- static final String CHECKSUM = "checksum";
- static final String PART_SIZE = "part_size";
- static final String WRITTEN_BY = "written_by";
- static final String META_HASH = "meta_hash";
+ /**
+ * Checks if the checksum for the file is unknown. This is only possible for an empty shard's
+ * segments_N file that was written by an older Lucene version.
+ */ + public boolean hasUnknownChecksum() { + return metadata.checksum().equals(UNKNOWN_CHECKSUM); } + static final String NAME = "name"; + static final String PHYSICAL_NAME = "physical_name"; + static final String LENGTH = "length"; + static final String CHECKSUM = "checksum"; + static final String PART_SIZE = "part_size"; + static final String WRITTEN_BY = "written_by"; + static final String META_HASH = "meta_hash"; + /** * Serializes file info into JSON * @@ -237,22 +254,22 @@ static final class Fields { */ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(Fields.NAME, file.name); - builder.field(Fields.PHYSICAL_NAME, file.metadata.name()); - builder.field(Fields.LENGTH, file.metadata.length()); - if (file.metadata.checksum() != null) { - builder.field(Fields.CHECKSUM, file.metadata.checksum()); + builder.field(NAME, file.name); + builder.field(PHYSICAL_NAME, file.metadata.name()); + builder.field(LENGTH, file.metadata.length()); + if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) { + builder.field(CHECKSUM, file.metadata.checksum()); } if (file.partSize != null) { - builder.field(Fields.PART_SIZE, file.partSize.bytes()); + builder.field(PART_SIZE, file.partSize.bytes()); } if (file.metadata.writtenBy() != null) { - builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy()); + builder.field(WRITTEN_BY, file.metadata.writtenBy()); } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(Fields.META_HASH, file.metadata.hash()); + builder.field(META_HASH, file.metadata.hash()); } builder.endObject(); } @@ -271,6 +288,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { String checksum = null; ByteSizeValue partSize = null; Version writtenBy = null; + String writtenByStr = null; BytesRef metaHash = new BytesRef(); if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -278,19 +296,20 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if ("name".equals(currentFieldName)) { + if (NAME.equals(currentFieldName)) { name = parser.text(); - } else if ("physical_name".equals(currentFieldName)) { + } else if (PHYSICAL_NAME.equals(currentFieldName)) { physicalName = parser.text(); - } else if ("length".equals(currentFieldName)) { + } else if (LENGTH.equals(currentFieldName)) { length = parser.longValue(); - } else if ("checksum".equals(currentFieldName)) { + } else if (CHECKSUM.equals(currentFieldName)) { checksum = parser.text(); - } else if ("part_size".equals(currentFieldName)) { + } else if (PART_SIZE.equals(currentFieldName)) { partSize = new ByteSizeValue(parser.longValue()); - } else if ("written_by".equals(currentFieldName)) { - writtenBy = Lucene.parseVersionLenient(parser.text(), null); - } else if ("meta_hash".equals(currentFieldName)) { + } else if (WRITTEN_BY.equals(currentFieldName)) { + writtenByStr = parser.text(); + writtenBy = Lucene.parseVersionLenient(writtenByStr, null); + } else if (META_HASH.equals(currentFieldName)) { metaHash.bytes = parser.binaryValue(); metaHash.offset = 0; metaHash.length = metaHash.bytes.length; @@ -305,6 +324,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } } } + // Verify that file information is complete if (name == null || 
Strings.validFileName(name) == false) {
 throw new ElasticsearchParseException("missing or invalid file name [" + name + "]");
@@ -312,10 +332,29 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException {
 throw new ElasticsearchParseException("missing or invalid physical file name [" + physicalName + "]");
 } else if (length < 0) {
 throw new ElasticsearchParseException("missing or invalid file length");
+ } else if (writtenBy == null) {
+ throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]");
+ } else if (checksum == null) {
+ if (physicalName.startsWith("segments_")
+ && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) {
+ // it's possible the checksum is null for segments_N files that belong to a shard with no data,
+ // so we will assign it _na_ for now and try to get the checksum from the file itself later
+ checksum = UNKNOWN_CHECKSUM;
+ } else {
+ throw new ElasticsearchParseException("missing checksum for name [" + name + "]");
+ }
 }
 return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);
 }
+ @Override
+ public String toString() {
+ return "[name: " + name +
+ ", numberOfParts: " + numberOfParts +
+ ", partSize: " + partSize +
+ ", partBytes: " + partBytes +
+ ", metadata: " + metadata + "]";
+ }
 }
 private final String snapshot;
@@ -424,26 +463,21 @@ public long totalSize() {
 return totalSize;
 }
- static final class Fields {
- static final String NAME = "name";
- static final String INDEX_VERSION = "index_version";
- static final String START_TIME = "start_time";
- static final String TIME = "time";
- static final String NUMBER_OF_FILES = "number_of_files";
- static final String TOTAL_SIZE = "total_size";
- static final String FILES = "files";
- }
-
- static final class ParseFields {
- static final ParseField NAME = new ParseField("name");
- static final ParseField INDEX_VERSION = new ParseField("index_version", "index-version");
- static final ParseField START_TIME = new ParseField("start_time");
- static final ParseField TIME = new ParseField("time");
- static final ParseField NUMBER_OF_FILES = new ParseField("number_of_files");
- static final ParseField TOTAL_SIZE = new ParseField("total_size");
- static final ParseField FILES = new ParseField("files");
- }
-
+ private static final String NAME = "name";
+ private static final String INDEX_VERSION = "index_version";
+ private static final String START_TIME = "start_time";
+ private static final String TIME = "time";
+ private static final String NUMBER_OF_FILES = "number_of_files";
+ private static final String TOTAL_SIZE = "total_size";
+ private static final String FILES = "files";
+
+ private static final ParseField PARSE_NAME = new ParseField("name");
+ private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version");
+ private static final ParseField PARSE_START_TIME = new ParseField("start_time");
+ private static final ParseField PARSE_TIME = new ParseField("time");
+ private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files");
+ private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size");
+ private static final ParseField PARSE_FILES = new ParseField("files");
 /**
 * Serializes shard snapshot metadata info into JSON
 *
@@ -453,13 +487,13 @@ static final class ParseFields {
 */
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-
builder.field(Fields.NAME, snapshot); - builder.field(Fields.INDEX_VERSION, indexVersion); - builder.field(Fields.START_TIME, startTime); - builder.field(Fields.TIME, time); - builder.field(Fields.NUMBER_OF_FILES, numberOfFiles); - builder.field(Fields.TOTAL_SIZE, totalSize); - builder.startArray(Fields.FILES); + builder.field(NAME, snapshot); + builder.field(INDEX_VERSION, indexVersion); + builder.field(START_TIME, startTime); + builder.field(TIME, time); + builder.field(NUMBER_OF_FILES, numberOfFiles); + builder.field(TOTAL_SIZE, totalSize); + builder.startArray(FILES); for (FileInfo fileInfo : indexFiles) { FileInfo.toXContent(fileInfo, builder, params); } @@ -493,24 +527,24 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) { snapshot = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) { // The index-version is needed for backward compatibility with v 1.0 indexVersion = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) { startTime = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) { time = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) { numberOfFiles = parser.intValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) { totalSize = parser.longValue(); } else { throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName); } } else if (token == XContentParser.Token.START_ARRAY) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) { while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { indexFiles.add(FileInfo.fromXContent(parser)); } @@ -526,6 +560,7 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel } } return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), - startTime, time, numberOfFiles, totalSize); + startTime, time, numberOfFiles, totalSize); } + } diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index a720f5cb2588c..166f978a4dbb1 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -41,7 +41,6 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.Version; @@ -444,11 +443,9 @@ public static void tryOpenIndex(Path indexLocation, ShardId shardId, ESLogger lo } /** - * The returned IndexOutput might validate the files checksum if 
the file has been written with a newer lucene version
- * and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only
- * a legacy checksum, returned IndexOutput will not verify the checksum.
+ * The returned IndexOutput validates the file's checksum.
 *

- * Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the
+ * Note: Checksums are calculated by Lucene by default since version 4.8.0. This method only adds the
+ * verification against the checksum in the given metadata and does not add any significant overhead.
 */
 public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
@@ -652,17 +649,7 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot
 // different in the diff. That's why we have to double check here again if the rest of it matches.
 // all is fine this file is just part of a commit or a segment that is different
- final boolean same = local.isSame(remote);
-
- // this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just
- // verifying that we are consistent on both ends source and target
- final boolean hashAndLengthEqual = (
- local.checksum() == null
- && remote.checksum() == null
- && local.hash().equals(remote.hash())
- && local.length() == remote.length());
- final boolean consistent = hashAndLengthEqual || same;
- if (consistent == false) {
+ if (local.isSame(remote) == false) {
 logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
 throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
 }
@@ -898,18 +885,6 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map
 }
 }
- /**
- * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
- */
- public static BytesRef hashFile(Directory directory, String file) throws IOException {
- final BytesRefBuilder fileHash = new BytesRefBuilder();
- try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
- hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
- }
- return fileHash.get();
- }
-
-
 /**
 * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
 */
 }
diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
index e163b15f60ee5..2653f01c81df9 100644
--- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
+++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
@@ -21,10 +21,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.lucene.Lucene;
@@ -58,14 +56,15 @@ public StoreFileMetaData(String name, long length, String checksum, Version writ
 }
 public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
- assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
- + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
- Objects.requireNonNull(writtenBy, "writtenBy must not be null");
- Objects.requireNonNull(checksum, "checksum must not be null");
- this.name = name;
+ // it's possible here to have a _na_ checksum or an unsupported writtenBy version if the
+ // file is a segments_N file, but that is fine because we handle that case upstream
+ assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :
+ "index versions less than " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
+ this.name = Objects.requireNonNull(name, "name must not be null");
 this.length = length;
- this.checksum = checksum;
- this.writtenBy = writtenBy;
+ this.checksum = Objects.requireNonNull(checksum, "checksum must not be null");
+ this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null");
 this.hash = hash == null ?
new BytesRef() : hash; } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index 8fd08d9f8fbd8..49cdb737ed33a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.recovery; import org.apache.lucene.util.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -76,7 +75,6 @@ public long position() { return position; } - @Nullable public String checksum() { return metaData.checksum(); } @@ -105,11 +103,10 @@ public void readFrom(StreamInput in) throws IOException { String name = in.readString(); position = in.readVLong(); long length = in.readVLong(); - String checksum = in.readOptionalString(); + String checksum = in.readString(); content = in.readBytesReference(); - Version writtenBy = null; - String versionString = in.readOptionalString(); - writtenBy = Lucene.parseVersionLenient(versionString, null); + Version writtenBy = Lucene.parseVersionLenient(in.readString(), null); + assert writtenBy != null; metaData = new StoreFileMetaData(name, length, checksum, writtenBy); lastChunk = in.readBoolean(); totalTranslogOps = in.readVInt(); @@ -124,9 +121,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(metaData.name()); out.writeVLong(position); out.writeVLong(metaData.length()); - out.writeOptionalString(metaData.checksum()); + out.writeString(metaData.checksum()); out.writeBytesReference(content); - out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString()); + out.writeString(metaData.writtenBy().toString()); out.writeBoolean(lastChunk); out.writeVInt(totalTranslogOps); out.writeLong(sourceThrottleTimeInNanos); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java index 23d390dfcfe61..04900705e0ab2 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java @@ -115,4 +115,5 @@ protected T read(BytesReference bytes) throws IOException { } } + } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 419104bfe34e5..494aa7d1095d6 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -1,4 +1,3 @@ -/* /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -46,6 +45,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.SortedSet; @@ -127,6 +127,44 @@ public void testRestoreUnsupportedSnapshots() throws Exception { } } + public void testRestoreSnapshotWithMissingChecksum() throws Exception { + final String repo = "test_repo"; + final String snapshot = "test_1"; + final String indexName = "index-2.3.4"; + final String repoFileId = "missing-checksum-repo-2.3.4"; + Path repoFile = getBwcIndicesPath().resolve(repoFileId + ".zip"); + URI repoFileUri = repoFile.toUri(); + URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/"); + logger.info("--> creating repository [{}] for repo file [{}]", repo, repoFileId); + assertAcked(client().admin().cluster().preparePutRepository(repo) + .setType("url") + .setSettings(Settings.builder().put("url", repoJarUri.toString()))); + + logger.info("--> get snapshot and check its indices"); + GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get(); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); + SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0); + assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName))); + + logger.info("--> restoring snapshot"); + RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get(); + assertThat(response.status(), equalTo(RestStatus.OK)); + RestoreInfo restoreInfo = response.getRestoreInfo(); + assertThat(restoreInfo.successfulShards(), greaterThan(0)); + assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards())); + assertThat(restoreInfo.failedShards(), equalTo(0)); + String index = restoreInfo.indices().get(0); + assertThat(index, equalTo(indexName)); + + logger.info("--> check search"); + SearchResponse searchResponse = client().prepareSearch(index).get(); + assertThat(searchResponse.getHits().totalHits(), greaterThan(0L)); + + logger.info("--> cleanup"); + cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()])); + cluster().wipeTemplates(); + } + private List repoVersions() throws Exception { return listRepoVersions("repo"); } diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index 67c431135a05e..70eacaafedb84 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.test.ESTestCase; @@ -105,11 +105,11 @@ public void testInvalidFieldsInFromXContent() throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.startObject(); - builder.field(Fields.NAME, name); - 
builder.field(Fields.PHYSICAL_NAME, physicalName);
- builder.field(Fields.LENGTH, length);
- builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
- builder.field(Fields.CHECKSUM, "666");
+ builder.field(FileInfo.NAME, name);
+ builder.field(FileInfo.PHYSICAL_NAME, physicalName);
+ builder.field(FileInfo.LENGTH, length);
+ builder.field(FileInfo.WRITTEN_BY, Version.LATEST.toString());
+ builder.field(FileInfo.CHECKSUM, "666");
 builder.endObject();
 byte[] xContent = builder.bytes().toBytes();
diff --git a/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip b/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip
new file mode 100644
index 0000000000000000000000000000000000000000..9590f8dbd660fd56617c8e55b6bc73276aae4744
GIT binary patch
literal 11371
[base85-encoded binary payload of the test repository zip omitted; the encoded data was garbled and truncated in this copy of the patch]
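
Note on the core technique: the heart of this patch is the short-circuit in RestoreContext.restore() above. When a shard's snapshot consists of nothing but a checksum-less segments_N file, the shard is recreated empty locally instead of being copied from the repository. The following is a minimal, self-contained sketch of that one step against plain Lucene. It is an illustration of the technique, not the Elasticsearch code itself: the RAMDirectory is a stand-in for the shard's store.directory(), and the class name is made up for the example.

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class EmptyShardRestoreSketch {
        public static void main(String[] args) throws Exception {
            // Stand-in for the shard's on-disk store.directory().
            try (Directory dir = new RAMDirectory()) {
                // Opening an IndexWriter in CREATE mode (the null analyzer is fine
                // because no documents are ever indexed) and closing it commits a
                // fresh segments_N written by the current Lucene version, complete
                // with a checksum -- no repository data needs to be read at all.
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null)
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    .setCommitOnClose(true));
                writer.close();
            }
        }
    }

Because the recreated segments_N carries a current Lucene version and a proper checksum, later recoveries and snapshots of the restored (empty) shard verify cleanly, which is what makes skipping the restore safe.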