diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java
index 916cf563fb8e6..9082fc072dadb 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java
@@ -62,13 +62,6 @@ public long length() {
         public String checksum() {
             return checksum;
         }
-
-        public boolean isSame(StoreFileMetaData md) {
-            if (checksum == null || md.checksum() == null) {
-                return false;
-            }
-            return length == md.length() && checksum.equals(md.checksum());
-        }
     }
 
     public static enum Type {
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
index e0032fe503b4c..94337ecdbc516 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
@@ -23,6 +23,8 @@
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -49,7 +51,6 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.iterable.Iterables;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardRepository;
 import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
@@ -458,7 +459,9 @@ protected Tuple<BlobStoreIndexShardSnapshots, Integer> buildBlobStoreIndexShardS
         }
         if (latest >= 0) {
             try {
-                return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest);
+                final BlobStoreIndexShardSnapshots shardSnapshots =
+                    indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));
+                return new Tuple<>(shardSnapshots, latest);
             } catch (IOException e) {
                 logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest);
             }
@@ -503,10 +506,8 @@ private class SnapshotContext extends Context {
          */
         public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
             super(snapshotId, Version.CURRENT, shardId);
-            IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-            store = indexService.getShardOrNull(shardId.id()).store();
             this.snapshotStatus = snapshotStatus;
-
+            store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
         }
 
         /**
@@ -788,8 +789,8 @@ private class RestoreContext extends Context {
          */
         public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
             super(snapshotId, version, shardId, snapshotShardId);
-            store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
             this.recoveryState = recoveryState;
+            store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
         }
 
         /**
@@ -800,6 +801,25 @@ public void restore() throws IOException {
             try {
                 logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
                 BlobStoreIndexShardSnapshot snapshot = loadSnapshot();
+
+                if (snapshot.indexFiles().size() == 1
+                        && snapshot.indexFiles().get(0).physicalName().startsWith("segments_")
+                        && snapshot.indexFiles().get(0).hasUnknownChecksum()) {
+                    // If the shard has no documents, it will only contain a single segments_N file for the
+                    // shard's snapshot.  If we are restoring a snapshot created by a previous supported version,
+                    // it is still possible that in that version, an empty shard has a segments_N file with an unsupported
+                    // version (and no checksum), because we don't know the Lucene version to assign segments_N until we
+                    // have written some data.  Since the segments_N for an empty shard could have an incompatible Lucene
+                    // version number and no checksum, even though the index itself is perfectly fine to restore, this
+                    // empty shard would cause exceptions to be thrown.  Since there is no data to restore from an empty
+                    // shard anyway, we just create the empty shard here and then exit.
+                    IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
+                        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+                        .setCommitOnClose(true));
+                    writer.close();
+                    return;
+                }
+
                 SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
                 final Store.MetadataSnapshot recoveryTargetMetadata;
                 try {
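
For context, the empty-shard branch added to restore() above relies only on stock Lucene APIs. A minimal standalone sketch of the same technique, assuming a Lucene 5.x classpath; the directory path is illustrative:

    import java.nio.file.Paths;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class EmptyShardCommit {
        public static void main(String[] args) throws Exception {
            // Illustrative path standing in for the shard's store directory.
            try (Directory dir = FSDirectory.open(Paths.get("/tmp/empty-shard"))) {
                // A null Analyzer is acceptable because no documents are ever analyzed.
                // OpenMode.CREATE discards any existing segments_N, which is the point:
                // the old commit with the unknown checksum is replaced by a fresh one.
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null)
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    .setCommitOnClose(true));
                writer.close(); // commit-on-close writes a new segments_N with footer checksums

                // The resulting index is empty but fully readable.
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    assert reader.numDocs() == 0;
                }
            }
        }
    }
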
= "part_size"; - static final String WRITTEN_BY = "written_by"; - static final String META_HASH = "meta_hash"; + /** + * Checks if the checksum for the file is unknown. This only is possible on an empty shard's + * segments_N file which was created in older Lucene versions. + */ + public boolean hasUnknownChecksum() { + return metadata.checksum().equals(UNKNOWN_CHECKSUM); } + static final String NAME = "name"; + static final String PHYSICAL_NAME = "physical_name"; + static final String LENGTH = "length"; + static final String CHECKSUM = "checksum"; + static final String PART_SIZE = "part_size"; + static final String WRITTEN_BY = "written_by"; + static final String META_HASH = "meta_hash"; + /** * Serializes file info into JSON * @@ -237,22 +254,22 @@ static final class Fields { */ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(Fields.NAME, file.name); - builder.field(Fields.PHYSICAL_NAME, file.metadata.name()); - builder.field(Fields.LENGTH, file.metadata.length()); - if (file.metadata.checksum() != null) { - builder.field(Fields.CHECKSUM, file.metadata.checksum()); + builder.field(NAME, file.name); + builder.field(PHYSICAL_NAME, file.metadata.name()); + builder.field(LENGTH, file.metadata.length()); + if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) { + builder.field(CHECKSUM, file.metadata.checksum()); } if (file.partSize != null) { - builder.field(Fields.PART_SIZE, file.partSize.bytes()); + builder.field(PART_SIZE, file.partSize.bytes()); } if (file.metadata.writtenBy() != null) { - builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy()); + builder.field(WRITTEN_BY, file.metadata.writtenBy()); } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(Fields.META_HASH, file.metadata.hash()); + builder.field(META_HASH, file.metadata.hash()); } builder.endObject(); } @@ -271,6 +288,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { String checksum = null; ByteSizeValue partSize = null; Version writtenBy = null; + String writtenByStr = null; BytesRef metaHash = new BytesRef(); if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -278,19 +296,20 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if ("name".equals(currentFieldName)) { + if (NAME.equals(currentFieldName)) { name = parser.text(); - } else if ("physical_name".equals(currentFieldName)) { + } else if (PHYSICAL_NAME.equals(currentFieldName)) { physicalName = parser.text(); - } else if ("length".equals(currentFieldName)) { + } else if (LENGTH.equals(currentFieldName)) { length = parser.longValue(); - } else if ("checksum".equals(currentFieldName)) { + } else if (CHECKSUM.equals(currentFieldName)) { checksum = parser.text(); - } else if ("part_size".equals(currentFieldName)) { + } else if (PART_SIZE.equals(currentFieldName)) { partSize = new ByteSizeValue(parser.longValue()); - } else if ("written_by".equals(currentFieldName)) { - writtenBy = Lucene.parseVersionLenient(parser.text(), null); - } else if ("meta_hash".equals(currentFieldName)) { + } else if (WRITTEN_BY.equals(currentFieldName)) { + writtenByStr = parser.text(); + writtenBy = Lucene.parseVersionLenient(writtenByStr, null); + } else if (META_HASH.equals(currentFieldName)) { 
metaHash.bytes = parser.binaryValue(); metaHash.offset = 0; metaHash.length = metaHash.bytes.length; @@ -305,6 +324,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } } } + // Verify that file information is complete if (name == null || Strings.validFileName(name) == false) { throw new ElasticsearchParseException("missing or invalid file name [" + name + "]"); @@ -312,10 +332,29 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { throw new ElasticsearchParseException("missing or invalid physical file name [" + physicalName + "]"); } else if (length < 0) { throw new ElasticsearchParseException("missing or invalid file length"); + } else if (writtenBy == null) { + throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]"); + } else if (checksum == null) { + if (physicalName.startsWith("segments_") + && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) { + // its possible the checksum is null for segments_N files that belong to a shard with no data, + // so we will assign it _na_ for now and try to get the checksum from the file itself later + checksum = UNKNOWN_CHECKSUM; + } else { + throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); + } } return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize); } + @Override + public String toString() { + return "[name: " + name + + ", numberOfParts: " + numberOfParts + + ", partSize: " + partSize + + ", partBytes: " + partBytes + + ", metadata: " + metadata + "]"; + } } private final String snapshot; @@ -424,26 +463,21 @@ public long totalSize() { return totalSize; } - static final class Fields { - static final String NAME = "name"; - static final String INDEX_VERSION = "index_version"; - static final String START_TIME = "start_time"; - static final String TIME = "time"; - static final String NUMBER_OF_FILES = "number_of_files"; - static final String TOTAL_SIZE = "total_size"; - static final String FILES = "files"; - } - - static final class ParseFields { - static final ParseField NAME = new ParseField("name"); - static final ParseField INDEX_VERSION = new ParseField("index_version", "index-version"); - static final ParseField START_TIME = new ParseField("start_time"); - static final ParseField TIME = new ParseField("time"); - static final ParseField NUMBER_OF_FILES = new ParseField("number_of_files"); - static final ParseField TOTAL_SIZE = new ParseField("total_size"); - static final ParseField FILES = new ParseField("files"); - } - + private static final String NAME = "name"; + private static final String INDEX_VERSION = "index_version"; + private static final String START_TIME = "start_time"; + private static final String TIME = "time"; + private static final String NUMBER_OF_FILES = "number_of_files"; + private static final String TOTAL_SIZE = "total_size"; + private static final String FILES = "files"; + + private static final ParseField PARSE_NAME = new ParseField("name"); + private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version"); + private static final ParseField PARSE_START_TIME = new ParseField("start_time"); + private static final ParseField PARSE_TIME = new ParseField("time"); + private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files"); + private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size"); + private static final ParseField 
PARSE_FILES = new ParseField("files"); /** * Serializes shard snapshot metadata info into JSON @@ -453,13 +487,13 @@ static final class ParseFields { */ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.NAME, snapshot); - builder.field(Fields.INDEX_VERSION, indexVersion); - builder.field(Fields.START_TIME, startTime); - builder.field(Fields.TIME, time); - builder.field(Fields.NUMBER_OF_FILES, numberOfFiles); - builder.field(Fields.TOTAL_SIZE, totalSize); - builder.startArray(Fields.FILES); + builder.field(NAME, snapshot); + builder.field(INDEX_VERSION, indexVersion); + builder.field(START_TIME, startTime); + builder.field(TIME, time); + builder.field(NUMBER_OF_FILES, numberOfFiles); + builder.field(TOTAL_SIZE, totalSize); + builder.startArray(FILES); for (FileInfo fileInfo : indexFiles) { FileInfo.toXContent(fileInfo, builder, params); } @@ -493,24 +527,24 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) { snapshot = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) { // The index-version is needed for backward compatibility with v 1.0 indexVersion = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) { startTime = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) { time = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) { numberOfFiles = parser.intValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) { totalSize = parser.longValue(); } else { throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName); } } else if (token == XContentParser.Token.START_ARRAY) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) { while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { indexFiles.add(FileInfo.fromXContent(parser)); } @@ -526,6 +560,7 @@ public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFiel } } return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), - startTime, time, numberOfFiles, totalSize); + startTime, time, numberOfFiles, totalSize); } + } diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index a720f5cb2588c..166f978a4dbb1 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -41,7 +41,6 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; import 
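
The _na_ sentinel above is only accepted for a checksum-less segments_N written before Lucene first embedded checksums. A self-contained sketch of that parse rule, assuming a Lucene 5.x classpath where the deprecated 4.x Version constants still exist; the helper and its messages are hypothetical, not the Elasticsearch API:

    import org.apache.lucene.util.Version;

    public class ChecksumFallback {
        // Mirrors the sentinel the patch introduces in FileInfo (value copied from the diff).
        static final String UNKNOWN_CHECKSUM = "_na_";
        // Lucene started writing footer checksums in 4.8.0
        // (StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION in the patch).
        static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;

        /** Decide what checksum to record for a parsed file entry; null means "absent in the JSON". */
        static String checksumOrSentinel(String physicalName, Version writtenBy, String checksum) {
            if (checksum != null) {
                return checksum;
            }
            // Only an empty shard's segments_N written before Lucene 4.8 may legitimately lack a checksum.
            if (physicalName.startsWith("segments_") && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) == false) {
                return UNKNOWN_CHECKSUM;
            }
            throw new IllegalArgumentException("missing checksum for [" + physicalName + "]");
        }

        public static void main(String[] args) {
            // segments_1 written by Lucene 4.7 and stored without a checksum: tolerated.
            System.out.println(checksumOrSentinel("segments_1", Version.LUCENE_4_7_0, null)); // _na_
            // Any other file without a checksum marks a corrupt snapshot descriptor.
            System.out.println(checksumOrSentinel("_0.cfs", Version.LUCENE_4_7_0, null));     // throws
        }
    }
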
diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java
index a720f5cb2588c..166f978a4dbb1 100644
--- a/core/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/core/src/main/java/org/elasticsearch/index/store/Store.java
@@ -41,7 +41,6 @@
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.Version;
@@ -444,11 +443,9 @@ public static void tryOpenIndex(Path indexLocation, ShardId shardId, ESLogger lo
     }
 
     /**
-     * The returned IndexOutput might validate the files checksum if the file has been written with a newer lucene version
-     * and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only
-     * a legacy checksum, returned IndexOutput will not verify the checksum.
+     * The returned IndexOutput validates the files checksum.
      * <p>
-     * Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the
+     * Note: Checksums are calculated by default since version 4.8.0. This method only adds the
      * verification against the checksum in the given metadata and does not add any significant overhead.
      */
     public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
@@ -652,17 +649,7 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot
                     // different in the diff. That's why we have to double check here again if the rest of it matches.
 
                     // all is fine this file is just part of a commit or a segment that is different
-                    final boolean same = local.isSame(remote);
-
-                    // this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just
-                    // verifying that we are consistent on both ends source and target
-                    final boolean hashAndLengthEqual = (
-                        local.checksum() == null
-                        && remote.checksum() == null
-                        && local.hash().equals(remote.hash())
-                        && local.length() == remote.length());
-                    final boolean consistent = hashAndLengthEqual || same;
-                    if (consistent == false) {
+                    if (local.isSame(remote) == false) {
                         logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
                         throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
                     }
@@ -898,18 +885,6 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map
         }
     }
 
-    /**
-     * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
-     */
-    public static BytesRef hashFile(Directory directory, String file) throws IOException {
-        final BytesRefBuilder fileHash = new BytesRefBuilder();
-        try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
-            hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
-        }
-        return fileHash.get();
-    }
-
-
     /**
      * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
      */
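
The tightened javadoc above can promise unconditional verification because every supported file now carries a Lucene footer checksum. A sketch of checking such footers directly with Lucene's CodecUtil, assuming a Lucene 5.x classpath; the path is illustrative and write.lock is skipped because lock files carry no footer:

    import java.nio.file.Paths;

    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;

    public class FooterChecksums {
        public static void main(String[] args) throws Exception {
            try (Directory dir = FSDirectory.open(Paths.get("/tmp/empty-shard"))) {
                for (String file : dir.listAll()) {
                    if (file.equals("write.lock")) {
                        continue; // lock files have no codec footer
                    }
                    try (IndexInput in = dir.openInput(file, IOContext.READONCE)) {
                        // Reads the entire file and compares the computed CRC32 with the
                        // checksum stored in the footer (Lucene >= 4.8); throws
                        // CorruptIndexException on a mismatch.
                        long checksum = CodecUtil.checksumEntireFile(in);
                        System.out.println(file + " -> " + Long.toHexString(checksum));
                    }
                }
            }
        }
    }
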
diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
index e163b15f60ee5..2653f01c81df9 100644
--- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
+++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
@@ -21,10 +21,8 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.lucene.Lucene;
 
@@ -58,14 +56,15 @@ public StoreFileMetaData(String name, long length, String checksum, Version writ
     }
 
     public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
-        assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
-            + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
-        Objects.requireNonNull(writtenBy, "writtenBy must not be null");
-        Objects.requireNonNull(checksum, "checksum must not be null");
-        this.name = name;
+        // it's possible here to have a _na_ checksum or an unsupported writtenBy version, if the
+        // file is a segments_N file, but that is fine in the case of a segments_N file because
+        // we handle that case upstream
+        assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :
+            "index versions less than " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
+        this.name = Objects.requireNonNull(name, "name must not be null");
         this.length = length;
-        this.checksum = checksum;
-        this.writtenBy = writtenBy;
+        this.checksum = Objects.requireNonNull(checksum, "checksum must not be null");
+        this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null");
         this.hash = hash == null ? new BytesRef() : hash;
     }
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
index 8fd08d9f8fbd8..49cdb737ed33a 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.indices.recovery;
 
 import org.apache.lucene.util.Version;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -76,7 +75,6 @@ public long position() {
         return position;
     }
 
-    @Nullable
     public String checksum() {
         return metaData.checksum();
     }
@@ -105,11 +103,10 @@ public void readFrom(StreamInput in) throws IOException {
         String name = in.readString();
         position = in.readVLong();
         long length = in.readVLong();
-        String checksum = in.readOptionalString();
+        String checksum = in.readString();
         content = in.readBytesReference();
-        Version writtenBy = null;
-        String versionString = in.readOptionalString();
-        writtenBy = Lucene.parseVersionLenient(versionString, null);
+        Version writtenBy = Lucene.parseVersionLenient(in.readString(), null);
+        assert writtenBy != null;
         metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
         lastChunk = in.readBoolean();
         totalTranslogOps = in.readVInt();
@@ -124,9 +121,9 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(metaData.name());
         out.writeVLong(position);
         out.writeVLong(metaData.length());
-        out.writeOptionalString(metaData.checksum());
+        out.writeString(metaData.checksum());
         out.writeBytesReference(content);
-        out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
+        out.writeString(metaData.writtenBy().toString());
         out.writeBoolean(lastChunk);
         out.writeVInt(totalTranslogOps);
         out.writeLong(sourceThrottleTimeInNanos);
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
index 23d390dfcfe61..04900705e0ab2 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
@@ -115,4 +115,5 @@ protected T read(BytesReference bytes) throws IOException {
         }
     }
 
+
 }
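
The wire change above (readString/writeString instead of the optional variants) assumes both ends agree the checksum and version are always present. A toy illustration of why the two encodings cannot be mixed, using java.io streams rather than Elasticsearch's StreamInput/StreamOutput; the exact ES framing may differ:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class OptionalStringWire {
        // Optional encoding: a presence byte, then the value (mirrors the pattern
        // writeOptionalString/readOptionalString implement).
        static void writeOptional(DataOutputStream out, String s) throws IOException {
            out.writeBoolean(s != null);
            if (s != null) {
                out.writeUTF(s);
            }
        }

        static String readOptional(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeOptional(new DataOutputStream(bytes), "checksum-value");

            // A reader that expects a bare string would consume the presence byte as
            // data, so writer and reader must switch from optional to required in the
            // same protocol version, as the RecoveryFileChunkRequest change does.
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptional(in)); // checksum-value
        }
    }
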
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
index 419104bfe34e5..494aa7d1095d6 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
@@ -1,4 +1,3 @@
-/*
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -46,6 +45,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
 import java.util.SortedSet;
@@ -127,6 +127,44 @@ public void testRestoreUnsupportedSnapshots() throws Exception {
         }
     }
 
+    public void testRestoreSnapshotWithMissingChecksum() throws Exception {
+        final String repo = "test_repo";
+        final String snapshot = "test_1";
+        final String indexName = "index-2.3.4";
+        final String repoFileId = "missing-checksum-repo-2.3.4";
+        Path repoFile = getBwcIndicesPath().resolve(repoFileId + ".zip");
+        URI repoFileUri = repoFile.toUri();
+        URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/");
+        logger.info("--> creating repository [{}] for repo file [{}]", repo, repoFileId);
+        assertAcked(client().admin().cluster().preparePutRepository(repo)
+                        .setType("url")
+                        .setSettings(Settings.builder().put("url", repoJarUri.toString())));
+
+        logger.info("--> get snapshot and check its indices");
+        GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get();
+        assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+        SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+        assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName)));
+
+        logger.info("--> restoring snapshot");
+        RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
+        assertThat(response.status(), equalTo(RestStatus.OK));
+        RestoreInfo restoreInfo = response.getRestoreInfo();
+        assertThat(restoreInfo.successfulShards(), greaterThan(0));
+        assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));
+        assertThat(restoreInfo.failedShards(), equalTo(0));
+        String index = restoreInfo.indices().get(0);
+        assertThat(index, equalTo(indexName));
+
+        logger.info("--> check search");
+        SearchResponse searchResponse = client().prepareSearch(index).get();
+        assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
+
+        logger.info("--> cleanup");
+        cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()]));
+        cluster().wipeTemplates();
+    }
+
     private List<String> repoVersions() throws Exception {
         return listRepoVersions("repo");
     }
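
The test above mounts the checked-in zip as a read-only repository through a jar: URI. A small sketch of just that URI construction; the file name is the one added by this change, while the path resolution is illustrative:

    import java.net.URI;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class JarRepoUri {
        public static void main(String[] args) throws Exception {
            // Illustrative path; the test resolves the real zip under the bwc indices dir.
            Path repoFile = Paths.get("missing-checksum-repo-2.3.4.zip");
            URI repoJarUri = new URI("jar:" + repoFile.toUri().toString() + "!/repo/");
            // e.g. jar:file:///.../missing-checksum-repo-2.3.4.zip!/repo/
            // A "url" repository pointed at this URI reads the snapshot straight out of
            // the zip's /repo/ directory, read-only.
            System.out.println(repoJarUri);
        }
    }
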
diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
index 67c431135a05e..70eacaafedb84 100644
--- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
@@ -27,7 +27,7 @@
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.test.ESTestCase;
 
@@ -105,11 +105,11 @@ public void testInvalidFieldsInFromXContent() throws IOException {
             XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
             builder.startObject();
-            builder.field(Fields.NAME, name);
-            builder.field(Fields.PHYSICAL_NAME, physicalName);
-            builder.field(Fields.LENGTH, length);
-            builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
-            builder.field(Fields.CHECKSUM, "666");
+            builder.field(FileInfo.NAME, name);
+            builder.field(FileInfo.PHYSICAL_NAME, physicalName);
+            builder.field(FileInfo.LENGTH, length);
+            builder.field(FileInfo.WRITTEN_BY, Version.LATEST.toString());
+            builder.field(FileInfo.CHECKSUM, "666");
             builder.endObject();
 
             byte[] xContent = builder.bytes().toBytes();
diff --git a/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip b/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip
new file mode 100644
index 0000000000000..9590f8dbd660f
Binary files /dev/null and b/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip differ