Remove Obsolete BwC Logic from BlobStoreRepository (elastic#42193)
* Remove Obsolete BwC Logic from BlobStoreRepository

* We can't restore 1.3.3 files anyway -> no point in doing the dance of computing a hash here (the removed pattern is sketched below the change summary)
* Some other minor+obvious cleanups
original-brownbear authored and Gurkan Kaymak committed May 27, 2019
1 parent 6ea6c92 commit cbebcde
Showing 2 changed files with 3 additions and 80 deletions.
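The "dance" the commit message refers to: snapshots taken before 1.3.3 carry no per-file content hash, so before comparing a snapshot file against the local store the repository re-read the whole blob just to fill that hash in. Below is a minimal, self-contained sketch of the removed pattern and of the plain metadata comparison the code relies on afterwards; the FileMeta type and the SHA-1 digest are simplified stand-ins for StoreFileMetaData and Store.MetadataSnapshot.hashFile, not the actual Elasticsearch classes.

// Illustrative only: simplified stand-ins, not the actual Elasticsearch StoreFileMetaData / FileInfo classes.
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

final class FileIdentitySketch {

    /** Minimal stand-in for the per-file metadata kept for a snapshot or for the local store. */
    record FileMeta(String name, long length, String checksum, byte[] hash) {}

    /**
     * The removed BwC "dance": when the snapshot-side metadata carries no hash (pre-1.3.3 snapshots)
     * but the local store's metadata does, re-read the whole blob just to compute the missing hash.
     */
    static byte[] recomputeMissingHash(InputStream blob) throws IOException, NoSuchAlgorithmException {
        MessageDigest digest = MessageDigest.getInstance("SHA-1"); // placeholder for Store.MetadataSnapshot.hashFile
        byte[] buffer = new byte[8192];
        int read;
        while ((read = blob.read(buffer)) != -1) {
            digest.update(buffer, 0, read);
        }
        return digest.digest();
    }

    /**
     * What the comparison amounts to after the cleanup: a direct identity check on the metadata
     * both sides already have (name, length, checksum, and hash when present).
     */
    static boolean isSame(FileMeta snapshotFile, FileMeta localFile) {
        return snapshotFile.name().equals(localFile.name())
            && snapshotFile.length() == localFile.length()
            && snapshotFile.checksum().equals(localFile.checksum())
            && Arrays.equals(snapshotFile.hash(), localFile.hash());
    }
}

Because snapshots old enough to lack the hash cannot be restored by this version in the first place, the recompute step buys nothing, which is why both copies of maybeRecalculateMetadataHash in the diff below can simply go.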
@@ -26,8 +26,6 @@
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
@@ -954,8 +952,6 @@ protected void finalize(final List<SnapshotFiles> snapshots,
final Map<String, BlobMetaData> blobs,
final String reason) {
final String indexGeneration = Integer.toString(fileListGeneration);
final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration);

final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
try {
// Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
@@ -998,7 +994,8 @@ protected void finalize(final List<SnapshotFiles> snapshots,
snapshotId, shardId), e);
}
} catch (IOException e) {
String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]";
String message =
"Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]";
throw new IndexShardSnapshotFailedException(shardId, message, e);
}
}
@@ -1135,16 +1132,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
if (filesInfo != null) {
for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
try {
// in 1.3.3 we added additional hashes for .si / segments_N files
// to ensure we don't double the space in the repo since old snapshots
// don't have this hash we try to read that hash from the blob store
// in a bwc compatible way.
maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]",
shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
}
if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
// a commit point file with the same name, size and checksum was already copied to repository
// we will reuse it for this snapshot
@@ -1315,32 +1302,6 @@ private void checkAborted() {
}
}

/**
* This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
* The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
* comparison of the files on a per-segment / per-commit level.
*/
private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
Store.MetadataSnapshot snapshot) throws Exception {
final StoreFileMetaData metadata;
if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
// we have a hash - check if our repo has a hash too otherwise we have
// to calculate it.
// we might have multiple parts even though the file is small... make sure we read all of it.
try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
BytesRefBuilder builder = new BytesRefBuilder();
Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
assert hash.length == 0;
hash.bytes = builder.bytes();
hash.offset = 0;
hash.length = builder.length();
}
}
}
}

private static final class PartSliceStream extends SlicedInputStream {

private final BlobContainer container;
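The PartSliceStream class kept at the end of this file exists because a snapshotted file may be stored as several blob parts, and anything that reads it, including the hash recomputation removed above, has to read all of them ("we might have multiple parts even though the file is small... make sure we read all of it"). Below is a rough sketch of that idea in plain java.io; the real SlicedInputStream in Elasticsearch is more involved, and unlike this simplified version it need not open every part up front. The hunks after the sketch belong to the second changed file, the restore-side counterpart of the same cleanup.

// Illustrative only: the general idea behind reading a multi-part blob as one stream, not Elasticsearch's SlicedInputStream.
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class PartConcatenationSketch {

    /** Opens the stream for one stored part of a blob, by part index. */
    interface PartOpener {
        InputStream openPart(int part) throws IOException;
    }

    /**
     * Exposes a blob stored as numberOfParts separate objects as a single logical stream,
     * so a consumer such as a hash computation sees the complete file.
     * Simplification: all parts are opened eagerly before reading starts.
     */
    static InputStream openAllParts(PartOpener opener, int numberOfParts) throws IOException {
        List<InputStream> parts = new ArrayList<>(numberOfParts);
        for (int i = 0; i < numberOfParts; i++) {
            parts.add(opener.openPart(i));
        }
        return new SequenceInputStream(Collections.enumeration(parts));
    }
}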
@@ -27,8 +27,6 @@
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.shard.ShardId;
@@ -127,17 +125,6 @@ public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException
final Map<String, StoreFileMetaData> snapshotMetaData = new HashMap<>();
final Map<String, BlobStoreIndexShardSnapshot.FileInfo> fileInfos = new HashMap<>();
for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) {
try {
// in 1.3.3 we added additional hashes for .si / segments_N files
// to ensure we don't double the space in the repo since old snapshots
// don't have this hash we try to read that hash from the blob store
// in a bwc compatible way.
maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata);
} catch (Exception e) {
// if the index is broken we might not be able to read it
logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId,
fileInfo.physicalName(), fileInfo.metadata()), e);
}
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
fileInfos.put(fileInfo.metadata().name(), fileInfo);
}
@@ -237,7 +224,7 @@ protected void restoreFiles(List<BlobStoreIndexShardSnapshot.FileInfo> filesToRe
protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo);

@SuppressWarnings("unchecked")
private Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
private static Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
return Iterables.concat(diff.different, diff.missing);
}

@@ -276,29 +263,4 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi
}
}

/**
* This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
* The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
* comparison of the files on a per-segment / per-commit level.
*/
private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot)
throws IOException {
final StoreFileMetaData metadata;
if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
// we have a hash - check if our repo has a hash too otherwise we have
// to calculate it.
// we might have multiple parts even though the file is small... make sure we read all of it.
try (InputStream stream = fileInputStream(fileInfo)) {
BytesRefBuilder builder = new BytesRefBuilder();
Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
assert hash.length == 0;
hash.bytes = builder.bytes();
hash.offset = 0;
hash.length = builder.length();
}
}
}
}
}

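For context on the restore-side hunks: restore works off a diff between the snapshot's file list and what already exists in the local store, and only files that are different or missing get fetched, which is what the small concat(diff) helper above joins together. A hedged, stand-alone sketch of that partitioning with simplified types, not the actual Store.RecoveryDiff:

// Illustrative only: a simplified take on the identical / different / missing split, not Store.RecoveryDiff.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class RecoveryDiffSketch {

    record FileMeta(String name, long length, String checksum) {}

    record Diff(List<FileMeta> identical, List<FileMeta> different, List<FileMeta> missing) {}

    /** Compares the snapshot's file metadata against what is already on disk locally. */
    static Diff diff(List<FileMeta> snapshotFiles, Map<String, FileMeta> localFiles) {
        List<FileMeta> identical = new ArrayList<>();
        List<FileMeta> different = new ArrayList<>();
        List<FileMeta> missing = new ArrayList<>();
        for (FileMeta snapshotFile : snapshotFiles) {
            FileMeta local = localFiles.get(snapshotFile.name());
            if (local == null) {
                missing.add(snapshotFile);       // not present locally -> must be restored
            } else if (local.equals(snapshotFile)) {
                identical.add(snapshotFile);     // can be reused as-is
            } else {
                different.add(snapshotFile);     // present but changed -> must be restored
            }
        }
        return new Diff(identical, different, missing);
    }

    /** Mirrors the concat(diff) helper in the change: only different + missing files are fetched. */
    static List<FileMeta> filesToRestore(Diff diff) {
        List<FileMeta> toRestore = new ArrayList<>(diff.different());
        toRestore.addAll(diff.missing());
        return toRestore;
    }
}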