From fe1d53a9105d922b2b3a8c23b35f851bcf076889 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Tue, 2 Apr 2019 15:48:25 +0200
Subject: [PATCH] Name Snapshot Data Blobs by UUID (#40652)

* Name Snapshot Data Blobs by UUID
* There is no functional reason why we need incremental naming for these files, but
  * As explained in #38941 it is a possible source of corrupting the repository
  * It wastes API calls for the list operation
  * It is just needless complication
* Since we store the exact names of the data blobs in all the metadata anyway, we can make this change without any BwC considerations
  * Even in the worst-case scenario of a downgrade, the functionality would continue working, since the incremental names wouldn't conflict with the UUIDs and the number parsing for finding the next incremental name suppresses the exception when encountering a non-numeric value after the double underscore prefix
---
 .../blobstore/BlobStoreRepository.java        | 38 +------------------
 1 file changed, 1 insertion(+), 37 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 797a77cbd87a0..f11f4cec9291d 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -1053,41 +1053,6 @@ protected void finalize(final List<SnapshotFiles> snapshots,
             }
         }
 
-        /**
-         * Generates blob name
-         *
-         * @param generation the blob number
-         * @return the blob name
-         */
-        protected String fileNameFromGeneration(long generation) {
-            return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX);
-        }
-
-        /**
-         * Finds the next available blob number
-         *
-         * @param blobs list of blobs in the repository
-         * @return next available blob number
-         */
-        protected long findLatestFileNameGeneration(Map<String, BlobMetaData> blobs) {
-            long generation = -1;
-            for (String name : blobs.keySet()) {
-                if (!name.startsWith(DATA_BLOB_PREFIX)) {
-                    continue;
-                }
-                name = canonicalName(name);
-                try {
-                    long currentGen = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX);
-                    if (currentGen > generation) {
-                        generation = currentGen;
-                    }
-                } catch (NumberFormatException e) {
-                    logger.warn("file [{}] does not conform to the '{}' schema", name, DATA_BLOB_PREFIX);
-                }
-            }
-            return generation;
-        }
-
         /**
          * Loads all available snapshots in the repository
         *
@@ -1180,7 +1145,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
                throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
             }
 
-            long generation = findLatestFileNameGeneration(blobs);
             Tuple<BlobStoreIndexShardSnapshots, Integer> tuple = buildBlobStoreIndexShardSnapshots(blobs);
             BlobStoreIndexShardSnapshots snapshots = tuple.v1();
             int fileListGeneration = tuple.v2();
@@ -1248,7 +1212,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
                         indexIncrementalSize += md.length();
                         // create a new FileInfo
                         BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo =
-                            new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize());
+                            new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                         indexCommitPointFiles.add(snapshotFileInfo);
                         filesToSnapshot.add(snapshotFileInfo);
                     } else {
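
Note on the downgrade claim in the commit message: the old code scans for the highest legacy generation and simply skips any blob whose suffix after the double underscore prefix does not parse as a base-36 number, which is exactly where UUID-named blobs fall. The standalone sketch below is not Elasticsearch code; the class name, the legacyName/uuidName helpers, and the use of java.util.UUID in place of UUIDs.randomBase64UUID() are assumptions for illustration, with only the "__" prefix and the parse-and-suppress behaviour taken from the removed findLatestFileNameGeneration above.

    import java.util.List;
    import java.util.UUID;

    // Standalone illustration, not Elasticsearch code: legacy generation-based
    // names and the new UUID-based names can coexist, because the old generation
    // scan ignores anything after "__" that is not a base-36 number.
    public class BlobNamingSketch {

        private static final String DATA_BLOB_PREFIX = "__";

        // Legacy scheme: base-36 generation counter after the prefix.
        static String legacyName(long generation) {
            return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX);
        }

        // New scheme from this change: a random UUID after the prefix
        // (java.util.UUID stands in for UUIDs.randomBase64UUID() here).
        static String uuidName() {
            return DATA_BLOB_PREFIX + UUID.randomUUID();
        }

        // Re-creation of the removed findLatestFileNameGeneration loop,
        // reduced to plain blob names for illustration.
        static long findLatestGeneration(List<String> blobNames) {
            long generation = -1;
            for (String name : blobNames) {
                if (!name.startsWith(DATA_BLOB_PREFIX)) {
                    continue;
                }
                try {
                    long current = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX);
                    generation = Math.max(generation, current);
                } catch (NumberFormatException e) {
                    // UUID-named blobs land here and are skipped, so a downgraded
                    // node still resolves the highest legacy generation without failing.
                }
            }
            return generation;
        }

        public static void main(String[] args) {
            List<String> blobs = List.of(legacyName(0), legacyName(1), uuidName(), uuidName());
            System.out.println(findLatestGeneration(blobs)); // prints 1
        }
    }

Running main prints 1: the UUID-named blobs only trigger the caught exception (and, in the real removed method, a warning log about non-conforming file names), so legacy incremental numbering keeps working alongside the new UUID names after a downgrade.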