Skip to content

Commit

Permalink
Simplify Snapshot Delete Process (elastic#47439)
Browse files Browse the repository at this point in the history
We don't need to read the SnapshotInfo for a
snapshot to determine the indices that need to
be updated when it is deleted as the `RepositoryData`
contains that information already.
This PR makes it so the `RepositoryData` is used to
determine which indices to update. It also removes
the special handling for deleting the snapshot metadata
and the cluster-state snapshot blob: those are now simply
deleted together with the other unreferenced
blobs in the last step of the delete.

This makes the snapshot delete a little faster and
more resilient by removing two RPC calls
(the separate delete and the get).

Also, this shortens the diff with elastic#46250 as a
side-effect.
  • Loading branch information
original-brownbear committed Oct 3, 2019
1 parent 56e7668 commit 938de5c
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 31 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,20 @@ public Map<String, IndexId> getIndices() {
return indices;
}

/**
 * Computes which {@link IndexId} entries must have their snapshot list rewritten (rather than being removed outright) when the
 * given snapshot is deleted from the repository. An index only needs an update if the snapshot references it and at least one
 * other snapshot still references it as well.
 *
 * @param snapshotId snapshot that is being removed
 * @return indices whose metadata must be updated but that remain in the repository
 */
public List<IndexId> indicesToUpdateAfterRemovingSnapshot(SnapshotId snapshotId) {
    return indexSnapshots.entrySet().stream()
        // only indices that the removed snapshot actually references
        .filter(entry -> entry.getValue().contains(snapshotId))
        // referenced by at least one other snapshot, so the index survives and must be updated
        .filter(entry -> entry.getValue().size() > 1)
        .map(Map.Entry::getKey)
        .collect(Collectors.toList());
}

/**
* Add a snapshot and its indices to the repository; returns a new instance. If the snapshot
* already exists in the repository data, this method throws an IllegalArgumentException.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
Expand Down Expand Up @@ -61,7 +60,6 @@
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
Expand Down Expand Up @@ -101,17 +99,13 @@
import java.io.InputStream;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName;
Expand Down Expand Up @@ -407,6 +401,8 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId, Action
try {
final Map<String, BlobMetaData> rootBlobs = blobContainer().listBlobs();
final RepositoryData repositoryData = getRepositoryData(latestGeneration(rootBlobs.keySet()));
// Cache the indices that were found before writing out the new index-N blob so that a stuck master will never
// delete an index that was created by another master node after writing this index-N blob.
final Map<String, BlobContainer> foundIndices = blobStore().blobContainer(indicesPath()).children();
doDeleteShardSnapshots(snapshotId, repositoryStateId, foundIndices, rootBlobs, repositoryData, listener);
} catch (Exception ex) {
Expand All @@ -432,36 +428,13 @@ private void doDeleteShardSnapshots(SnapshotId snapshotId, long repositoryStateI
Map<String, BlobMetaData> rootBlobs, RepositoryData repositoryData,
ActionListener<Void> listener) throws IOException {
final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
// Cache the indices that were found before writing out the new index-N blob so that a stuck master will never
// delete an index that was created by another master node after writing this index-N blob.
writeIndexGen(updatedRepositoryData, repositoryStateId);
SnapshotInfo snapshot = null;
try {
snapshot = getSnapshotInfo(snapshotId);
} catch (SnapshotMissingException ex) {
listener.onFailure(ex);
return;
} catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
}
final List<String> snapMetaFilesToDelete =
Arrays.asList(snapshotFormat.blobName(snapshotId.getUUID()), globalMetaDataFormat.blobName(snapshotId.getUUID()));
try {
blobContainer().deleteBlobsIgnoringIfNotExists(snapMetaFilesToDelete);
} catch (IOException e) {
logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata files", snapshotId), e);
}
final Map<String, IndexId> survivingIndices = updatedRepositoryData.getIndices();
deleteIndices(
updatedRepositoryData,
Optional.ofNullable(snapshot).map(info -> info.indices().stream().filter(survivingIndices::containsKey)
.map(updatedRepositoryData::resolveIndexId).collect(Collectors.toList())).orElse(Collections.emptyList()),
repositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotId),
snapshotId,
ActionListener.delegateFailure(listener,
(l, v) -> cleanupStaleBlobs(foundIndices,
Sets.difference(rootBlobs.keySet(), new HashSet<>(snapMetaFilesToDelete)).stream().collect(
Collectors.toMap(Function.identity(), rootBlobs::get)),
updatedRepositoryData, ActionListener.map(l, ignored -> null))));
(l, v) -> cleanupStaleBlobs(foundIndices, rootBlobs, updatedRepositoryData, ActionListener.map(l, ignored -> null))));
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
import java.util.Set;

import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

Expand All @@ -57,6 +58,17 @@ public void testEqualsAndHashCode() {
assertEquals(repositoryData1.hashCode(), repositoryData2.hashCode());
}

public void testIndicesToUpdateAfterRemovingSnapshot() {
    final RepositoryData repositoryData = generateRandomRepoData();
    final SnapshotId removedSnapshot = randomFrom(repositoryData.getSnapshotIds());
    // Recompute the expected answer independently: an index needs an update iff the removed
    // snapshot references it and at least one other snapshot still does.
    final List<IndexId> expectedUpdates = new ArrayList<>();
    for (IndexId index : repositoryData.getIndices().values()) {
        final Set<SnapshotId> snapshotsForIndex = repositoryData.getSnapshots(index);
        if (snapshotsForIndex.contains(removedSnapshot) && snapshotsForIndex.size() > 1) {
            expectedUpdates.add(index);
        }
    }
    assertThat(repositoryData.indicesToUpdateAfterRemovingSnapshot(removedSnapshot),
        containsInAnyOrder(expectedUpdates.toArray(new IndexId[0])));
}

public void testXContent() throws IOException {
RepositoryData repositoryData = generateRandomRepoData();
XContentBuilder builder = JsonXContent.contentBuilder();
Expand Down

0 comments on commit 938de5c

Please sign in to comment.