-
Notifications
You must be signed in to change notification settings - Fork 3.3k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
HBASE-28568 Fix incremental backup set shrinking #5876
Changes from 3 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -93,7 +93,6 @@ public BackupInfo getBackupInfo(String backupId) throws IOException { | |
public int deleteBackups(String[] backupIds) throws IOException { | ||
|
||
int totalDeleted = 0; | ||
Map<String, HashSet<TableName>> allTablesMap = new HashMap<>(); | ||
|
||
boolean deleteSessionStarted; | ||
boolean snapshotDone; | ||
|
@@ -129,20 +128,15 @@ public int deleteBackups(String[] backupIds) throws IOException { | |
} | ||
snapshotDone = true; | ||
try { | ||
List<String> affectedBackupRootDirs = new ArrayList<>(); | ||
for (int i = 0; i < backupIds.length; i++) { | ||
BackupInfo info = sysTable.readBackupInfo(backupIds[i]); | ||
if (info != null) { | ||
DieterDP-ng marked this conversation as resolved.
Show resolved
Hide resolved
|
||
String rootDir = info.getBackupRootDir(); | ||
HashSet<TableName> allTables = allTablesMap.get(rootDir); | ||
if (allTables == null) { | ||
allTables = new HashSet<>(); | ||
allTablesMap.put(rootDir, allTables); | ||
} | ||
allTables.addAll(info.getTableNames()); | ||
affectedBackupRootDirs.add(info.getBackupRootDir()); | ||
totalDeleted += deleteBackup(backupIds[i], sysTable); | ||
} | ||
} | ||
finalizeDelete(allTablesMap, sysTable); | ||
finalizeDelete(affectedBackupRootDirs, sysTable); | ||
// Finish | ||
sysTable.finishDeleteOperation(); | ||
// delete snapshot | ||
|
@@ -175,26 +169,23 @@ public int deleteBackups(String[] backupIds) throws IOException { | |
|
||
/** | ||
* Updates incremental backup set for every backupRoot | ||
* @param tablesMap map [backupRoot: {@code Set<TableName>}] | ||
* @param table backup system table | ||
* @param backupRoots backupRoots for which to revise the incremental backup set | ||
* @param table backup system table | ||
* @throws IOException if a table operation fails | ||
*/ | ||
private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table) | ||
private void finalizeDelete(List<String> backupRoots, BackupSystemTable table) | ||
throws IOException { | ||
for (String backupRoot : tablesMap.keySet()) { | ||
for (String backupRoot : backupRoots) { | ||
Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot); | ||
Map<TableName, ArrayList<BackupInfo>> tableMap = | ||
Map<TableName, List<BackupInfo>> tableMap = | ||
table.getBackupHistoryForTableSet(incrTableSet, backupRoot); | ||
for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) { | ||
if (entry.getValue() == null) { | ||
// No more backups for a table | ||
incrTableSet.remove(entry.getKey()); | ||
} | ||
} | ||
|
||
// Keep only the tables that are present in other backups | ||
incrTableSet.retainAll(tableMap.keySet()); | ||
|
||
table.deleteIncrementalBackupTableSet(backupRoot); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This API on |
||
if (!incrTableSet.isEmpty()) { | ||
table.addIncrementalBackupTableSet(incrTableSet, backupRoot); | ||
} else { // empty | ||
table.deleteIncrementalBackupTableSet(backupRoot); | ||
} | ||
} | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,6 +17,7 @@ | |
*/ | ||
package org.apache.hadoop.hbase.backup; | ||
|
||
import static org.junit.Assert.assertEquals; | ||
import static org.junit.Assert.assertTrue; | ||
|
||
import java.io.ByteArrayOutputStream; | ||
|
@@ -30,6 +31,7 @@ | |
import org.apache.hadoop.hbase.testclassification.LargeTests; | ||
import org.apache.hadoop.hbase.util.EnvironmentEdge; | ||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; | ||
import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This should be using There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
import org.apache.hadoop.util.ToolRunner; | ||
import org.junit.Assert; | ||
import org.junit.ClassRule; | ||
|
@@ -158,4 +160,27 @@ public long currentTime() { | |
LOG.info(baos.toString()); | ||
assertTrue(output.indexOf("Deleted 1 backups") >= 0); | ||
} | ||
|
||
/** | ||
* Verify that backup deletion updates the incremental-backup-set. | ||
*/ | ||
@Test | ||
public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception { | ||
LOG.info("Test backup delete updates the incremental backup set"); | ||
BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection()); | ||
|
||
String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2)); | ||
assertTrue(checkSucceeded(backupId1)); | ||
assertEquals(Sets.newHashSet(table1, table2), | ||
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); | ||
|
||
String backupId2 = fullTableBackup(Lists.newArrayList(table3)); | ||
assertTrue(checkSucceeded(backupId2)); | ||
assertEquals(Sets.newHashSet(table1, table2, table3), | ||
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); | ||
|
||
getBackupAdmin().deleteBackups(new String[] { backupId1 }); | ||
assertEquals(Sets.newHashSet(table3), | ||
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR)); | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That snapshot logic looks problematic. Is it okay to continue using an existing snapshot? If a snapshot already exists, we don't know what it contains, so this operation should fail. Separate issue, I guess?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I agree it could be problematic. Perhaps the backup repair takes care of these kinds of situations; I'm not sure.
But this is a separate issue indeed.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
FYI @rmdmattingly this is one to look into.