HubSpot Backport: HBASE-28568 Incremental backup set does not correctly shrink (apache#5876) (#97)

* HubSpot Backport: HBASE-28568 Incremental backup set does not correctly shrink (apache#5876)

The incremental backup set is the set of tables included when
an incremental backup is created. It is managed per backup
root dir and contains all tables that are present in at least
one backup (in that root dir).

The incremental backup set can only shrink when backups are
deleted. However, the implementation was incorrect, causing this
set to never be able to shrink.
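
To make the description concrete, here is an illustrative sketch (not part of the patch; the class and method names below are invented for the example): after a delete, the incremental backup set of a backup root should equal the union of table names across the backups that remain under that root.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;

final class IncrementalBackupSetInvariant {
  // The incremental backup set of a backup root should equal the union of the
  // table names over the backups that remain under that root after a delete.
  static Set<TableName> expectedIncrementalBackupSet(List<BackupInfo> remainingBackupsInRoot) {
    Set<TableName> expected = new HashSet<>();
    for (BackupInfo info : remainingBackupsInRoot) {
      expected.addAll(info.getTableNames());
    }
    return expected;
  }
}

The new test at the bottom of this commit checks exactly this invariant, expecting the set to shrink from {table1, table2, table3} to {table3} after the first backup is deleted.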

Reviewed-by: Ray Mattingly <[email protected]>
Signed-off-by: Nick Dimiduk <[email protected]>

* HubSpot Backport: HBASE-28568 Incremental backup set does not correctly shrink (addendum) (apache#5917)

Import the correct shaded Guava and run spotless:apply.

Signed-off-by: Duo Zhang <[email protected]>

---------

Signed-off-by: Nick Dimiduk <[email protected]>
Signed-off-by: Duo Zhang <[email protected]>
Co-authored-by: DieterDP <[email protected]>
ndimiduk and DieterDP-ng authored Jul 10, 2024
1 parent a6eaaff commit 4187f4b
Showing 3 changed files with 58 additions and 39 deletions.
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -94,7 +93,6 @@ public BackupInfo getBackupInfo(String backupId) throws IOException {
   public int deleteBackups(String[] backupIds) throws IOException {
 
     int totalDeleted = 0;
-    Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();
 
     boolean deleteSessionStarted;
     boolean snapshotDone;
@@ -130,20 +128,16 @@ public int deleteBackups(String[] backupIds) throws IOException {
       }
       snapshotDone = true;
       try {
+        List<String> affectedBackupRootDirs = new ArrayList<>();
         for (int i = 0; i < backupIds.length; i++) {
           BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
-          if (info != null) {
-            String rootDir = info.getBackupRootDir();
-            HashSet<TableName> allTables = allTablesMap.get(rootDir);
-            if (allTables == null) {
-              allTables = new HashSet<>();
-              allTablesMap.put(rootDir, allTables);
-            }
-            allTables.addAll(info.getTableNames());
-            totalDeleted += deleteBackup(backupIds[i], sysTable);
+          if (info == null) {
+            continue;
           }
+          affectedBackupRootDirs.add(info.getBackupRootDir());
+          totalDeleted += deleteBackup(backupIds[i], sysTable);
         }
-        finalizeDelete(allTablesMap, sysTable);
+        finalizeDelete(affectedBackupRootDirs, sysTable);
         // Finish
         sysTable.finishDeleteOperation();
         // delete snapshot
@@ -176,26 +170,23 @@ public int deleteBackups(String[] backupIds) throws IOException {
 
   /**
    * Updates incremental backup set for every backupRoot
-   * @param tablesMap map [backupRoot: {@code Set<TableName>}]
-   * @param table     backup system table
+   * @param backupRoots backupRoots for which to revise the incremental backup set
+   * @param table       backup system table
    * @throws IOException if a table operation fails
    */
-  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
+  private void finalizeDelete(List<String> backupRoots, BackupSystemTable table)
     throws IOException {
-    for (String backupRoot : tablesMap.keySet()) {
+    for (String backupRoot : backupRoots) {
       Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
-      Map<TableName, ArrayList<BackupInfo>> tableMap =
+      Map<TableName, List<BackupInfo>> tableMap =
         table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
-      for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
-        if (entry.getValue() == null) {
-          // No more backups for a table
-          incrTableSet.remove(entry.getKey());
-        }
-      }
+
+      // Keep only the tables that are present in other backups
+      incrTableSet.retainAll(tableMap.keySet());
+
+      table.deleteIncrementalBackupTableSet(backupRoot);
       if (!incrTableSet.isEmpty()) {
         table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
-      } else { // empty
-        table.deleteIncrementalBackupTableSet(backupRoot);
       }
     }
   }
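
Aside on why the old loop above could never shrink the set (a hedged reading of the code shown, not a statement from the patch authors): getBackupHistoryForTableSet only creates a map entry for tables that still appear in at least one backup, so entry.getValue() was never null and nothing was ever removed; the fix instead treats the returned key set as the authoritative list of tables that still have backups. A minimal self-contained sketch of that corrected step (the wrapper class and the impl package import are assumptions for the example):

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;

final class FinalizeDeleteSketch {
  // Sketch of the corrected shrink step for one backup root: the key set of the
  // history map contains exactly the tables that still appear in some backup,
  // so retainAll drops every table whose last backup was just deleted.
  static Set<TableName> shrinkIncrementalBackupSet(BackupSystemTable table, String backupRoot)
    throws IOException {
    Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
    Map<TableName, List<BackupInfo>> tableMap =
      table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
    // The old code looked for null values in tableMap, but tables with no
    // remaining backups are simply absent from the map, so nothing was removed.
    incrTableSet.retainAll(tableMap.keySet());
    return incrTableSet;
  }
}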
@@ -87,8 +87,9 @@
  * <ul>
  * <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
  * <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
- * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]</li>
- * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-%3E last WAL
+ * <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; table="meta:"+tablename of
+ * include table; value=empty</li>
+ * <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
  * timestamp]</li>
  * <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp</li>
  * <li>6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file
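
To make item 3's new wording concrete, here is a hedged sketch of that row shape using the public client API (family and qualifier derivation simplified; the real write path lives in BackupSystemTable helpers not shown in this diff):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class IncrBackupSetRowSketch {
  // One row per backup root; one empty-valued column under the "meta" family
  // per table included in the incremental backup set.
  static Put incrementalBackupSetPut(String backupRoot, String tableName) {
    Put put = new Put(Bytes.toBytes("incrbackupset:" + backupRoot));
    put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes(tableName), new byte[0]);
    return put;
  }
}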
@@ -842,23 +843,25 @@ public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException {
     return tableHistory;
   }
 
-  public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
+  /**
+   * Goes through all backup history corresponding to the provided root folder, and collects all
+   * backup info mentioning each of the provided tables.
+   * @param set        the tables for which to collect the {@code BackupInfo}
+   * @param backupRoot backup destination path to retrieve backup history for
+   * @return a map containing (a subset of) the provided {@code TableName}s, mapped to a list of at
+   *         least one {@code BackupInfo}
+   * @throws IOException if getting the backup history fails
+   */
+  public Map<TableName, List<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
     String backupRoot) throws IOException {
     List<BackupInfo> history = getBackupHistory(backupRoot);
-    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
-    for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
-      BackupInfo info = iterator.next();
-      if (!backupRoot.equals(info.getBackupRootDir())) {
-        continue;
-      }
+    Map<TableName, List<BackupInfo>> tableHistoryMap = new HashMap<>();
+    for (BackupInfo info : history) {
       List<TableName> tables = info.getTableNames();
       for (TableName tableName : tables) {
         if (set.contains(tableName)) {
-          ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
-          if (list == null) {
-            list = new ArrayList<>();
-            tableHistoryMap.put(tableName, list);
-          }
+          List<BackupInfo> list =
+            tableHistoryMap.computeIfAbsent(tableName, k -> new ArrayList<>());
           list.add(info);
         }
       }
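
The documented contract can be paraphrased with a toy model over plain collections (all names invented; this is not HBase code): backups are grouped by the tables they mention, restricted to the query set, and a table with no remaining backup is simply absent from the result rather than mapped to null — which is what lets the caller shrink its set with retainAll.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class HistoryForTableSetToyModel {
  // backupsToTables: backupId -> tables covered by that backup (within one root).
  // Returns: table -> backupIds mentioning it, for tables in querySet only.
  static Map<String, List<String>> historyForTableSet(Map<String, Set<String>> backupsToTables,
    Set<String> querySet) {
    Map<String, List<String>> result = new HashMap<>();
    backupsToTables.forEach((backupId, tables) -> {
      for (String table : tables) {
        if (querySet.contains(table)) {
          result.computeIfAbsent(table, k -> new ArrayList<>()).add(backupId);
        }
      }
    });
    return result;
  }
}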
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.backup;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
@@ -39,6 +40,7 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 @Category(LargeTests.class)
 public class TestBackupDelete extends TestBackupBase {
@@ -158,4 +160,27 @@ public long currentTime() {
     LOG.info(baos.toString());
     assertTrue(output.indexOf("Deleted 1 backups") >= 0);
   }
+
+  /**
+   * Verify that backup deletion updates the incremental-backup-set.
+   */
+  @Test
+  public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception {
+    LOG.info("Test backup delete updates the incremental backup set");
+    BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
+
+    String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2));
+    assertTrue(checkSucceeded(backupId1));
+    assertEquals(Sets.newHashSet(table1, table2),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+
+    String backupId2 = fullTableBackup(Lists.newArrayList(table3));
+    assertTrue(checkSucceeded(backupId2));
+    assertEquals(Sets.newHashSet(table1, table2, table3),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+
+    getBackupAdmin().deleteBackups(new String[] { backupId1 });
+    assertEquals(Sets.newHashSet(table3),
+      backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
+  }
 }
