diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 80c5242a1060..f2a8e00fea5e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -154,6 +154,8 @@ static final class Options {
       "Number of mappers to use during the copy (mapreduce.job.maps).");
     static final Option BANDWIDTH =
       new Option(null, "bandwidth", true, "Limit bandwidth to this value in MB/second.");
+    static final Option RESET_TTL =
+      new Option(null, "reset-ttl", false, "Do not copy TTL for the snapshot");
   }
 
   // Export Map-Reduce Counters, to keep track of the progress
@@ -931,6 +933,7 @@ private void setPermissionParallel(final FileSystem outputFs, final short filesM
   private int bandwidthMB = Integer.MAX_VALUE;
   private int filesMode = 0;
   private int mappers = 0;
+  private boolean resetTtl = false;
 
   @Override
   protected void processOptions(CommandLine cmd) {
@@ -952,6 +955,7 @@ protected void processOptions(CommandLine cmd) {
     verifyChecksum = !cmd.hasOption(Options.NO_CHECKSUM_VERIFY.getLongOpt());
     verifyTarget = !cmd.hasOption(Options.NO_TARGET_VERIFY.getLongOpt());
     verifySource = !cmd.hasOption(Options.NO_SOURCE_VERIFY.getLongOpt());
+    resetTtl = cmd.hasOption(Options.RESET_TTL.getLongOpt());
   }
 
   /**
@@ -1089,11 +1093,19 @@ public int doWork() throws IOException {
       }
     }
 
-    // Write a new .snapshotinfo if the target name is different from the source name
-    if (!targetName.equals(snapshotName)) {
-      SnapshotDescription snapshotDesc = SnapshotDescriptionUtils
-        .readSnapshotInfo(inputFs, snapshotDir).toBuilder().setName(targetName).build();
-      SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs);
+    // Write a new .snapshotinfo if the target name is different from the source name or we want to
+    // reset TTL for target snapshot.
+    if (!targetName.equals(snapshotName) || resetTtl) {
+      SnapshotDescription.Builder snapshotDescBuilder =
+        SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir).toBuilder();
+      if (!targetName.equals(snapshotName)) {
+        snapshotDescBuilder.setName(targetName);
+      }
+      if (resetTtl) {
+        snapshotDescBuilder.setTtl(HConstants.DEFAULT_SNAPSHOT_TTL);
+      }
+      SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDescBuilder.build(),
+        initialOutputSnapshotDir, outputFs);
       if (filesUser != null || filesGroup != null) {
         outputFs.setOwner(
           new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser,
@@ -1169,6 +1181,7 @@ protected void addOptions() {
     addOption(Options.CHMOD);
     addOption(Options.MAPPERS);
     addOption(Options.BANDWIDTH);
+    addOption(Options.RESET_TTL);
   }
 
   public static void main(String[] args) {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index c49bf218743f..4dcadc755da3 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -24,9 +24,12 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
@@ -230,6 +233,37 @@ public void testExportWithTargetName() throws Exception {
     testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles);
   }
 
+  @Test
+  public void testExportWithResetTtl() throws Exception {
+    String name = "testExportWithResetTtl";
+    TableName tableName = TableName.valueOf(name);
+    String snapshotName = "snaptb-" + name;
+    Long ttl = 100000L;
+
+    try {
+      // create Table
+      createTable(tableName);
+      SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
+      int tableNumFiles = admin.getRegions(tableName).size();
+      // take a snapshot with TTL
+      Map<String, Object> props = new HashMap<>();
+      props.put("TTL", ttl);
+      admin.snapshot(snapshotName, tableName, props);
+      Optional<Long> ttlOpt =
+        admin.listSnapshots().stream().filter(s -> s.getName().equals(snapshotName))
+          .map(org.apache.hadoop.hbase.client.SnapshotDescription::getTtl).findAny();
+      assertTrue(ttlOpt.isPresent());
+      assertEquals(ttl, ttlOpt.get());
+
+      testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
+        getHdfsDestinationDir(), false, true);
+    } catch (Exception e) {
+      throw e;
+    } finally {
+      TEST_UTIL.deleteTable(tableName);
+    }
+  }
+
   private void testExportFileSystemState(final TableName tableName, final String snapshotName,
     final String targetName, int filesExpected) throws Exception {
     testExportFileSystemState(tableName, snapshotName, targetName, filesExpected,
@@ -238,8 +272,15 @@ private void testExportFileSystemState(final TableName tableName, final String s
 
   protected void testExportFileSystemState(final TableName tableName, final String snapshotName,
     final String targetName, int filesExpected, Path copyDir, boolean overwrite) throws Exception {
+    testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, copyDir,
+      overwrite, false);
+  }
+
+  protected void testExportFileSystemState(final TableName tableName, final String snapshotName,
+    final String targetName, int filesExpected, Path copyDir, boolean overwrite, boolean resetTtl)
+    throws Exception {
     testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName,
-      filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite,
+      filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, resetTtl,
       getBypassRegionPredicate(), true);
   }
 
@@ -249,7 +290,8 @@ protected static void testExportFileSystemState(final Configuration conf,
     final TableName tableName, final String snapshotName, final String targetName,
     final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite,
-    final RegionPredicate bypassregionPredicate, boolean success) throws Exception {
+    final boolean resetTtl, final RegionPredicate bypassregionPredicate, boolean success)
+    throws Exception {
     FileSystem tgtFs = rawTgtDir.getFileSystem(conf);
     FileSystem srcFs = srcDir.getFileSystem(conf);
     Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory());
@@ -267,6 +309,9 @@ protected static void testExportFileSystemState(final Configuration conf,
     if (overwrite) {
       opts.add("--overwrite");
     }
+    if (resetTtl) {
+      opts.add("--reset-ttl");
+    }
 
     // Export Snapshot
     int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()]));
@@ -295,7 +340,7 @@ protected static void testExportFileSystemState(final Configuration conf,
     final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName);
     verifySnapshotDir(srcFs, new Path(srcDir, snapshotDir), tgtFs, new Path(tgtDir, targetDir));
     Set<String> snapshotFiles =
-      verifySnapshot(conf, tgtFs, tgtDir, tableName, targetName, bypassregionPredicate);
+      verifySnapshot(conf, tgtFs, tgtDir, tableName, targetName, resetTtl, bypassregionPredicate);
     assertEquals(filesExpected, snapshotFiles.size());
   }
 
@@ -312,7 +357,7 @@ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1,
    */
   protected static Set<String> verifySnapshot(final Configuration conf, final FileSystem fs,
     final Path rootDir, final TableName tableName, final String snapshotName,
-    final RegionPredicate bypassregionPredicate) throws IOException {
+    final boolean resetTtl, final RegionPredicate bypassregionPredicate) throws IOException {
     final Path exportedSnapshot =
       new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName));
     final Set<String> snapshotFiles = new HashSet<>();
@@ -354,6 +399,9 @@ private void verifyNonEmptyFile(final Path path) throws IOException {
     SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, exportedSnapshot);
     assertTrue(desc.getName().equals(snapshotName));
     assertTrue(desc.getTable().equals(tableName.getNameAsString()));
+    if (resetTtl) {
+      assertEquals(HConstants.DEFAULT_SNAPSHOT_TTL, desc.getTtl());
+    }
     return snapshotFiles;
   }
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java
index e51ba7da5707..a2db1c688207 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java
@@ -151,7 +151,7 @@ public void testExportRetry() throws Exception {
     conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2);
     conf.setInt("mapreduce.map.maxattempts", 3);
     TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName,
-      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, true);
+      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, true);
   }
 
   /**
@@ -167,6 +167,6 @@ public void testExportFailure() throws Exception {
     conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4);
     conf.setInt("mapreduce.map.maxattempts", 3);
     TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName,
-      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false);
+      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, false, null, false);
   }
 }
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
index 6ae20bc4f4f6..19496fcfe414 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
@@ -125,7 +125,7 @@ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs,
     TableName tableName = builder.getTableDescriptor().getTableName();
     TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName,
       snapshotName, snapshotName, snapshotFilesCount, testDir,
-      getDestinationDir(fs, testUtil, testDir), false, null, true);
+      getDestinationDir(fs, testUtil, testDir), false, false, null, true);
   }
 
   static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtil hctu, Path testDir)
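
Note (not part of the patch): a minimal sketch of how the new --reset-ttl option could be driven from Java once this change is applied, using the standard Hadoop ToolRunner entry point. The snapshot name "snaptb-example" and the destination URI are hypothetical placeholders; only the existing --snapshot and --copy-to options plus the --reset-ttl flag added by this patch are assumed.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportSnapshotResetTtlExample {
    public static void main(String[] args) throws Exception {
      // Client-side HBase configuration for the source cluster.
      Configuration conf = HBaseConfiguration.create();
      // Run the ExportSnapshot tool; --reset-ttl causes the exported .snapshotinfo to be
      // rewritten with HConstants.DEFAULT_SNAPSHOT_TTL instead of the source snapshot's TTL.
      int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] { "--snapshot",
        "snaptb-example", "--copy-to", "hdfs://backup-cluster/hbase", "--reset-ttl" });
      System.exit(exit);
    }
  }

The same effect is exercised by the new testExportWithResetTtl test above, which takes a snapshot with an explicit TTL and verifies that the exported copy carries the default TTL instead.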