diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
index ba5e7514af03..983bdae5d49e 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -40,6 +40,7 @@
+
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index c51d4937a143..f93c3c0ff0ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -21,12 +21,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,14 +68,14 @@ public HFileArchiveManager enableHFileBackup(byte[] table) throws KeeperExceptio
 
   /**
    * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
-   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
-   * cleaner.
+   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
+   * retained by another cleaner.
    * @param table name of the table for which to disable hfile retention.
    * @return this for chaining.
-   * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner.
+   * @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
    */
   public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException {
-   disable(this.zooKeeper, table);
+    disable(this.zooKeeper, table);
     return this;
   }
 
@@ -95,17 +95,16 @@ public HFileArchiveManager disableHFileBackup() throws IOException {
   }
 
   /**
-   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
-   * * change back to the hfile cleaner.
+   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
+   * change back to the hfile cleaner.
    * <p>
   * No attempt is made to make sure that backups are successfully created - it is inherently an
   * asynchronous operation.
   * @param zooKeeper watcher connection to zk cluster
   * @param table table name on which to enable archiving
-  * @throws KeeperException
+  * @throws KeeperException if a ZooKeeper operation fails
   */
-  private void enable(ZKWatcher zooKeeper, byte[] table)
-      throws KeeperException {
+  private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
    LOG.debug("Ensuring archiving znode exists");
    ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
index 484ff5ea1748..9212063f88d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -27,6 +26,7 @@
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,7 +35,7 @@
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the
+ * This only works properly if the
  * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
  * is also enabled (it always should be), since it may take a little time
  * for the ZK notification to propagate, in which case we may accidentally
@@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
 
   public boolean isFileDeletable(FileStatus fStat) {
     try {
       // if its a directory, then it can be deleted
-      if (fStat.isDirectory()) return true;
+      if (fStat.isDirectory()) {
+        return true;
+      }
 
       Path file = fStat.getPath();
       // check to see if
       FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
       // if the file doesn't exist, then it can be deleted (but should never
       // happen since deleted files shouldn't get passed in)
-      if (deleteStatus == null) return true;
+      if (deleteStatus == null) {
+        return true;
+      }
 
       // otherwise, we need to check the file's table and see its being archived
       Path family = file.getParent();
@@ -69,7 +73,8 @@ public boolean isFileDeletable(FileStatus fStat) {
       String tableName = table.getName();
       boolean ret = !archiveTracker.keepHFiles(tableName);
-      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
+      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:"
+        + tableName);
       return ret;
     } catch (IOException e) {
       LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
@@ -97,13 +102,14 @@ public void setConf(Configuration config) {
 
   @Override
   public void stop(String reason) {
-    if (this.isStopped()) return;
+    if (this.isStopped()) {
+      return;
+    }
+
     super.stop(reason);
     if (this.archiveTracker != null) {
       LOG.info("Stopping " + this.archiveTracker);
       this.archiveTracker.stop();
     }
-
   }
-
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
index 1b3b775b2a46..49b0e827758b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
@@ -20,12 +20,12 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@
  * archive.
  */
 @InterfaceAudience.Private
-public class TableHFileArchiveTracker extends ZKListener {
+public final class TableHFileArchiveTracker extends ZKListener {
   private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
   public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
   private HFileArchiveTableMonitor monitor;
@@ -67,7 +67,9 @@ public void start() throws KeeperException {
   @Override
   public void nodeCreated(String path) {
     // if it is the archive path
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " created");
     // since we are already enabled, just update a single table
@@ -75,7 +77,6 @@ public void nodeCreated(String path) {
 
     // the top level node has come up, so read in all the tables
     if (table.length() == 0) {
-
       checkEnabledAndUpdate();
       return;
     }
@@ -90,7 +91,9 @@ public void nodeCreated(String path) {
 
   @Override
   public void nodeChildrenChanged(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " children changed.");
     // a table was added to the archive
@@ -134,7 +137,9 @@ private void safeStopTrackingTable(String tableZnode) throws KeeperException {
 
   @Override
   public void nodeDeleted(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " deleted");
     String table = path.substring(archiveHFileZNode.length());
@@ -260,7 +265,10 @@ public ZKWatcher getZooKeeperWatcher() {
    * Stop this tracker and the passed zookeeper
    */
   public void stop() {
-    if (this.stopped) return;
+    if (this.stopped) {
+      return;
+    }
+
     this.stopped = true;
     this.watcher.close();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
index 142788e7618a..bc535b7db80f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.hbase.backup.example;
 
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -110,7 +109,7 @@ public void disableHFileBackup() throws IOException, KeeperException {
    * @param table name of the table to check
    * @return true if it is, false otherwise
    * @throws IOException if a connection to ZooKeeper cannot be established
-   * @throws KeeperException
+   * @throws KeeperException if a ZooKeeper operation fails
    */
   public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
     HFileArchiveManager manager = createHFileArchiveManager();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 16f3930e3d4d..ca17633b720b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -269,8 +269,11 @@ public void testMultipleTables() throws Exception {
     for (Path file : files) {
       String tableName = file.getParent().getParent().getParent().getName();
       // check to which table this file belongs
-      if (tableName.equals(otherTable)) initialCountForOtherTable++;
-      else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
+      if (tableName.equals(otherTable)) {
+        initialCountForOtherTable++;
+      } else if (tableName.equals(STRING_TABLE_NAME)) {
+        initialCountForPrimary++;
+      }
     }
 
     assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@@ -293,11 +296,13 @@ public void testMultipleTables() throws Exception {
       String tableName = file.getParent().getParent().getParent().getName();
       // ensure we don't have files from the non-archived table
       assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
-      if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
+      if (tableName.equals(STRING_TABLE_NAME)) {
+        archivedForPrimary++;
+      }
     }
 
-    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
-      archivedForPrimary);
+    assertEquals("Not all archived files for the primary table were retained.",
+      initialCountForPrimary, archivedForPrimary);
 
     // but we still have the archive directory
     assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@@ -374,7 +379,10 @@ public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable
 
         @SuppressWarnings("unchecked")
         Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
-        if (counter[0] >= expected) finished.countDown();
+        if (counter[0] >= expected) {
+          finished.countDown();
+        }
+
         return ret;
       }
     }).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
@@ -399,7 +407,11 @@ private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
     for (FileStatus file : files) {
       if (file.isDirectory()) {
         List<Path> subFiles = getAllFiles(fs, file.getPath());
-        if (subFiles != null) allFiles.addAll(subFiles);
+
+        if (subFiles != null) {
+          allFiles.addAll(subFiles);
+        }
+
         continue;
       }
       allFiles.add(file.getPath());
@@ -426,7 +438,7 @@ private void loadFlushAndCompact(HRegion region, byte[] family) throws IOExcepti
    * Create a new hfile in the passed region
    * @param region region to operate on
    * @param columnFamily family for which to add data
-   * @throws IOException
+   * @throws IOException if doing the put or flush fails
    */
   private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
     // put one row in the region
@@ -438,7 +450,7 @@ private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOE
   }
 
   /**
-   * @param cleaner
+   * @param cleaner the cleaner to use
    */
   private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
     throws InterruptedException {
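
Note on usage (reviewer sketch, not part of the patch): the example archiving client cleaned up above is driven roughly as below. This is a minimal sketch based only on what is visible in these hunks — getArchivingEnabled(byte[]) and disableHFileBackup() on ZKTableArchiveClient. The (Configuration, ClusterConnection) constructor shape is an assumption inferred from the class's imports, and the ArchiveClientSketch class and "my-table" name are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ArchiveClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ConnectionFactory returns a Connection; the default implementation also
        // implements the audience-private ClusterConnection interface, so this cast
        // is an assumption about the HBase version in use.
        try (ClusterConnection conn =
            (ClusterConnection) ConnectionFactory.createConnection(conf)) {
          ZKTableArchiveClient client = new ZKTableArchiveClient(conf, conn); // assumed ctor
          byte[] table = Bytes.toBytes("my-table"); // hypothetical table name
          if (client.getArchivingEnabled(table)) {
            // Turn archiving off for all tables; LongTermArchivingHFileCleaner will
            // then let the HFileCleaner chain delete the files on its next pass.
            client.disableHFileBackup();
          }
        }
      }
    }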