HBASE-22594 Clean up for backup examples #315

Merged · 1 commit · Jul 25, 2019
checkstyle-suppressions.xml
@@ -40,6 +40,7 @@
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestServletFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestGlobalFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestPathFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.backup.example.TestZooKeeperTableArchiveClient.java"/>
<suppress checks="EqualsHashCode" files="org.apache.hadoop.hbase.favored.StartcodeAgnosticServerName.java"/>
<suppress checks="MethodLength" files="org.apache.hadoop.hbase.tool.coprocessor.Branch1CoprocessorMethods.java"/>
<suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>
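The new suppression follows the pattern of the existing ones: EmptyBlockCheck flags blocks with no statements, which tests sometimes leave empty on purpose. A hypothetical illustration (not from this patch) of the kind of code that trips the check and therefore needs a suppression:

public class EmptyBlockExample {
  void closeQuietly(AutoCloseable resource) {
    try {
      resource.close();
    } catch (Exception e) {
      // intentionally left empty: the test only cares that close() ran,
      // which is exactly the statement-free block EmptyBlockCheck reports
    }
  }
}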
HFileArchiveManager.java
@@ -21,12 +21,12 @@

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -68,14 +68,14 @@ public HFileArchiveManager enableHFileBackup(byte[] table) throws KeeperException

/**
* Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
* cleaner.
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
* retained by another cleaner.
* @param table name of the table for which to disable hfile retention.
* @return <tt>this</tt> for chaining.
* @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
*/
public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException {
disable(this.zooKeeper, table);
disable(this.zooKeeper, table);
return this;
}

@@ -95,17 +95,16 @@ public HFileArchiveManager disableHFileBackup() throws IOException {
}

/**
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
* * change back to the hfile cleaner.
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
* change back to the hfile cleaner.
* <p>
* No attempt is made to make sure that backups are successfully created - it is inherently an
* <b>asynchronous operation</b>.
* @param zooKeeper watcher connection to zk cluster
* @param table table name on which to enable archiving
* @throws KeeperException
* @throws KeeperException if a ZooKeeper operation fails
*/
private void enable(ZKWatcher zooKeeper, byte[] table)
throws KeeperException {
private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
LOG.debug("Ensuring archiving znode exists");
ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);

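For context on what the cleaned-up enable path does: enabling archiving for a table amounts to creating a per-table child under the shared archive znode, which the cleaner-side tracker watches. A minimal sketch under that assumption (ArchiveZnodeSketch and its parameters are illustrative, not part of the patch):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.zookeeper.KeeperException;

public class ArchiveZnodeSketch {
  // Mirrors the enable() path above: ensure the parent znode exists, then
  // publish the per-table child that the tracker's nodeCreated() picks up.
  public static void enableTable(ZKWatcher zooKeeper, String archiveZnode, byte[] table)
      throws KeeperException {
    ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);
    String tableNode = ZNodePaths.joinZNode(archiveZnode, Bytes.toString(table));
    ZKUtil.createAndFailSilent(zooKeeper, tableNode);
  }
}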
LongTermArchivingHFileCleaner.java
@@ -19,14 +19,14 @@

import java.io.IOException;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -35,7 +35,7 @@
* {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
* currently being archived.
* <p>
* This only works properly if the
* This only works properly if the
* {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
* is also enabled (it always should be), since it may take a little time
* for the ZK notification to propagate, in which case we may accidentally
@@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
public boolean isFileDeletable(FileStatus fStat) {
try {
// if it's a directory, then it can be deleted
if (fStat.isDirectory()) return true;
if (fStat.isDirectory()) {
return true;
}

Path file = fStat.getPath();
// check to see if the file still exists
FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
// if the file doesn't exist, then it can be deleted (but should never
// happen since deleted files shouldn't get passed in)
if (deleteStatus == null) return true;
if (deleteStatus == null) {
return true;
}

// otherwise, we need to check the file's table and see if it's being archived
Path family = file.getParent();
@@ -69,7 +73,8 @@ public boolean isFileDeletable(FileStatus fStat) {

String tableName = table.getName();
boolean ret = !archiveTracker.keepHFiles(tableName);
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
tableName);
return ret;
} catch (IOException e) {
LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
@@ -97,13 +102,14 @@ public void setConf(Configuration config) {

@Override
public void stop(String reason) {
if (this.isStopped()) return;
if (this.isStopped()) {
return;
}

super.stop(reason);
if (this.archiveTracker != null) {
LOG.info("Stopping " + this.archiveTracker);
this.archiveTracker.stop();
}

}

}
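As the class javadoc above stresses, this delegate is meant to run alongside TimeToLiveHFileCleaner rather than replace it. A minimal sketch of wiring both into the master's cleaner chain, assuming the hbase.master.hfilecleaner.plugins property (verify the key against your HBase release):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Keeping the TTL cleaner in the chain means files are still retained
    // for the TTL window while a ZK notification is in flight.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner");
    return conf;
  }
}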
TableHFileArchiveTracker.java
@@ -20,12 +20,12 @@
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKListener;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@
* archive.
*/
@InterfaceAudience.Private
public class TableHFileArchiveTracker extends ZKListener {
public final class TableHFileArchiveTracker extends ZKListener {
private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
private HFileArchiveTableMonitor monitor;
@@ -67,15 +67,16 @@ public void start() throws KeeperException {
@Override
public void nodeCreated(String path) {
// if it is the archive path
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}

LOG.debug("Archive node: " + path + " created");
// since we are already enabled, just update a single table
String table = path.substring(archiveHFileZNode.length());

// the top level node has come up, so read in all the tables
if (table.length() == 0) {

checkEnabledAndUpdate();
return;
}
@@ -90,7 +91,9 @@ public void nodeCreated(String path) {

@Override
public void nodeChildrenChanged(String path) {
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}

LOG.debug("Archive node: " + path + " children changed.");
// a table was added to the archive
@@ -134,7 +137,9 @@ private void safeStopTrackingTable(String tableZnode) throws KeeperException {

@Override
public void nodeDeleted(String path) {
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}

LOG.debug("Archive node: " + path + " deleted");
String table = path.substring(archiveHFileZNode.length());
@@ -260,7 +265,10 @@ public ZKWatcher getZooKeeperWatcher() {
* Stop this tracker and the passed zookeeper
*/
public void stop() {
if (this.stopped) return;
if (this.stopped) {
return;
}

this.stopped = true;
this.watcher.close();
}
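The tracker's lifecycle after this cleanup: construct, seed state and register watches via start(), and an idempotent stop(). A minimal sketch, assuming the static create(Configuration) factory (an assumption; only start(), stop(), and getZooKeeperWatcher() are visible in the hunks above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.example.TableHFileArchiveTracker;

public class TrackerLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // create(Configuration) is assumed from how the cleaner obtains its
    // tracker; confirm the signature against the class itself.
    TableHFileArchiveTracker tracker = TableHFileArchiveTracker.create(conf);
    tracker.start(); // registers watches; nodeCreated()/nodeDeleted() take over
    try {
      // ... a cleaner would consult the tracker's monitor here ...
    } finally {
      tracker.stop(); // guarded by the stopped flag above, so safe to repeat
    }
  }
}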
ZKTableArchiveClient.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup.example;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.Connection;
@@ -109,7 +110,7 @@ public void disableHFileBackup() throws IOException, KeeperException {
* @param table name of the table to check
* @return <tt>true</tt> if it is, <tt>false</tt> otherwise
* @throws IOException if a connection to ZooKeeper cannot be established
* @throws KeeperException
* @throws KeeperException if a ZooKeeper operation fails
*/
public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
HFileArchiveManager manager = createHFileArchiveManager();
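End to end, this client is how archiving gets toggled. A minimal usage sketch; the two-argument constructor and enableHFileBackupAsync(byte[]) are assumptions about this class's wider API, since the hunks above only show disableHFileBackup() and getArchivingEnabled(byte[]):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ArchiveClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Constructor shape is assumed; adjust to the class's actual API.
      ZKTableArchiveClient client = new ZKTableArchiveClient(conf, connection);
      byte[] table = Bytes.toBytes("example-table");
      // Best-effort and asynchronous: returns before the cleaner reacts.
      client.enableHFileBackupAsync(table);
      System.out.println("archiving enabled: " + client.getArchivingEnabled(table));
      // Removes the table's znode so its HFiles become deletable again.
      client.disableHFileBackup(table);
    }
  }
}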
TestZooKeeperTableArchiveClient.java
@@ -284,8 +284,11 @@ public void testMultipleTables() throws Exception {
for (Path file : files) {
String tableName = file.getParent().getParent().getParent().getName();
// check to which table this file belongs
if (tableName.equals(otherTable)) initialCountForOtherTable++;
else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
if (tableName.equals(otherTable)) {
initialCountForOtherTable++;
} else if (tableName.equals(STRING_TABLE_NAME)) {
initialCountForPrimary++;
}
}

assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@@ -308,11 +311,13 @@ public void testMultipleTables() throws Exception {
String tableName = file.getParent().getParent().getParent().getName();
// ensure we don't have files from the non-archived table
assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
if (tableName.equals(STRING_TABLE_NAME)) {
archivedForPrimary++;
}
}

assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
archivedForPrimary);
assertEquals("Not all archived files for the primary table were retained.",
initialCountForPrimary, archivedForPrimary);

// but we still have the archive directory
assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@@ -389,7 +394,10 @@ public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable

@SuppressWarnings("unchecked")
Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
if (counter[0] >= expected) finished.countDown();
if (counter[0] >= expected) {
finished.countDown();
}

return ret;
}
}).when(delegateSpy).getDeletableFiles(Mockito.anyList());
@@ -414,7 +422,11 @@ private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
for (FileStatus file : files) {
if (file.isDirectory()) {
List<Path> subFiles = getAllFiles(fs, file.getPath());
if (subFiles != null) allFiles.addAll(subFiles);

if (subFiles != null) {
allFiles.addAll(subFiles);
}

continue;
}
allFiles.add(file.getPath());
@@ -441,7 +453,7 @@ private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException
* Create a new hfile in the passed region
* @param region region to operate on
* @param columnFamily family for which to add data
* @throws IOException
* @throws IOException if doing the put or flush fails
*/
private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
// put one row in the region
@@ -453,7 +465,7 @@ private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException
}

/**
* @param cleaner
* @param cleaner the cleaner to use
*/
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
throws InterruptedException {