HBASE-22776 Rename config names in user scan snapshot feature #440

Merged: 1 commit, Aug 8, 2019
@@ -71,6 +71,8 @@
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclCleaner;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -1123,6 +1125,10 @@ private void checkSnapshotSupport(final Configuration conf, final MasterFileSyst
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// If the sync-acl-to-HDFS feature is enabled, inject the cleaner
if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
}

// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
@@ -27,7 +27,6 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.yetus.audience.InterfaceAudience;
@@ -59,7 +58,7 @@ public void init(Map<String, Object> params) {
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf);
userScanSnapshotEnabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf);
}

@Override
@@ -82,13 +81,6 @@ public boolean isEmptyDirDeletable(Path dir) {
return true;
}

private boolean isUserScanSnapshotEnabled(Configuration conf) {
String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)
&& masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName())
&& masterCoprocessors.contains(AccessController.class.getName());
}

private boolean isEmptyArchiveDirDeletable(Path dir) {
try {
if (isArchiveDataDir(dir)) {
@@ -119,7 +119,7 @@ public Optional<MasterObserver> getMasterObserver() {
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
throws IOException {
if (c.getEnvironment().getConfiguration()
.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)) {
.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
MasterCoprocessorEnvironment mEnv = c.getEnvironment();
if (!(mEnv instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
@@ -133,7 +133,7 @@ public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment
userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration());
} else {
LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure "
+ "because the config " + SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE
+ "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE
+ " is false.");
}
}
@@ -213,7 +213,9 @@ public void postCompletedSnapshotAction(ObserverContext<MasterCoprocessorEnviron
public void postCompletedTruncateTableAction(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) throws IOException {
if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) {
// Since the table directories is recreated, so add HDFS acls again
// 1. create tmp table directories
hdfsAclHelper.createTableDirectories(tableName);
Member: Why create the table directories here? I guess the TruncateTableProcedure will create the dir?

Contributor (author): The tmp table dir is deleted after truncate table, so recreate this directory.

Member: So please add a comment to say it's mainly used for creating the tmp table dir here, because ...

// 2. Since the table directories are recreated, add the HDFS acls again
Set<String> users = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
hdfsAclHelper.addTableAcl(tableName, users, "truncate");
}
@@ -233,9 +235,11 @@ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
Set<String> users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName);
// 1. Delete table owner permission is synced to HDFS in acl table
// 1. Remove table archive directory default ACLs
hdfsAclHelper.removeTableDefaultAcl(tableName, users);
// 2. Delete the record that table owner permission is synced to HDFS from the acl table
SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName);
// 2. Remove namespace access acls
// 3. Remove namespace access acls
Set<String> removeUsers = filterUsersToRemoveNsAccessAcl(aclTable, tableName, users);
if (removeUsers.size() > 0) {
hdfsAclHelper.removeNamespaceAccessAcl(tableName, removeUsers, "delete");
@@ -251,7 +255,7 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName)
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(oldDescriptor)) {
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) {
// 1. Create table directories used for acl inherited
hdfsAclHelper.createTableDirectories(tableName);
// 2. Add table users HDFS acls
Expand All @@ -264,7 +268,7 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(),
tableUsers, tableName);
} else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName)
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(currentDescriptor)) {
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) {
// 1. Remove empty table directories
List<Path> tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
for (Path path : tableRootPaths) {
@@ -290,17 +294,24 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException {
if (checkInitialized("deleteNamespace " + namespace)) {
// 1. Record namespace user acl is not synced to HDFS
SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
namespace);
// 2. Delete tmp namespace directory
/**
* Delete namespace tmp directory because it's created by this coprocessor when namespace is
* created to make namespace default acl can be inherited by tables. The namespace data
* directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is
* deleted by HFileCleaner.
*/
hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
// 1. Delete namespace archive dir default ACLs
Set<String> users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable,
PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace)));
hdfsAclHelper.removeNamespaceDefaultAcl(namespace, users);
// 2. Record namespace user acl is not synced to HDFS
SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
namespace);
// 3. Delete tmp namespace directory
/**
* Delete the namespace tmp directory, because it is created by this coprocessor when the
* namespace is created, so that the namespace default acl can be inherited by tables. The
* namespace data directory is deleted by DeleteNamespaceProcedure, and the namespace archive
* directory is deleted by HFileCleaner.
*/
hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
}
}
}

@@ -364,7 +375,9 @@ public void postGrant(ObserverContext<MasterCoprocessorEnvironment> c,
UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
if (!isHdfsAclSet(aclTable, userName, tableName)) {
// 1. Add HDFS acl
// 1. create table dirs
hdfsAclHelper.createTableDirectories(tableName);
// 2. Add HDFS acl
hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
}
// 2. Record table acl is synced to HDFS
@@ -547,13 +560,13 @@ private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws I

private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException {
return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper
.isTableUserScanSnapshotEnabled(masterServices.getTableDescriptors().get(tableName));
.isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName));
}

private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) {
TableName tableName = tableDescriptor.getTableName();
return !tableName.isSystemTable() && checkInitialized(operation)
&& hdfsAclHelper.isTableUserScanSnapshotEnabled(tableDescriptor);
&& hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor);
}

private User getActiveUser(ObserverContext<?> ctx) throws IOException {
@@ -28,6 +28,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -53,6 +54,7 @@
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -71,23 +73,23 @@
public class SnapshotScannerHDFSAclHelper implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);

public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
"hbase.user.scan.snapshot.thread.number";
public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
Member: Better to name it as 'hbase.table.acl.sync.to.hdfs.enable'?

Contributor (author): The steps to configure this feature are as follows:

  1. Check the HDFS configuration
  2. Add the master coprocessors:
    hbase.coprocessor.master.classes =
    "org.apache.hadoop.hbase.security.access.AccessController
    ,org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController"
  3. Enable this feature:
    hbase.acl.sync.to.hdfs.enable=true
  4. Modify the table schema to enable this feature:
    alter 't1', CONFIGURATION => {'hbase.acl.sync.to.hdfs.enable' => 'true'}

So this config has two usages, in steps 3 and 4. If step 3 is set, the acl will sync to HDFS on namespace or global grants. Maybe 'hbase.table.acl.sync.to.hdfs.enable' would be confusing, because global and namespace acls are also synced?

Member: OK
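
For reference, a minimal client-side sketch of steps 2-4 above, assuming the standard HBase 2.x client API. The table name 't1' is illustrative, and in practice the coprocessor list and the cluster-wide enable key belong in hbase-site.xml rather than on a client Configuration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class EnableAclSyncToHdfs {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Step 2: register both master coprocessors (normally set in hbase-site.xml).
      conf.set("hbase.coprocessor.master.classes",
          "org.apache.hadoop.hbase.security.access.AccessController,"
              + "org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController");
      // Step 3: enable the feature cluster-wide (normally set in hbase-site.xml).
      conf.setBoolean("hbase.acl.sync.to.hdfs.enable", true);
      // Step 4: enable the feature for one table; the programmatic equivalent of
      //   alter 't1', CONFIGURATION => {'hbase.acl.sync.to.hdfs.enable' => 'true'}
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
        TableDescriptor current = admin.getDescriptor(TableName.valueOf("t1"));
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .setValue("hbase.acl.sync.to.hdfs.enable", "true")
            .build();
        admin.modifyTable(modified);
      }
    }
  }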

public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
"hbase.acl.sync.to.hdfs.thread.number";
// The tmp directory used to restore snapshots; it can not be a sub directory of the HBase root dir
public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
"/hbase/.tmpdir-to-restore-snapshot";
// The default permission of the common directories if the feature is enabled.
public static final String COMMON_DIRECTORY_PERMISSION =
"hbase.user.scan.snapshot.common.directory.permission";
"hbase.acl.sync.to.hdfs.common.directory.permission";
// The secure HBase permission is 700; 751 means all others have execute access and the mask is
// set to read-execute so that the extended access ACL entries can work. Be cautious when setting
// this value.
public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
// The default permission of the snapshot restore directories if the feature is enabled.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
"hbase.user.scan.snapshot.restore.directory.permission";
"hbase.acl.sync.to.hdfs.restore.directory.permission";
// 753 means all others have write-execute access.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";

@@ -102,7 +104,7 @@ public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection conn
this.conf = configuration;
this.pathHelper = new PathHelper(conf);
this.fs = pathHelper.getFileSystem();
this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
this.admin = connection.getAdmin();
}
@@ -230,6 +232,50 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeU
}
}

/**
* Remove the default acl from the namespace archive dir when deleting a namespace
* @param namespace the namespace
* @param removeUsers the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
try {
long start = System.currentTimeMillis();
Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
System.currentTimeMillis() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
return false;
}
}

/**
* Remove the default acl from the table archive dir when deleting a table
* @param tableName the table name
* @param removeUsers the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
try {
long start = System.currentTimeMillis();
Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
System.currentTimeMillis() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
return false;
}
}

/**
* Add table user acls
* @param tableName the table
@@ -349,7 +395,7 @@ private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
Set<TableName> tables = new HashSet<>();
for (String namespace : namespaces) {
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
.filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName)
.filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
.collect(Collectors.toSet()));
}
handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
@@ -403,7 +449,7 @@ void createTableDirectories(TableName tableName) throws IOException {
* return paths that users with global permission will visit
* @return the path list
*/
private List<Path> getGlobalRootPaths() {
List<Path> getGlobalRootPaths() {
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
}
@@ -511,9 +557,20 @@ boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
}

boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) {
public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
Set<String> masterCoprocessorSet = new HashSet<>();
if (masterCoprocessors != null) {
Collections.addAll(masterCoprocessorSet, masterCoprocessors);
}
return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
&& masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
&& masterCoprocessorSet.contains(AccessController.class.getName());
}

boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
return tableDescriptor == null ? false
: Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE));
: Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
}

PathHelper getPathHelper() {