-
Notifications
You must be signed in to change notification settings - Fork 3.3k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
HBASE-22776 Rename config names in user scan snapshot feature #440
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -28,6 +28,7 @@ | |
import java.io.FileNotFoundException; | ||
import java.io.IOException; | ||
import java.util.ArrayList; | ||
import java.util.Collections; | ||
import java.util.HashSet; | ||
import java.util.List; | ||
import java.util.Map; | ||
|
@@ -53,6 +54,7 @@ | |
import org.apache.hadoop.hbase.client.Connection; | ||
import org.apache.hadoop.hbase.client.SnapshotDescription; | ||
import org.apache.hadoop.hbase.client.TableDescriptor; | ||
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; | ||
import org.apache.hadoop.hbase.mob.MobUtils; | ||
import org.apache.hadoop.hbase.util.Bytes; | ||
import org.apache.yetus.audience.InterfaceAudience; | ||
|
@@ -71,23 +73,23 @@ | |
public class SnapshotScannerHDFSAclHelper implements Closeable { | ||
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class); | ||
|
||
public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable"; | ||
public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER = | ||
"hbase.user.scan.snapshot.thread.number"; | ||
public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable"; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. better to name it as 'hbase.table.acl.sync.to.hdfs.enable' ? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The steps to config this feature is as followings:
So this config is used in both step 3 and step 4. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. OK |
||
public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER = | ||
"hbase.acl.sync.to.hdfs.thread.number"; | ||
// The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir | ||
public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir"; | ||
public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT = | ||
"/hbase/.tmpdir-to-restore-snapshot"; | ||
// The default permission of the common directories if the feature is enabled. | ||
public static final String COMMON_DIRECTORY_PERMISSION = | ||
"hbase.user.scan.snapshot.common.directory.permission"; | ||
"hbase.acl.sync.to.hdfs.common.directory.permission"; | ||
// The secure HBase permission is 700, 751 means all others have execute access and the mask is | ||
// set to read-execute to make the extended access ACL entries can work. Be cautious to set | ||
// this value. | ||
public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751"; | ||
// The default permission of the snapshot restore directories if the feature is enabled. | ||
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION = | ||
"hbase.user.scan.snapshot.restore.directory.permission"; | ||
"hbase.acl.sync.to.hdfs.restore.directory.permission"; | ||
// 753 means all others have write-execute access. | ||
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753"; | ||
|
||
|
@@ -102,7 +104,7 @@ public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection conn | |
this.conf = configuration; | ||
this.pathHelper = new PathHelper(conf); | ||
this.fs = pathHelper.getFileSystem(); | ||
this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10), | ||
this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10), | ||
new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build()); | ||
this.admin = connection.getAdmin(); | ||
} | ||
|
@@ -230,6 +232,50 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeU | |
} | ||
} | ||
|
||
/** | ||
* Remove default acl from namespace archive dir when delete namespace | ||
* @param namespace the namespace | ||
* @param removeUsers the users whose default acl will be removed | ||
* @return false if an error occurred, otherwise true | ||
*/ | ||
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) { | ||
try { | ||
long start = System.currentTimeMillis(); | ||
Path archiveNsDir = pathHelper.getArchiveNsDir(namespace); | ||
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, | ||
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); | ||
operation.handleAcl(); | ||
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, | ||
System.currentTimeMillis() - start); | ||
return true; | ||
} catch (Exception e) { | ||
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e); | ||
return false; | ||
} | ||
} | ||
|
||
/** | ||
* Remove default acl from table archive dir when delete table | ||
* @param tableName the table name | ||
* @param removeUsers the users whose default acl will be removed | ||
* @return false if an error occurred, otherwise true | ||
*/ | ||
public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) { | ||
try { | ||
long start = System.currentTimeMillis(); | ||
Path archiveTableDir = pathHelper.getArchiveTableDir(tableName); | ||
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, | ||
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); | ||
operation.handleAcl(); | ||
LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, | ||
System.currentTimeMillis() - start); | ||
return true; | ||
} catch (Exception e) { | ||
LOG.error("Remove HDFS acl error when delete table {}", tableName, e); | ||
return false; | ||
} | ||
} | ||
|
||
/** | ||
* Add table user acls | ||
* @param tableName the table | ||
|
@@ -349,7 +395,7 @@ private void handleNamespaceAcl(Set<String> namespaces, Set<String> users, | |
Set<TableName> tables = new HashSet<>(); | ||
for (String namespace : namespaces) { | ||
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream() | ||
.filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName) | ||
.filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) | ||
.collect(Collectors.toSet())); | ||
} | ||
handleTableAcl(tables, users, skipNamespaces, skipTables, operationType); | ||
|
@@ -403,7 +449,7 @@ void createTableDirectories(TableName tableName) throws IOException { | |
* return paths that user will global permission will visit | ||
* @return the path list | ||
*/ | ||
private List<Path> getGlobalRootPaths() { | ||
List<Path> getGlobalRootPaths() { | ||
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(), | ||
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir()); | ||
} | ||
|
@@ -511,9 +557,20 @@ boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) { | |
return !tablePermission.hasFamily() && !tablePermission.hasQualifier(); | ||
} | ||
|
||
boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) { | ||
public static boolean isAclSyncToHdfsEnabled(Configuration conf) { | ||
String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); | ||
Set<String> masterCoprocessorSet = new HashSet<>(); | ||
if (masterCoprocessors != null) { | ||
Collections.addAll(masterCoprocessorSet, masterCoprocessors); | ||
} | ||
return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) | ||
&& masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) | ||
&& masterCoprocessorSet.contains(AccessController.class.getName()); | ||
} | ||
|
||
boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) { | ||
return tableDescriptor == null ? false | ||
: Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE)); | ||
: Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); | ||
} | ||
|
||
PathHelper getPathHelper() { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why create the table directories here ? I guess the TruncateTableProcedure will create the dir ?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The tmp table dir is deleted after truncate table, so recreate this directory.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
So please add a comment to say: it's mainly used for creating the tmp table dir here because ...