HBASE-23055 Alter hbase:meta
Make it so hbase:meta can be altered. TableState for hbase:meta
is kept in the Master. State is in-memory and transient, so if the Master
fails, hbase:meta is ENABLED again. The hbase:meta schema is
bootstrapped from the filesystem. Changes to the filesystem schema
are atomic, so we should be ok if the Master fails mid-edit (TBD).
Undoes a bunch of guards that prevented our being able to edit
hbase:meta. At minimum, need to add in a bunch of WARNINGs.

TODO: Tests, more clarity around hbase:meta table state, and undoing
references to hard-coded hbase:meta regioninfo.
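
For orientation, here is a hypothetical usage sketch (not part of this patch) of the
kind of operation the change is meant to allow. The disable/enable bracketing and the
blocksize edit below are illustrative assumptions only, not steps taken from the diff.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AlterMetaSketch {
  static void alterMeta(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      TableName meta = TableName.META_TABLE_NAME;
      admin.disableTable(meta);            // allowed once DisableTableProcedure drops its guard
      TableDescriptor td = admin.getDescriptor(meta);
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(td.getColumnFamily(HConstants.CATALOG_FAMILY))
          .setBlocksize(16 * 1024)         // example schema edit, persisted to the filesystem
          .build();
      admin.modifyColumnFamily(meta, cf);
      admin.enableTable(meta);             // table state lives in the Master and is transient;
                                           // a Master restart resets hbase:meta to ENABLED
    }
  }
}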

M hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
 Throw an IllegalAccessError if you try to use MetaTableAccessor to get
 the state of the hbase:meta table.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 TODO: getTableState needs work in Connection implementations. It presumes
 state is in the meta table for all tables. Uses MetaTableAccessor.
 TODO: More cleanup in here and in the async versions.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
 Change the isTableDisabled/Enabled implementations to ask the Master instead.
 This gives the Master's TableStateManager's opinion rather than the client
 figuring it out for itself by reading the meta table directly.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 TODO: Cleanup in here. Go to the Master for state, not to the meta table.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
 Logging cleanup.

M hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
 Shut down access.

M hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
 Just cleanup.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 Add a state holder for hbase:meta.
 Remove unused methods.
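
 A minimal sketch of the state-holder idea follows, written as a standalone class since
 the TableStateManager diff itself is not shown on this page; the names below are
 invented for illustration and are not the patch's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;

/** Illustration only: an in-memory, transient holder for hbase:meta table state. */
class MetaTableStateHolder {
  // Not persisted anywhere: a Master restart loses this field and hbase:meta
  // comes back as ENABLED, as described in the commit message above.
  private volatile TableState.State metaState = TableState.State.ENABLED;

  TableState getMetaTableState() {
    return new TableState(TableName.META_TABLE_NAME, metaState);
  }

  void setMetaTableState(TableState.State newState) {
    this.metaState = newState;
  }
}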

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
 Shut down access.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
 Allow hbase:meta to be disabled.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
 Allow hbase:meta to be enabled.
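
 Since the procedure diffs are not shown on this page, here is a hedged sketch of the
 kind of special-casing being relaxed; the class and method names are invented and the
 old check is paraphrased, not quoted from the diff.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustration only; not the procedures' real code. */
final class MetaGuardSketch {
  private static final Logger LOG = LoggerFactory.getLogger(MetaGuardSketch.class);

  /** The flavor of guard being removed: refuse the catalog table outright. */
  static void oldGuard(TableName tableName) throws ConstraintException {
    if (TableName.META_TABLE_NAME.equals(tableName)) {
      throw new ConstraintException("Cannot disable " + tableName);
    }
  }

  /** After the change: let hbase:meta through, but warn loudly (the WARNINGs noted above). */
  static void newGuard(TableName tableName) {
    if (TableName.META_TABLE_NAME.equals(tableName)) {
      LOG.warn("Disabling {}; catalog lookups will fail until it is re-enabled", tableName);
    }
  }
}
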
saintstack committed Sep 19, 2019
1 parent 294487c commit 487c724
Showing 17 changed files with 187 additions and 245 deletions.
hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -304,11 +304,18 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re
*/
public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
throws IOException {
byte[] row = getMetaKeyForRegion(regionInfo);
Get get = new Get(row);
return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
regionInfo, regionInfo.getReplicaId());
}

/**
* @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta.
*/
public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
throws IOException {
Get get = new Get(getMetaKeyForRegion(ri));
get.addFamily(HConstants.CATALOG_FAMILY);
Result r = get(getMetaHTable(connection), get);
return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
return get(getMetaHTable(connection), get);
}

/** Returns the row key to use for this regionInfo */
@@ -1110,7 +1117,7 @@ public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) {
public static TableState getTableState(Connection conn, TableName tableName)
throws IOException {
if (tableName.equals(TableName.META_TABLE_NAME)) {
return new TableState(tableName, TableState.State.ENABLED);
throw new IllegalAccessError("Go to the Master to find hbase:meta table state, not here");
}
Table metaHTable = getMetaHTable(conn);
Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
@@ -1138,7 +1145,8 @@ public static Map<TableName, TableState> getTableStates(Connection conn)
}

/**
* Updates state in META
* Updates state in META.
* Do not use. For internal use only.
* @param conn connection to use
* @param tableName table to look for
*/
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -45,6 +45,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.CallQueueTooBigException;
@@ -2057,6 +2058,9 @@ public NonceGenerator getNonceGenerator() {

@Override
public TableState getTableState(TableName tableName) throws IOException {
// TODO: This doesn't work if tablename is hbase:meta. Need to ask Master.
// Other problems with this implementation are that it presumes state is
// available in Master. Would be good to hide how state is kept.
checkClosed();
TableState tableState = MetaTableAccessor.getTableState(this, tableName);
if (tableState == null) {
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -91,6 +91,7 @@
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -948,22 +949,39 @@ public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
@Override
public boolean isTableEnabled(final TableName tableName) throws IOException {
checkTableExists(tableName);
return executeCallable(new RpcRetryingCallable<Boolean>() {
// Go to the Master. It knows state of all tables.
return executeCallable(new MasterCallable<Boolean>(getConnection(),
getRpcControllerFactory()) {
@Override
protected Boolean rpcCall(int callTimeout) throws Exception {
TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
if (tableState == null) {
protected Boolean rpcCall() throws Exception {
setPriority(tableName);
MasterProtos.GetTableStateRequest req = RequestConverter.buildGetTableStateRequest(tableName);
MasterProtos.GetTableStateResponse ret = master.getTableState(getRpcController(), req);
if (!ret.hasTableState() || ret.getTableState() == null) {
throw new TableNotFoundException(tableName);
}
return tableState.inStates(TableState.State.ENABLED);
return ret.getTableState().getState() == HBaseProtos.TableState.State.ENABLED;
}
});
}

@Override
public boolean isTableDisabled(TableName tableName) throws IOException {
checkTableExists(tableName);
return connection.isTableDisabled(tableName);
// Go to the Master. It knows state of all tables.
return executeCallable(new MasterCallable<Boolean>(getConnection(),
getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
setPriority(tableName);
MasterProtos.GetTableStateRequest req = RequestConverter.buildGetTableStateRequest(tableName);
MasterProtos.GetTableStateResponse ret = master.getTableState(getRpcController(), req);
if (!ret.hasTableState() || ret.getTableState() == null) {
throw new TableNotFoundException(tableName);
}
return ret.getTableState().getState() == HBaseProtos.TableState.State.DISABLED;
}
});
}

@Override
@@ -4357,5 +4375,4 @@ protected Boolean rpcCall() throws Exception {
});

}

}
hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java
@@ -1,4 +1,4 @@
/**
/*
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -23,6 +23,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.yetus.audience.InterfaceAudience;

import java.io.Closeable;

/**
* A KeepAlive connection is not physically closed immediately after the close,
* but rather kept alive for a few minutes. It makes sense only if it is shared.
@@ -35,7 +37,7 @@
* final user code. Hence it's package protected.
*/
@InterfaceAudience.Private
interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface {
interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface, Closeable {
// Do this instead of implement Closeable because closeable returning IOE is PITA.
void close();
}
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -663,9 +663,9 @@ public CompletableFuture<Void> disableTable(TableName tableName) {

@Override
public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
if (TableName.isMetaTableName(tableName)) {
return CompletableFuture.completedFuture(true);
}
// TODO: This doesn't work if tablename is hbase:meta. Need to ask Master.
// Other problems with this implementation are that it presumes state is
// available in Master. Would be good to hide how state is kept.
CompletableFuture<Boolean> future = new CompletableFuture<>();
addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
if (error != null) {
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -158,7 +158,8 @@ private void getMetaRegionLocation(CompletableFuture<RegionLocations> future,
}
Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
LOG.warn("hbase:meta region (replicaId={}) is in state {}", replicaId,
stateAndServerName.getFirst());
}
locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
@@ -173,7 +174,7 @@ private void getMetaRegionLocation(CompletableFuture<RegionLocations> future,
LOG.warn("Failed to fetch " + path, error);
locs[replicaId] = null;
} else if (proto == null) {
LOG.warn("Meta znode for replica " + replicaId + " is null");
LOG.warn("hbase:meta znode for replica " + replicaId + " is null");
locs[replicaId] = null;
} else {
Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
@@ -197,9 +198,8 @@ private void getMetaRegionLocation(CompletableFuture<RegionLocations> future,
public CompletableFuture<RegionLocations> getMetaRegionLocation() {
CompletableFuture<RegionLocations> future = new CompletableFuture<>();
addListener(
zk.list(znodePaths.baseZNode)
.thenApply(children -> children.stream()
.filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
zk.list(znodePaths.baseZNode).thenApply(children -> children.stream().
filter(c -> znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
(metaReplicaZNodes, error) -> {
if (error != null) {
future.completeExceptionally(error);
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -24,6 +24,7 @@
import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;

import java.util.Collection;
import java.util.Optional;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
@@ -40,15 +41,24 @@ public class ZNodePaths {
// TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
public static final char ZNODE_PATH_SEPARATOR = '/';

public final static String META_ZNODE_PREFIX = "meta-region-server";
private static final String META_ZNODE_PREFIX = "meta-region-server";
private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";

// base znode for this cluster
public final String baseZNode;
// the prefix of meta znode, does not include baseZNode.
public final String metaZNodePrefix;
// znodes containing the locations of the servers hosting the meta replicas
public final ImmutableMap<Integer, String> metaReplicaZNodes;

/**
* The prefix of meta znode. Does not include baseZNode.
 * It's a 'prefix' because the meta replica id integer can be tagged on the end (if
* no number present, it is 'default' replica).
*/
private final String metaZNodePrefix;

/**
* znodes containing the locations of the servers hosting the meta replicas
*/
private final ImmutableMap<Integer, String> metaReplicaZNodes;

// znode containing ephemeral nodes of the regionservers
public final String rsZNode;
// znode containing ephemeral nodes of the draining regionservers
@@ -158,21 +168,21 @@ public String toString() {
}

/**
* Is the znode of any meta replica
* @param node
* @return true or false
* @return true if the znode is a meta region replica
*/
public boolean isAnyMetaReplicaZNode(String node) {
if (metaReplicaZNodes.containsValue(node)) {
return true;
}
return false;
return this.metaReplicaZNodes.containsValue(node);
}

/**
* @return Meta Replica ZNodes
*/
public Collection<String> getMetaReplicaZNodes() {
return this.metaReplicaZNodes.values();
}

/**
* Get the znode string corresponding to a replicaId
* @param replicaId
* @return znode
* @return the znode string corresponding to a replicaId
*/
public String getZNodeForReplica(int replicaId) {
// return a newly created path but don't update the cache of paths
@@ -183,24 +193,21 @@ public String getZNodeForReplica(int replicaId) {
}

/**
* Parse the meta replicaId from the passed znode
* Parse the meta replicaId from the passed znode name.
* @param znode the name of the znode, does not include baseZNode
* @return replicaId
*/
public int getMetaReplicaIdFromZnode(String znode) {
if (znode.equals(metaZNodePrefix)) {
return RegionInfo.DEFAULT_REPLICA_ID;
}
return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
return znode.equals(metaZNodePrefix)?
RegionInfo.DEFAULT_REPLICA_ID:
Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
}

/**
* Is it the default meta replica's znode
* @param znode the name of the znode, does not include baseZNode
* @return true or false
* @return True if meta znode.
*/
public boolean isDefaultMetaReplicaZnode(String znode) {
return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
public boolean isMetaZNodePrefix(String znode) {
return znode != null && znode.startsWith(this.metaZNodePrefix);
}

/**
hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1208,12 +1208,6 @@ public enum OperationStatusCode {
HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
}));

/** Directories that are not HBase user table directories */
public static final List<String> HBASE_NON_USER_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
new String[] { TableName.META_TABLE_NAME.getNameAsString() },
HBASE_NON_TABLE_DIRS.toArray())));

/** Health script related settings. */
public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
public static final String HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";
hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -25,51 +25,39 @@

/**
* Get, remove and modify table descriptors.
* Used by servers to host descriptors.
*/
@InterfaceAudience.Private
public interface TableDescriptors {
/**
* @param tableName
* @return TableDescriptor for tablename
* @throws IOException
*/
TableDescriptor get(final TableName tableName)
throws IOException;
TableDescriptor get(final TableName tableName) throws IOException;

/**
* Get Map of all NamespaceDescriptors for a given namespace.
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getByNamespace(String name)
throws IOException;
Map<String, TableDescriptor> getByNamespace(String name) throws IOException;

/**
* Get Map of all TableDescriptors. Populates the descriptor cache as a
* side effect.
* Notice: the key of map is the table name which contains namespace. It was generated by
* {@link TableName#getNameWithNamespaceInclAsString()}.
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getAll() throws IOException;

/**
* Add or update descriptor
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
void add(final TableDescriptor htd)
throws IOException;
void add(final TableDescriptor htd) throws IOException;

/**
* @param tablename
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
TableDescriptor remove(final TableName tablename)
throws IOException;
TableDescriptor remove(final TableName tablename) throws IOException;

/**
* Enables the tabledescriptor cache
(Diffs for the remaining changed files are not loaded on this page.)