Skip to content

Commit

Permalink
HBASE-22971 Deprecated RSGroupAdminEndpoint and make RSGroup feature always enabled (#595)
Browse files Browse the repository at this point in the history

Signed-off-by: Guanghao Zhang <[email protected]>
  • Loading branch information
Apache9 committed Feb 29, 2020
1 parent 79aa506 commit 009bba8
Show file tree
Hide file tree
Showing 33 changed files with 240 additions and 277 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,7 @@ private void addRegionToMap(Map<ServerName, List<RegionInfo>> assignmentMapForFa
regionsOnServer.add(region);
}

@Override
// Delegates to the favored nodes manager held by this balancer; synchronized so
// reads stay consistent with concurrent favored-node updates on this instance.
public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
return this.fnm.getFavoredNodes(regionInfo);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,4 +35,6 @@ void generateFavoredNodesForDaughter(List<ServerName> servers,

void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents)
throws IOException;

List<ServerName> getFavoredNodes(RegionInfo regionInfo);
}
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,6 @@
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
Expand All @@ -106,7 +105,6 @@
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
Expand Down Expand Up @@ -188,6 +186,7 @@
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
Expand Down Expand Up @@ -393,7 +392,7 @@ public void run() {

private final LockManager lockManager = new LockManager(this);

private LoadBalancer balancer;
private RSGroupBasedLoadBalancer balancer;
private RegionNormalizer normalizer;
private BalancerChore balancerChore;
private RegionNormalizerChore normalizerChore;
Expand Down Expand Up @@ -450,9 +449,6 @@ public void run() {
private long splitPlanCount;
private long mergePlanCount;

/* Handle favored nodes information */
private FavoredNodesManager favoredNodesManager;

/** jetty server for master to redirect requests to regionserver infoServer */
private Server masterJettyServer;

Expand Down Expand Up @@ -790,7 +786,8 @@ public MetricsMaster getMasterMetrics() {
@VisibleForTesting
protected void initializeZKBasedSystemTrackers()
throws IOException, InterruptedException, KeeperException, ReplicationException {
this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
this.balancer = new RSGroupBasedLoadBalancer();
this.balancer.setConf(conf);
this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
this.normalizer.setMasterServices(this);
this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);
Expand Down Expand Up @@ -1037,9 +1034,6 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
return temp;
});
}
if (this.balancer instanceof FavoredNodesPromoter) {
favoredNodesManager = new FavoredNodesManager(this);
}

// initialize load balancer
this.balancer.setMasterServices(this);
Expand Down Expand Up @@ -1089,11 +1083,11 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
// table states messing up master launch (namespace table, etc., are not assigned).
this.assignmentManager.processOfflineRegions();
// Initialize after meta is up as below scans meta
if (favoredNodesManager != null && !maintenanceMode) {
if (getFavoredNodesManager() != null && !maintenanceMode) {
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(getConnection());
snapshotOfRegionAssignment.initialize();
favoredNodesManager.initialize(snapshotOfRegionAssignment);
getFavoredNodesManager().initialize(snapshotOfRegionAssignment);
}

// set cluster status again after user regions are assigned
Expand Down Expand Up @@ -2055,14 +2049,13 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
// TODO: What is this? I don't get it.
if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
&& !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
// TODO: deal with table on master for rs group.
if (dest.equals(serverName)) {
// To avoid unnecessary region moving later by balancer. Don't put user
// regions on master.
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " to avoid unnecessary region moving later by load balancer,"
+ " because it should not be on master");
LOG.debug("Skipping move of region " + hri.getRegionNameAsString() +
" to avoid unnecessary region moving later by load balancer," +
" because it should not be on master");
return;
}
}
Expand Down Expand Up @@ -3505,12 +3498,14 @@ public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {

/**
 * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
 * <p/>
 * Notice that, the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
 * this method will return the balancer used inside each rs group.
 * @return The name of the {@link LoadBalancer} in use.
 */
public String getLoadBalancerClassName() {
  // Fall back to the factory default when no class is configured explicitly.
  return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
}

/**
Expand All @@ -3525,13 +3520,13 @@ public SplitOrMergeTracker getSplitOrMergeTracker() {
}

@Override
public LoadBalancer getLoadBalancer() {
public RSGroupBasedLoadBalancer getLoadBalancer() {
return balancer;
}

@Override
public FavoredNodesManager getFavoredNodesManager() {
  // Favored-nodes state is now owned by the balancer rather than a master field;
  // NOTE(review): may be null when the internal balancer is not favored-nodes aware — confirm.
  return balancer.getFavoredNodesManager();
}

private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
Expand Down Expand Up @@ -3873,7 +3868,7 @@ public MetaRegionLocationCache getMetaRegionLocationCache() {
}

@Override
public RSGroupInfoManager getRSRSGroupInfoManager() {
public RSGroupInfoManager getRSGroupInfoManager() {
return rsGroupInfoManager;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,15 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
// We deliberately use 'localhost' so the operation will fail fast
ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");

/**
* Config for pluggable load balancers.
* @deprecated since 3.0.0, will be removed in 4.0.0. In the new implementation, as the base load
* balancer will always be the rs group based one, you should just use
* {@link org.apache.hadoop.hbase.HConstants#HBASE_MASTER_LOADBALANCER_CLASS} to
* config the per group load balancer.
*/
@Deprecated
String HBASE_RSGROUP_LOADBALANCER_CLASS = "hbase.rsgroup.grouploadbalancer.class";
/**
* Set the current cluster status. This allows a LoadBalancer to map host name to a server
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
Expand Down Expand Up @@ -230,10 +231,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
Expand Down Expand Up @@ -287,10 +286,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
Expand Down Expand Up @@ -2379,12 +2376,18 @@ public ClearDeadServersResponse clearDeadServers(RpcController controller,
LOG.debug("Some dead server is still under processing, won't clear the dead server list");
response.addAllServerName(request.getServerNameList());
} else {
DeadServer deadServer = master.getServerManager().getDeadServers();
Set<Address> clearedServers = new HashSet<>();
for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
if (!master.getServerManager().getDeadServers()
.removeDeadServer(ProtobufUtil.toServerName(pbServer))) {
ServerName server = ProtobufUtil.toServerName(pbServer);
if (!deadServer.removeDeadServer(server)) {
response.addServerName(pbServer);
} else {
clearedServers.add(server.getAddress());
}
}
master.getRSGroupInfoManager().removeServers(clearedServers);
LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers);
}

if (master.cpHost != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -544,5 +544,5 @@ default SplitWALManager getSplitWALManager(){
/**
 * @return the {@link RSGroupInfoManager}
 */
RSGroupInfoManager getRSGroupInfoManager();
}
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@
import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.regionserver.SequenceId;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
Expand Down Expand Up @@ -329,6 +330,11 @@ private LoadBalancer getBalancer() {
return master.getLoadBalancer();
}

// The master's balancer is always the RSGroup wrapper; the favored-nodes promoter
// is the per-group balancer nested inside it.
private FavoredNodesPromoter getFavoredNodePromoter() {
  RSGroupBasedLoadBalancer groupBalancer = (RSGroupBasedLoadBalancer) master.getLoadBalancer();
  return (FavoredNodesPromoter) groupBalancer.getInternalBalancer();
}

// Shared execution environment of the master's procedure executor.
private MasterProcedureEnv getProcedureEnvironment() {
return master.getMasterProcedureExecutor().getEnvironment();
}
Expand Down Expand Up @@ -372,7 +378,7 @@ public RegionStateStore getRegionStateStore() {

public List<ServerName> getFavoredNodes(final RegionInfo regionInfo) {
return this.shouldAssignRegionsWithFavoredNodes
? ((FavoredStochasticBalancer) getBalancer()).getFavoredNodes(regionInfo)
? getFavoredNodePromoter().getFavoredNodes(regionInfo)
: ServerName.EMPTY_SERVER_LIST;
}

Expand Down Expand Up @@ -1833,8 +1839,8 @@ public void markRegionAsSplit(final RegionInfo parent, final ServerName serverNa
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
if (shouldAssignFavoredNodes(parent)) {
List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
((FavoredNodesPromoter)getBalancer()).
generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
getFavoredNodePromoter().generateFavoredNodesForDaughter(onlineServers, parent, daughterA,
daughterB);
}
}

Expand All @@ -1859,8 +1865,7 @@ public void markRegionAsMerged(final RegionInfo child, final ServerName serverNa
}
regionStateStore.mergeRegions(child, mergeParents, serverName);
if (shouldAssignFavoredNodes(child)) {
((FavoredNodesPromoter)getBalancer()).
generateFavoredNodesForMergedRegion(child, mergeParents);
getFavoredNodePromoter().generateFavoredNodesForMergedRegion(child, mergeParents);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -473,6 +473,7 @@ private List<ServerName> getOnlineFavoredNodes(List<ServerName> onlineServers,
}
}

@Override
// Delegates to the favored nodes manager held by this balancer; synchronized so
// reads stay consistent with concurrent favored-node updates on this instance.
public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
return this.fnm.getFavoredNodes(regionInfo);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@
*/
package org.apache.hadoop.hbase.master.balancer;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;

/**
* The class that creates a load balancer from a conf.
Expand All @@ -30,8 +30,7 @@
public class LoadBalancerFactory {

/**
* The default {@link LoadBalancer} class.
*
* The default {@link LoadBalancer} class.
* @return The Class for the default {@link LoadBalancer}.
*/
public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
Expand All @@ -40,16 +39,15 @@ public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {

/**
 * Create a load balancer from the given conf.
 * @param conf configuration used to pick and configure the balancer implementation
 * @return A {@link LoadBalancer}
 */
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Resolve the configured implementation, falling back to the default class.
  Class<? extends LoadBalancer> balancerKlass =
    conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(),
      LoadBalancer.class);
  // HBase's ReflectionUtils.newInstance does not inject the conf, so set it explicitly.
  LoadBalancer balancer = ReflectionUtils.newInstance(balancerKlass);
  balancer.setConf(conf);
  return balancer;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.util.function.Supplier;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
Expand All @@ -28,6 +29,7 @@
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.FSUtils;

/**
Expand Down Expand Up @@ -122,4 +124,13 @@ public static void createDirectory(MasterFileSystem mfs, NamespaceDescriptor nsD
// Releases the prepare latch for this procedure, waking any caller blocked on it.
protected void releaseSyncLatch() {
ProcedurePrepareLatch.releaseLatch(syncLatch, this);
}

/**
 * Validates the rs group referenced by the namespace descriptor: the group must exist and
 * must not be empty.
 * @throws IOException if the group is missing or has no servers
 */
protected final void checkNamespaceRSGroup(MasterProcedureEnv env, NamespaceDescriptor nd)
    throws IOException {
  Supplier<String> forWhom = () -> "namespace " + nd.getName();
  String groupName = MasterProcedureUtil.getNamespaceGroup(nd);
  RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
    env.getMasterServices().getRSGroupInfoManager()::getRSGroup, groupName, forWhom);
  MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,7 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
return false;
}
getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
checkNamespaceRSGroup(env, nsDescriptor);
return true;
}

Expand Down
Loading

0 comments on commit 009bba8

Please sign in to comment.