diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 48476e4bb3af..f29104df3c0c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -289,7 +289,8 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re
RegionLocations locations = getRegionLocations(r);
return locations == null
? null
- : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
+ : locations.getRegionLocation(
+ parsedInfo == null ? RegionInfo.DEFAULT_REPLICA_ID : parsedInfo.getReplicaId());
}
/**
@@ -333,12 +334,12 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws
/**
* Gets the result in hbase:meta for the specified region.
* @param connection connection we're using
- * @param regionName region we're looking for
+ * @param regionInfo region we're looking for
* @return result of the specified region
*/
- public static Result getRegionResult(Connection connection, byte[] regionName)
+ public static Result getRegionResult(Connection connection, RegionInfo regionInfo)
throws IOException {
- Get get = new Get(regionName);
+ Get get = new Get(getMetaKeyForRegion(regionInfo));
get.addFamily(HConstants.CATALOG_FAMILY);
return get(getMetaHTable(connection), get);
}
@@ -364,20 +365,20 @@ public static Result scanByRegionEncodedName(Connection connection, String regio
}
/**
- * Returns Return all regioninfos listed in the 'info:merge*' columns of the
- * <code>regionName</code> row.
+ * Returns Return all regioninfos listed in the 'info:merge*' columns of the {@code regionInfo}
+ * row.
*/
@Nullable
- public static List<RegionInfo> getMergeRegions(Connection connection, byte[] regionName)
+ public static List<RegionInfo> getMergeRegions(Connection connection, RegionInfo regionInfo)
throws IOException {
- return getMergeRegions(getRegionResult(connection, regionName).rawCells());
+ return getMergeRegions(getRegionResult(connection, regionInfo).rawCells());
}
/**
- * Check whether the given {@code regionName} has any 'info:merge*' columns.
+ * Check whether the given {@code regionInfo} has any 'info:merge*' columns.
*/
- public static boolean hasMergeRegions(Connection conn, byte[] regionName) throws IOException {
- return hasMergeRegions(getRegionResult(conn, regionName).rawCells());
+ public static boolean hasMergeRegions(Connection conn, RegionInfo regionInfo) throws IOException {
+ return hasMergeRegions(getRegionResult(conn, regionInfo).rawCells());
}
/**
@@ -1294,11 +1295,21 @@ public final boolean visit(Result rowResult) throws IOException {
////////////////////////
// Editing operations //
////////////////////////
+
+ /**
+ * Generates and returns a {@link Put} containing the {@link RegionInfo} for the catalog table.
+ * @throws IllegalArgumentException when the provided RegionInfo is not the default replica.
+ */
+ public static Put makePutFromRegionInfo(RegionInfo regionInfo) throws IOException {
+ return makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime());
+ }
+
/**
- * Generates and returns a Put containing the region into for the catalog table
+ * Generates and returns a {@link Put} containing the {@link RegionInfo} for the catalog table.
+ * @throws IllegalArgumentException when the provided RegionInfo is not the default replica.
*/
public static Put makePutFromRegionInfo(RegionInfo regionInfo, long ts) throws IOException {
- return addRegionInfo(new Put(regionInfo.getRegionName(), ts), regionInfo);
+ return addRegionInfo(new Put(getMetaKeyForRegion(regionInfo), ts), regionInfo);
}
/**
@@ -1308,7 +1319,11 @@ public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
- Delete delete = new Delete(regionInfo.getRegionName());
+ if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
+ throw new IllegalArgumentException(
+ "Can't make delete for a replica region. Operate on the primary");
+ }
+ Delete delete = new Delete(getMetaKeyForRegion(regionInfo));
delete.addFamily(getCatalogFamily(), ts);
return delete;
}
@@ -1399,9 +1414,14 @@ private static void deleteFromMetaTable(final Connection connection, final List<
}
}
- private static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException {
+ /**
+ * Set the column value corresponding to this {@code replicaId}'s {@link RegionState} to the
+ * provided {@code state}. Mutates the provided {@link Put}.
+ */
+ private static Put addRegionStateToPut(Put put, int replicaId, RegionState.State state)
+ throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
- .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getRegionStateColumn())
+ .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getRegionStateColumn(replicaId))
.setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name()))
.build());
return put;
@@ -1412,9 +1432,9 @@ private static Put addRegionStateToPut(Put put, RegionState.State state) throws
*/
public static void updateRegionState(Connection connection, RegionInfo ri,
RegionState.State state) throws IOException {
- Put put = new Put(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionName());
- MetaTableAccessor.putsToMetaTable(connection,
- Collections.singletonList(addRegionStateToPut(put, state)));
+ final Put put = makePutFromRegionInfo(ri);
+ addRegionStateToPut(put, ri.getReplicaId(), state);
+ putsToMetaTable(connection, Collections.singletonList(put));
}
/**
@@ -1432,7 +1452,7 @@ public static void updateRegionState(Connection connection, RegionInfo ri,
public static void addSplitsToParent(Connection connection, RegionInfo regionInfo,
RegionInfo splitA, RegionInfo splitB) throws IOException {
try (Table meta = getMetaHTable(connection)) {
- Put put = makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime());
+ Put put = makePutFromRegionInfo(regionInfo);
addDaughtersToPut(put, splitA, splitB);
meta.put(put);
debugLogMutation(put);
@@ -1483,7 +1503,7 @@ private static void addRegionsToMeta(Connection connection, List reg
if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
Put put = makePutFromRegionInfo(regionInfo, ts);
// New regions are added with initial state of CLOSED.
- addRegionStateToPut(put, RegionState.State.CLOSED);
+ addRegionStateToPut(put, regionInfo.getReplicaId(), RegionState.State.CLOSED);
// Add empty locations for region replicas so that number of replicas can be cached
// whenever the primary region is looked up from meta
for (int i = 1; i < regionReplication; i++) {
@@ -1548,7 +1568,7 @@ public static void mergeRegions(Connection connection, RegionInfo mergedRegion,
// default OFFLINE state. If Master gets restarted after this step, start up sequence of
// master tries to assign this offline region. This is followed by re-assignments of the
// merged region from resumed {@link MergeTableRegionsProcedure}
- addRegionStateToPut(putOfMerged, RegionState.State.CLOSED);
+ addRegionStateToPut(putOfMerged, RegionInfo.DEFAULT_REPLICA_ID, RegionState.State.CLOSED);
mutations.add(putOfMerged);
// The merged is a new region, openSeqNum = 1 is fine. ServerName may be null
// if crash after merge happened but before we got to here.. means in-memory
@@ -1606,8 +1626,8 @@ public static void splitRegion(Connection connection, RegionInfo parent, long pa
// default OFFLINE state. If Master gets restarted after this step, start up sequence of
// master tries to assign these offline regions. This is followed by re-assignments of the
// daughter regions from resumed {@link SplitTableRegionProcedure}
- addRegionStateToPut(putA, RegionState.State.CLOSED);
- addRegionStateToPut(putB, RegionState.State.CLOSED);
+ addRegionStateToPut(putA, RegionInfo.DEFAULT_REPLICA_ID, RegionState.State.CLOSED);
+ addRegionStateToPut(putB, RegionInfo.DEFAULT_REPLICA_ID, RegionState.State.CLOSED);
addSequenceNum(putA, 1, splitA.getReplicaId()); // new regions, openSeqNum = 1 is fine.
addSequenceNum(putB, 1, splitB.getReplicaId());
@@ -1820,7 +1840,7 @@ public static void deleteMergeQualifiers(Connection connection, final RegionInfo
throws IOException {
Delete delete = new Delete(mergeRegion.getRegionName());
// NOTE: We are doing a new hbase:meta read here.
- Cell[] cells = getRegionResult(connection, mergeRegion.getRegionName()).rawCells();
+ Cell[] cells = getRegionResult(connection, mergeRegion).rawCells();
if (cells == null || cells.length == 0) {
return;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 34e5b8f92a4a..e9371d7de89c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -33,6 +33,7 @@
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
@@ -50,7 +51,6 @@
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
-import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -71,6 +71,7 @@
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
@@ -1791,15 +1792,10 @@ private void checkHFileFormatVersionForMob() throws IOException {
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetRegionInfoResponse getRegionInfo(final RpcController controller,
final GetRegionInfoRequest request) throws ServiceException {
- RegionInfo ri = null;
- try {
- ri = getRegionInfo(request.getRegion());
- } catch (UnknownRegionException ure) {
- throw new ServiceException(ure);
- }
- GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
- if (ri != null) {
- builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri));
+ final GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
+ final RegionInfo info = getRegionInfo(request.getRegion());
+ if (info != null) {
+ builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
} else {
// Is it a MOB name? These work differently.
byte[] regionName = request.getRegion().getValue().toByteArray();
@@ -2437,64 +2433,69 @@ public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controlle
SetRegionStateInMetaRequest request) throws ServiceException {
rpcPreCheck("setRegionStateInMeta");
SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
+ final AssignmentManager am = master.getAssignmentManager();
try {
for (RegionSpecifierAndState s : request.getStatesList()) {
- RegionSpecifier spec = s.getRegionSpecifier();
- String encodedName;
- if (spec.getType() == RegionSpecifierType.ENCODED_REGION_NAME) {
- encodedName = spec.getValue().toStringUtf8();
- } else {
- // TODO: actually, a full region name can save a lot on meta scan, improve later.
- encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray());
+ final RegionSpecifier spec = s.getRegionSpecifier();
+ final RegionInfo targetRegionInfo = getRegionInfo(spec);
+ final RegionState.State targetState = RegionState.State.convert(s.getState());
+ final RegionState.State currentState = Optional.ofNullable(targetRegionInfo)
+ .map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState)
+ .orElseThrow(
+ () -> new ServiceException("No existing state known for region '" + spec + "'."));
+ LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(),
+ targetRegionInfo, currentState, targetState);
+ if (currentState == targetState) {
+ LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState);
+ continue;
}
- RegionInfo info = this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
- LOG.trace("region info loaded from meta table: {}", info);
- RegionState prevState =
- this.master.getAssignmentManager().getRegionStates().getRegionState(info);
- RegionState.State newState = RegionState.State.convert(s.getState());
- LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
- prevState.getState(), newState);
- Put metaPut =
- MetaTableAccessor.makePutFromRegionInfo(info, EnvironmentEdgeManager.currentTime());
- metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
- Bytes.toBytes(newState.name()));
- List<Put> putList = new ArrayList<>();
- putList.add(metaPut);
- MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList);
+ MetaTableAccessor.updateRegionState(master.getConnection(), targetRegionInfo, targetState);
// Loads from meta again to refresh AM cache with the new region state
- this.master.getAssignmentManager().loadRegionFromMeta(encodedName);
+ am.populateRegionStatesFromMeta(targetRegionInfo);
builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
- .setState(prevState.getState().convert()));
+ .setState(currentState.convert()));
}
- } catch (Exception e) {
+ } catch (IOException e) {
throw new ServiceException(e);
}
return builder.build();
}
/**
- * Get RegionInfo from Master using content of RegionSpecifier as key.
- * @return RegionInfo found by decoding <code>rs</code> or null if none found
+ * Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key.
+ * @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} is
+ * unknown to the master.
+ * @throws ServiceException If some error occurs while querying META or parsing results.
*/
- private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws UnknownRegionException {
- RegionInfo ri = null;
+ private RegionInfo getRegionInfo(HBaseProtos.RegionSpecifier rs) throws ServiceException {
+ // TODO: this doesn't handle MOB regions. Should it? See the public method #getRegionInfo
+ final AssignmentManager am = master.getAssignmentManager();
+ final String encodedRegionName;
+ final RegionInfo info;
+ // first try resolving from the AM's caches.
switch (rs.getType()) {
case REGION_NAME:
final byte[] regionName = rs.getValue().toByteArray();
- ri = this.master.getAssignmentManager().getRegionInfo(regionName);
+ encodedRegionName = RegionInfo.encodeRegionName(regionName);
+ info = am.getRegionInfo(regionName);
break;
case ENCODED_REGION_NAME:
- String encodedRegionName = Bytes.toString(rs.getValue().toByteArray());
- RegionState regionState =
- this.master.getAssignmentManager().getRegionStates().getRegionState(encodedRegionName);
- ri = regionState == null
- ? this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName)
- : regionState.getRegion();
+ encodedRegionName = rs.getValue().toStringUtf8();
+ info = am.getRegionInfo(encodedRegionName);
break;
default:
- break;
+ throw new IllegalArgumentException("Unrecognized RegionSpecifierType " + rs.getType());
}
- return ri;
+ if (info != null) {
+ return info;
+ }
+ // fall back to a meta scan and check the cache again.
+ try {
+ am.populateRegionStatesFromMeta(encodedRegionName);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return am.getRegionInfo(encodedRegionName);
}
/**
@@ -2514,28 +2515,22 @@ private void checkMasterProcedureExecutor() throws ServiceException {
public MasterProtos.AssignsResponse assigns(RpcController controller,
MasterProtos.AssignsRequest request) throws ServiceException {
checkMasterProcedureExecutor();
+ final ProcedureExecutor<MasterProcedureEnv> pe = master.getMasterProcedureExecutor();
+ final AssignmentManager am = master.getAssignmentManager();
MasterProtos.AssignsResponse.Builder responseBuilder =
MasterProtos.AssignsResponse.newBuilder();
- try {
- boolean override = request.getOverride();
- LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
- for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
- long pid = Procedure.NO_PROC_ID;
- RegionInfo ri = getRegionInfo(rs);
- if (ri == null) {
- LOG.info("Unknown={}", rs);
- } else {
- Procedure p = this.master.getAssignmentManager().createOneAssignProcedure(ri, override);
- if (p != null) {
- pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
- }
- }
- responseBuilder.addPid(pid);
+ final boolean override = request.getOverride();
+ LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
+ for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
+ final RegionInfo info = getRegionInfo(rs);
+ if (info == null) {
+ LOG.info("Unknown region {}", rs);
+ continue;
}
- return responseBuilder.build();
- } catch (IOException ioe) {
- throw new ServiceException(ioe);
+ responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override))
+ .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
}
+ return responseBuilder.build();
}
/**
@@ -2547,35 +2542,29 @@ public MasterProtos.AssignsResponse assigns(RpcController controller,
public MasterProtos.UnassignsResponse unassigns(RpcController controller,
MasterProtos.UnassignsRequest request) throws ServiceException {
checkMasterProcedureExecutor();
+ final ProcedureExecutor<MasterProcedureEnv> pe = master.getMasterProcedureExecutor();
+ final AssignmentManager am = master.getAssignmentManager();
MasterProtos.UnassignsResponse.Builder responseBuilder =
MasterProtos.UnassignsResponse.newBuilder();
- try {
- boolean override = request.getOverride();
- LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
- for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
- long pid = Procedure.NO_PROC_ID;
- RegionInfo ri = getRegionInfo(rs);
- if (ri == null) {
- LOG.info("Unknown={}", rs);
- } else {
- Procedure p = this.master.getAssignmentManager().createOneUnassignProcedure(ri, override);
- if (p != null) {
- pid = this.master.getMasterProcedureExecutor().submitProcedure(p);
- }
- }
- responseBuilder.addPid(pid);
+ final boolean override = request.getOverride();
+ LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override);
+ for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
+ final RegionInfo info = getRegionInfo(rs);
+ if (info == null) {
+ LOG.info("Unknown region {}", rs);
+ continue;
}
- return responseBuilder.build();
- } catch (IOException ioe) {
- throw new ServiceException(ioe);
+ responseBuilder.addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override))
+ .map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
}
+ return responseBuilder.build();
}
/**
* Bypass specified procedure to completion. Procedure is marked completed but no actual work is
* done from the current state/ step onwards. Parents of the procedure are also marked for bypass.
* NOTE: this is a dangerous operation and may be used to unstuck buggy procedures. This may leave
- * system in inconherent state. This may need to be followed by some cleanup steps/ actions by
+ * system in incoherent state. This may need to be followed by some cleanup steps/ actions by
* operator.
* @return BypassProcedureToCompletionResponse indicating success or failure
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 6257edf04f77..d8da72da30ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.master.assignment;
+import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -1693,26 +1694,29 @@ public void visitRegionState(Result result, final RegionInfo regionInfo, final S
};
/**
- * Query META if the given <code>RegionInfo</code> exists, adding to
- * <code>AssignmentManager.regionStateStore</code> cache if the region is found in META.
- * @param regionEncodedName encoded name for the region to be loaded from META into
- * <code>AssignmentManager.regionStateStore</code> cache
- * @return <code>RegionInfo</code> instance for the given region if it is present in META and got
- * successfully loaded into <code>AssignmentManager.regionStateStore</code> cache,
- * <code>null</code> otherwise.
- * @throws UnknownRegionException if any errors occur while querying meta.
+ * Attempt to load {@code regionInfo} from META, adding any results to the
+ * {@link #regionStateStore}. Is NOT aware of replica regions.
+ * @param regionInfo the region to be loaded from META.
+ * @throws IOException If some error occurs while querying META or parsing results.
*/
- public RegionInfo loadRegionFromMeta(String regionEncodedName) throws UnknownRegionException {
- try {
- RegionMetaLoadingVisitor visitor = new RegionMetaLoadingVisitor();
- regionStateStore.visitMetaForRegion(regionEncodedName, visitor);
- return regionStates.getRegionState(regionEncodedName) == null
- ? null
- : regionStates.getRegionState(regionEncodedName).getRegion();
- } catch (IOException e) {
- throw new UnknownRegionException(
- "Error trying to load region " + regionEncodedName + " from META", e);
- }
+ public void populateRegionStatesFromMeta(@NonNull final RegionInfo regionInfo)
+ throws IOException {
+ final String regionEncodedName = RegionInfo.DEFAULT_REPLICA_ID == regionInfo.getReplicaId()
+ ? regionInfo.getEncodedName()
+ : RegionInfoBuilder.newBuilder(regionInfo).setReplicaId(RegionInfo.DEFAULT_REPLICA_ID).build()
+ .getEncodedName();
+ populateRegionStatesFromMeta(regionEncodedName);
+ }
+
+ /**
+ * Attempt to load {@code regionEncodedName} from META, adding any results to the
+ * {@link #regionStateStore}. Is NOT aware of replica regions.
+ * @param regionEncodedName encoded name for the region to be loaded from META.
+ * @throws IOException If some error occurs while querying META or parsing results.
+ */
+ public void populateRegionStatesFromMeta(@NonNull String regionEncodedName) throws IOException {
+ final RegionMetaLoadingVisitor visitor = new RegionMetaLoadingVisitor();
+ regionStateStore.visitMetaForRegion(regionEncodedName, visitor);
}
private void loadMeta() throws IOException {
@@ -1866,11 +1870,23 @@ public List<RegionInfo> getAssignedRegions() {
return regionStates.getAssignedRegions();
}
+ /**
+ * Resolve a cached {@link RegionInfo} from the region name as a {@code byte[]}.
+ */
public RegionInfo getRegionInfo(final byte[] regionName) {
final RegionStateNode regionState = regionStates.getRegionStateNodeFromName(regionName);
return regionState != null ? regionState.getRegionInfo() : null;
}
+ /**
+ * Resolve a cached {@link RegionInfo} from the encoded region name as a {@code String}.
+ */
+ public RegionInfo getRegionInfo(final String encodedRegionName) {
+ final RegionStateNode regionState =
+ regionStates.getRegionStateNodeFromEncodedRegionName(encodedRegionName);
+ return regionState != null ? regionState.getRegionInfo() : null;
+ }
+
// ============================================================================================
// Expected states on region state transition.
// Notice that there is expected states for transiting to OPENING state, this is because SCP.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java
index 4155247071b4..2b769208de7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java
@@ -100,8 +100,8 @@ protected Flow executeFromState(MasterProcedureEnv env, GCMergedRegionsState sta
// If GCMultipleMergedRegionsProcedure processing is slower than the CatalogJanitor's scan
// interval, it will end resubmitting GCMultipleMergedRegionsProcedure for the same
// region. We can skip duplicate GCMultipleMergedRegionsProcedure while previous finished
- List<RegionInfo> parents = MetaTableAccessor
- .getMergeRegions(env.getMasterServices().getConnection(), mergedChild.getRegionName());
+ List<RegionInfo> parents =
+ MetaTableAccessor.getMergeRegions(env.getMasterServices().getConnection(), mergedChild);
if (parents == null || parents.isEmpty()) {
LOG.info("{} mergeXXX qualifiers have ALL been deleted",
mergedChild.getShortNameToLog());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 2be1e2a30b18..813caa47d339 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -469,10 +469,7 @@ private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOExcept
RegionStates regionStates = env.getAssignmentManager().getRegionStates();
for (RegionInfo ri : this.regionsToMerge) {
- if (
- MetaTableAccessor.hasMergeRegions(env.getMasterServices().getConnection(),
- ri.getRegionName())
- ) {
+ if (MetaTableAccessor.hasMergeRegions(env.getMasterServices().getConnection(), ri)) {
String msg = "Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge)
+ ", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", has a merge qualifier "
+ "(if a 'merge column' in parent, it was recently merged but still has outstanding "
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 56132558fb92..35eb87b38184 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -110,9 +110,9 @@ public boolean visit(final Result r) throws IOException {
/**
* Queries META table for the passed region encoded name, delegating action upon results to the
- * <code>RegionStateVisitor</code> passed as second parameter.
+ * {@code RegionStateVisitor} passed as second parameter.
* @param regionEncodedName encoded name for the Region we want to query META for.
- * @param visitor The <code>RegionStateVisitor</code> instance to react over the query
+ * @param visitor The {@code RegionStateVisitor} instance to react over the query
* results.
* @throws IOException If some error occurs while querying META or parsing results.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index b1c11af2b9bf..adf376f68e09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -72,7 +72,7 @@ public int compare(final RegionState l, final RegionState r) {
// TODO: Replace the ConcurrentSkipListMaps
/**
- * RegionName -- i.e. RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
+ * A Map from {@link RegionInfo#getRegionName()} to {@link RegionStateNode}
*/
private final ConcurrentSkipListMap<byte[], RegionStateNode> regionsMap =
new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
@@ -144,6 +144,10 @@ public RegionStateNode getRegionStateNodeFromName(byte[] regionName) {
return regionsMap.get(regionName);
}
+ public RegionStateNode getRegionStateNodeFromEncodedRegionName(final String encodedRegionName) {
+ return encodedRegionsMap.get(encodedRegionName);
+ }
+
public RegionStateNode getRegionStateNode(RegionInfo regionInfo) {
return getRegionStateNodeFromName(regionInfo.getRegionName());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index d5ac2a327db8..95aab682e224 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -279,8 +279,8 @@ protected static void deleteFromFs(final MasterProcedureEnv env, final TableName
for (RegionInfo region : regions) {
if (RegionReplicaUtil.isDefaultReplica(region)) {
regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, region));
- List<RegionInfo> mergeRegions = MetaTableAccessor
- .getMergeRegions(env.getMasterServices().getConnection(), region.getRegionName());
+ List<RegionInfo> mergeRegions =
+ MetaTableAccessor.getMergeRegions(env.getMasterServices().getConnection(), region);
if (!CollectionUtils.isEmpty(mergeRegions)) {
mergeRegions.stream()
.forEach(r -> regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, r)));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 1baed53a98f9..657f1fc9305a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1918,7 +1918,7 @@ private void resetSplitParent(HbckRegionInfo hi) throws IOException {
RegionInfo hri =
RegionInfoBuilder.newBuilder(hi.getMetaEntry()).setOffline(false).setSplit(false).build();
- Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
mutations.add(p);
meta.mutateRow(mutations);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index b7f7d394e9de..df22cfbf2269 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -146,7 +146,7 @@ public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInf
Collection<ServerName> servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
Table meta = conn.getTable(TableName.META_TABLE_NAME);
- Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
+ Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random rand = ThreadLocalRandom.current();
ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 47e375382809..08c69bc99ad8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -133,20 +133,19 @@ public void testGetMergeRegions() throws Exception {
RegionInfo mergedRegion0 = mergedRegions.get(0);
RegionInfo mergedRegion1 = mergedRegions.get(1);
- List<RegionInfo> mergeParents =
- MetaTableAccessor.getMergeRegions(connection, mergedRegion0.getRegionName());
+ List<RegionInfo> mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion0);
assertTrue(mergeParents.contains(regions.get(0)));
assertTrue(mergeParents.contains(regions.get(1)));
- mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1.getRegionName());
+ mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1);
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
// Delete merge qualifiers for mergedRegion0, then cannot getMergeRegions again
MetaTableAccessor.deleteMergeQualifiers(connection, mergedRegion0);
- mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion0.getRegionName());
+ mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion0);
assertNull(mergeParents);
- mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1.getRegionName());
+ mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1);
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java
index 1da0b3d015fd..1cb24ed33ace 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java
@@ -142,7 +142,7 @@ public void testMergeRegionOrder() throws Exception {
RegionInfo mergedRegion = mergedRegions.get(0);
List<RegionInfo> mergeParentRegions =
- MetaTableAccessor.getMergeRegions(UTIL.getConnection(), mergedRegion.getRegionName());
+ MetaTableAccessor.getMergeRegions(UTIL.getConnection(), mergedRegion);
assertEquals(mergeParentRegions.size(), regionCount);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
index 7656ab225012..d5c4b066a160 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@@ -57,7 +58,6 @@
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -119,7 +119,7 @@ private Hbck getHbck() throws Exception {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(3);
- TEST_UTIL.createMultiRegionTable(TABLE_NAME, Bytes.toBytes("family1"), 5);
+ TEST_UTIL.createMultiRegionTable(TABLE_NAME, 3, new byte[][] { Bytes.toBytes("family1") });
procExec = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost().load(
@@ -189,7 +189,7 @@ public void testSetTableStateInMeta() throws Exception {
// will be DISABLED
TableState prevState =
hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
- assertTrue("Incorrect previous state! expeced=DISABLED, found=" + prevState.getState(),
+ assertTrue("Incorrect previous state! expected=DISABLED, found=" + prevState.getState(),
prevState.isDisabled());
}
@@ -197,48 +197,57 @@ public void testSetTableStateInMeta() throws Exception {
public void testSetRegionStateInMeta() throws Exception {
Hbck hbck = getHbck();
Admin admin = TEST_UTIL.getAdmin();
+ TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
final List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
- Map<String, RegionState.State> prevStates = new HashMap<>();
- Map<String, RegionState.State> newStates = new HashMap<>();
- final Map<String, Pair<RegionState.State, RegionState.State>> regionsMap = new HashMap<>();
+ final Map<String, RegionState.State> beforeStates = new HashMap<>();
+ final Map<String, RegionState.State> requestStates = new HashMap<>();
regions.forEach(r -> {
- RegionState prevState = am.getRegionStates().getRegionState(r);
- prevStates.put(r.getEncodedName(), prevState.getState());
- newStates.put(r.getEncodedName(), RegionState.State.CLOSED);
- regionsMap.put(r.getEncodedName(),
- new Pair<>(prevState.getState(), RegionState.State.CLOSED));
+ RegionState beforeState = am.getRegionStates().getRegionState(r);
+ beforeStates.put(r.getEncodedName(), beforeState.getState());
+ LOG.debug("Before test: {} ; {}", r, beforeState.getState());
+ requestStates.put(r.getEncodedName(), RegionState.State.CLOSED);
});
- final Map<String, RegionState.State> result = hbck.setRegionStateInMeta(newStates);
- result.forEach((k, v) -> {
- RegionState.State prevState = regionsMap.get(k).getFirst();
- assertEquals(prevState, v);
- });
- regions.forEach(r -> {
- RegionState cachedState = am.getRegionStates().getRegionState(r.getEncodedName());
- RegionState.State newState = regionsMap.get(r.getEncodedName()).getSecond();
- assertEquals(newState, cachedState.getState());
- });
- hbck.setRegionStateInMeta(prevStates);
+ final Callable doTest = () -> {
+ // run the entire test with the ProcedureExecution environment paused. This prevents
+ // background operations from modifying AM internal state between the assertions this test
+ // relies upon.
+ Map<String, RegionState.State> result = hbck.setRegionStateInMeta(requestStates);
+ result.forEach((k, v) -> {
+ RegionState.State beforeState = beforeStates.get(k);
+ assertEquals("response state should match before state; " + k, beforeState, v);
+ });
+ regions.forEach(r -> {
+ RegionState afterState = am.getRegionStates().getRegionState(r.getEncodedName());
+ RegionState.State expectedState = requestStates.get(r.getEncodedName());
+ LOG.debug("After test: {}, {}", r, afterState);
+ assertEquals("state in AM should match requested state ; " + r, expectedState,
+ afterState.getState());
+ });
+ return null;
+ };
+ ProcedureTestingUtility.restart(procExec, true, true, null, doTest, null, false, true);
+ // restore the table as we found it -- fragile?
+ hbck.setRegionStateInMeta(beforeStates);
}
@Test
public void testAssigns() throws Exception {
Hbck hbck = getHbck();
+ final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
- List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
- for (RegionInfo ri : regions) {
- RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
- .getRegionStates().getRegionState(ri.getEncodedName());
- LOG.info("RS: {}", rs.toString());
- }
- List<Long> pids =
- hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
+ List<RegionInfo> regions = admin.getRegions(TABLE_NAME).stream()
+ .filter(ri -> ri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID).peek(ri -> {
+ final RegionState rs = am.getRegionStates().getRegionState(ri.getEncodedName());
+ LOG.info("RS: {}", rs);
+ }).collect(Collectors.toList());
+ List<Long> pids = hbck
+ .unassigns(regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()));
waitOnPids(pids);
// Rerun the unassign. Should fail for all Regions since they already unassigned; failed
// unassign will manifest as all pids being -1 (ever since HBASE-24885).
- pids =
- hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
+ pids = hbck
+ .unassigns(regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()));
waitOnPids(pids);
for (long pid : pids) {
assertEquals(Procedure.NO_PROC_ID, pid);
@@ -247,7 +256,7 @@ public void testAssigns() throws Exception {
// unassigned.... makes for a mess but operator might want to do this at an extreme when
// doing fixup of broke cluster.
pids = hbck.unassigns(
- regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()), true);
+ regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()), true);
waitOnPids(pids);
for (long pid : pids) {
assertNotEquals(Procedure.NO_PROC_ID, pid);
@@ -261,12 +270,12 @@ public void testAssigns() throws Exception {
assertTrue(rs.toString(), rs.isClosed());
}
pids =
- hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
+ hbck.assigns(regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()));
waitOnPids(pids);
// Rerun the assign. Should fail for all Regions since they already assigned; failed
// assign will manifest as all pids being -1 (ever since HBASE-24885).
pids =
- hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
+ hbck.assigns(regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()));
for (long pid : pids) {
assertEquals(Procedure.NO_PROC_ID, pid);
}
@@ -288,19 +297,21 @@ public void testAssigns() throws Exception {
@Test
public void testScheduleSCP() throws Exception {
HRegionServer testRs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
- TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), Bytes.toBytes("family1"),
- true);
+ try (final Table t = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
+ TEST_UTIL.loadTable(t, Bytes.toBytes("family1"), true);
+ }
ServerName serverName = testRs.getServerName();
Hbck hbck = getHbck();
List<Long> pids =
hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
- assertTrue(pids.get(0) > 0);
- LOG.info("pid is {}", pids.get(0));
+ assertEquals(1, pids.size());
+ assertNotEquals((Long) Procedure.NO_PROC_ID, pids.get(0));
+ LOG.debug("SCP pid is {}", pids.get(0));
List<Long> newPids =
hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
- assertTrue(newPids.get(0) < 0);
- LOG.info("pid is {}", newPids.get(0));
+ assertEquals(1, newPids.size());
+ assertEquals((Long) Procedure.NO_PROC_ID, newPids.get(0));
waitOnPids(pids);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
index d2c34a55a7d3..4801ec3e3a9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
@@ -140,7 +140,7 @@ public void testRegionReopensWithStoreRefConfig() throws Exception {
// Verify that we need to reopen total 3 regions that have refCount > 300
Mockito.verify(hMaster, Mockito.times(3)).getAssignmentManager();
- Mockito.verify(assignmentManager, Mockito.times(3)).getRegionInfo(Mockito.any());
+ Mockito.verify(assignmentManager, Mockito.times(3)).getRegionInfo(Mockito.any(byte[].class));
}
@Test
@@ -175,7 +175,7 @@ public void testRegionReopensWithLessThreshold() throws Exception {
// Verify that we need to reopen only 1 region with refCount > 400
Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager();
- Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any());
+ Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any(byte[].class));
}
@Test
@@ -210,7 +210,7 @@ public void testRegionReopensWithoutStoreRefConfig() throws Exception {
// default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM)
Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager();
- Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any());
+ Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any(byte[].class));
}
private static ClusterMetrics getClusterMetrics(int noOfLiveServer) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index c203288d6448..025aebb3e082 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.master.assignment;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -303,12 +304,13 @@ public void testLoadRegionFromMetaAfterRegionManuallyAdded() throws Exception {
+ "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri));
MetaTableAccessor.addRegionsToMeta(this.util.getConnection(), Collections.singletonList(hri),
1);
- assertNull(
- "RegionInfo was manually added in META, but " + "shouldn't be in AM regionStates yet.",
+ // TODO: is there a race here -- no other thread will refresh the table states behind
+ // the scenes?
+ assertNull("RegionInfo was manually added in META, but shouldn't be in AM regionStates yet.",
am.getRegionStates().getRegionState(hri));
- hri = am.loadRegionFromMeta(hri.getEncodedName());
- assertEquals(hri.getEncodedName(),
- am.getRegionStates().getRegionState(hri).getRegion().getEncodedName());
+ am.populateRegionStatesFromMeta(hri.getEncodedName());
+ assertNotNull(am.getRegionInfo(hri.getRegionName()));
+ assertNotNull(am.getRegionInfo(hri.getEncodedName()));
} finally {
this.util.killMiniHBaseCluster();
}
@@ -322,10 +324,10 @@ public void testLoadRegionFromMetaRegionNotInMeta() throws Exception {
final TableName tableName = TableName.valueOf("testLoadRegionFromMetaRegionNotInMeta");
this.util.createTable(tableName, "f");
final RegionInfo hri = createRegionInfo(tableName, 1);
- assertNull("RegionInfo was just instantiated by the test, but "
- + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri));
- assertNull("RegionInfo was never added in META, should had returned null.",
- am.loadRegionFromMeta(hri.getEncodedName()));
+ assertNull("Bogus RegionInfo discovered in RegionStates.",
+ am.getRegionStates().getRegionState(hri));
+ am.populateRegionStatesFromMeta(hri.getEncodedName());
+ assertNull("RegionInfo was never added in META", am.getRegionStates().getRegionState(hri));
} finally {
this.util.killMiniHBaseCluster();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java
index 8e0d6acef884..41267a19373a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java
@@ -205,10 +205,10 @@ private void testMerge(TableName tableName, int mergeCount) throws IOException {
// the merged regions cleanup.
UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true);
UTIL.getHBaseCluster().getMaster().getCatalogJanitor().triggerNow();
- byte[] mergedRegion = proc.getMergedRegion().getRegionName();
+ RegionInfo mergedRegion = proc.getMergedRegion();
while (ris != null && ris.get(0) != null && ris.get(1) != null) {
ris = MetaTableAccessor.getMergeRegions(UTIL.getConnection(), mergedRegion);
- LOG.info("{} {}", Bytes.toStringBinary(mergedRegion), ris);
+ LOG.info("{} {}", Bytes.toStringBinary(mergedRegion.getRegionName()), ris);
Threads.sleep(1000);
}
assertEquals(countOfRowsLoaded, UTIL.countRows(tableName));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
index bbcbcccbf83f..ea4c93d6de20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
@@ -328,10 +328,10 @@ public void testOverlapWithSmallMergeCount() throws Exception {
&& regionStates.getRegionState(pair.getSecond()).isOpened())
) {
// Make sure GC is done.
- List<RegionInfo> firstParents = MetaTableAccessor
- .getMergeRegions(services.getConnection(), pair.getFirst().getRegionName());
- List<RegionInfo> secondParents = MetaTableAccessor
- .getMergeRegions(services.getConnection(), pair.getSecond().getRegionName());
+ List<RegionInfo> firstParents =
+ MetaTableAccessor.getMergeRegions(services.getConnection(), pair.getFirst());
+ List<RegionInfo> secondParents =
+ MetaTableAccessor.getMergeRegions(services.getConnection(), pair.getSecond());
return (firstParents == null || firstParents.isEmpty())
&& (secondParents == null || secondParents.isEmpty());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
index bd355d9ace13..2303cb1965e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
@@ -25,6 +25,9 @@
import java.io.IOException;
import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -32,23 +35,26 @@
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -62,6 +68,7 @@
* Regions that were on the server-to-process rather than consult Master in-memory-state.
*/
@Category({ MasterTests.class, LargeTests.class })
+@RunWith(Parameterized.class)
public class TestHBCKSCP extends TestSCPBase {
private static final Logger LOG = LoggerFactory.getLogger(TestHBCKSCP.class);
@@ -69,7 +76,27 @@ public class TestHBCKSCP extends TestSCPBase {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHBCKSCP.class);
@Rule
- public TestName name = new TestName();
+ public TableNameTestRule tableNameTestRule = new TableNameTestRule();
+
+ private final int replicas;
+ private final HBCKSCPScheduler hbckscpScheduler;
+ private final RegionSelector regionSelector;
+
+ public TestHBCKSCP(final int replicas, final HBCKSCPScheduler hbckscpScheduler,
+ final RegionSelector regionSelector) {
+ this.replicas = replicas;
+ this.hbckscpScheduler = hbckscpScheduler;
+ this.regionSelector = regionSelector;
+ }
+
+ @Parameterized.Parameters(name = "replicas:{0} scheduler:{1} selector:{2}")
+ public static Object[][] params() {
+ return new Object[][] {
+ { 1, new ScheduleServerCrashProcedure(), new PrimaryNotMetaRegionSelector() },
+ { 3, new ScheduleServerCrashProcedure(), new ReplicaNonMetaRegionSelector() },
+ { 1, new ScheduleSCPsForUnknownServers(), new PrimaryNotMetaRegionSelector() },
+ { 3, new ScheduleSCPsForUnknownServers(), new ReplicaNonMetaRegionSelector() } };
+ }
@Test
public void test() throws Exception {
@@ -80,7 +107,7 @@ public void test() throws Exception {
assertEquals(RS_COUNT, cluster.getLiveRegionServerThreads().size());
int count;
- try (Table table = createTable(TableName.valueOf(this.name.getMethodName()))) {
+ try (Table table = createTable(tableNameTestRule.getTableName())) {
// Load the table with a bit of data so some logs to split and some edits in each region.
this.util.loadTable(table, HBaseTestingUtility.COLUMNS[0]);
count = util.countRows(table);
@@ -91,17 +118,24 @@ public void test() throws Exception {
// Find another RS. Purge it from Master memory w/o running SCP (if
// SCP runs, it will clear entries from hbase:meta which frustrates
// our attempt at manufacturing 'Unknown Servers' condition).
- int metaIndex = this.util.getMiniHBaseCluster().getServerWithMeta();
- int rsIndex = (metaIndex + 1) % RS_COUNT;
- ServerName rsServerName = cluster.getRegionServer(rsIndex).getServerName();
+ final ServerName metaServer = util.getMiniHBaseCluster().getServerHoldingMeta();
+ final ServerName rsServerName = cluster.getRegionServerThreads().stream()
+ .map(JVMClusterUtil.RegionServerThread::getRegionServer).map(HRegionServer::getServerName)
+ .filter(sn -> !sn.equals(metaServer)).findAny().orElseThrow(() -> new NoSuchElementException(
+ "Cannot locate a region server that is not hosting meta."));
HMaster master = cluster.getMaster();
// Get a Region that is on the server.
- RegionInfo rsRI = master.getAssignmentManager().getRegionsOnServer(rsServerName).get(0);
- Result r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
+ final List<RegionInfo> regions = master.getAssignmentManager().getRegionsOnServer(rsServerName);
+ LOG.debug("{} is holding {} regions.", rsServerName, regions.size());
+ final RegionInfo rsRI =
+ regions.stream().peek(info -> LOG.debug("{}", info)).filter(regionSelector::regionFilter)
+ .findAny().orElseThrow(regionSelector::regionFilterFailure);
+ final int replicaId = rsRI.getReplicaId();
+ Result r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI);
// Assert region is OPEN.
- assertEquals(RegionState.State.OPEN.toString(),
- Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
- ServerName serverName = MetaTableAccessor.getServerName(r, 0);
+ assertEquals(RegionState.State.OPEN.toString(), Bytes.toString(
+ r.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getRegionStateColumn(replicaId))));
+ ServerName serverName = MetaTableAccessor.getServerName(r, replicaId);
assertEquals(rsServerName, serverName);
// moveFrom adds to dead servers and adds it to processing list only we will
// not be processing this server 'normally'. Remove it from processing by
@@ -117,18 +151,16 @@ public void test() throws Exception {
// Kill the server. Nothing should happen since an 'Unknown Server' as far
// as the Master is concerned; i.e. no SCP.
HRegionServer hrs = cluster.getRegionServer(rsServerName);
- while (!hrs.isStopped()) {
- Threads.sleep(10);
- }
+ util.waitFor(TimeUnit.MINUTES.toMillis(1), hrs::isStopped);
LOG.info("Dead {}", rsServerName);
// Now assert still references in hbase:meta to the 'dead' server -- they haven't been
// cleaned up by an SCP or by anything else.
assertTrue(searchMeta(master, rsServerName));
// Assert region is OPEN on dead server still.
- r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
- assertEquals(RegionState.State.OPEN.toString(),
- Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
- serverName = MetaTableAccessor.getServerName(r, 0);
+ r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI);
+ assertEquals(RegionState.State.OPEN.toString(), Bytes.toString(
+ r.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getRegionStateColumn(replicaId))));
+ serverName = MetaTableAccessor.getServerName(r, replicaId);
assertNotNull(cluster.getRegionServer(serverName));
assertEquals(rsServerName, serverName);
@@ -136,13 +168,11 @@ public void test() throws Exception {
// with no corresponding SCP. Queue one.
long pid = scheduleHBCKSCP(rsServerName, master);
assertNotEquals(Procedure.NO_PROC_ID, pid);
- while (master.getMasterProcedureExecutor().getActiveProcIds().contains(pid)) {
- Threads.sleep(10);
- }
+ ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(), pid);
// After SCP, assert region is OPEN on new server.
- r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
- assertEquals(RegionState.State.OPEN.toString(),
- Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
+ r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI);
+ assertEquals(RegionState.State.OPEN.toString(), Bytes.toString(
+ r.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getRegionStateColumn(replicaId))));
serverName = MetaTableAccessor.getServerName(r, 0);
assertNotNull(cluster.getRegionServer(serverName));
assertNotEquals(rsServerName, serverName);
@@ -151,12 +181,12 @@ public void test() throws Exception {
}
protected long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException {
- MasterProtos.ScheduleServerCrashProcedureResponse response = master.getMasterRpcServices()
- .scheduleServerCrashProcedure(null, MasterProtos.ScheduleServerCrashProcedureRequest
- .newBuilder().addServerName(ProtobufUtil.toServerName(rsServerName)).build());
- assertEquals(1, response.getPidCount());
- long pid = response.getPid(0);
- return pid;
+ return hbckscpScheduler.scheduleHBCKSCP(rsServerName, master);
+ }
+
+ @Override
+ protected int getRegionReplication() {
+ return replicas;
}
/** Returns True if we find reference to {@code sn} in meta table. */
@@ -170,4 +200,90 @@ private boolean searchMeta(HMaster master, ServerName sn) throws IOException {
}
return false;
}
+
+ /**
+ * Encapsulates the choice of which HBCK2 method to call.
+ */
+ private abstract static class HBCKSCPScheduler {
+ abstract long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException;
+
+ @Override
+ public String toString() {
+ return this.getClass().getSimpleName();
+ }
+ }
+
+ /**
+ * Invokes {@code MasterRpcServices#scheduleServerCrashProcedure}.
+ */
+ private static class ScheduleServerCrashProcedure extends HBCKSCPScheduler {
+ @Override
+ public long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException {
+ MasterProtos.ScheduleServerCrashProcedureResponse response = master.getMasterRpcServices()
+ .scheduleServerCrashProcedure(null, MasterProtos.ScheduleServerCrashProcedureRequest
+ .newBuilder().addServerName(ProtobufUtil.toServerName(rsServerName)).build());
+ assertEquals(1, response.getPidCount());
+ return response.getPid(0);
+ }
+ }
+
+ /**
+ * Invokes {@code MasterRpcServices#scheduleSCPsForUnknownServers}.
+ */
+ private static class ScheduleSCPsForUnknownServers extends HBCKSCPScheduler {
+ @Override
+ long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException {
+ MasterProtos.ScheduleSCPsForUnknownServersResponse response =
+ master.getMasterRpcServices().scheduleSCPsForUnknownServers(null,
+ MasterProtos.ScheduleSCPsForUnknownServersRequest.newBuilder().build());
+ assertEquals(1, response.getPidCount());
+ return response.getPid(0);
+ }
+ }
+
+ /**
+ * Encapsulates how the target region is selected.
+ */
+ private abstract static class RegionSelector {
+ abstract boolean regionFilter(RegionInfo info);
+
+ abstract Exception regionFilterFailure();
+
+ @Override
+ public String toString() {
+ return this.getClass().getSimpleName();
+ }
+ }
+
+ /**
+ * Selects a non-meta region that is also a primary region.
+ */
+ private static class PrimaryNotMetaRegionSelector extends RegionSelector {
+ @Override
+ boolean regionFilter(final RegionInfo info) {
+ return !Objects.equals(TableName.META_TABLE_NAME, info.getTable())
+ && Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId());
+ }
+
+ @Override
+ Exception regionFilterFailure() {
+ return new NoSuchElementException("Cannot locate a primary, non-meta region.");
+ }
+ }
+
+ /**
+ * Selects a non-meta region that is also a replica region.
+ */
+ private static class ReplicaNonMetaRegionSelector extends RegionSelector {
+ @Override
+ boolean regionFilter(RegionInfo info) {
+ return !Objects.equals(TableName.META_TABLE_NAME, info.getTable())
+ && !Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId());
+ }
+
+ @Override
+ Exception regionFilterFailure() {
+ return new NoSuchElementException("Cannot locate a replica, non-meta region.");
+ }
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java
deleted file mode 100644
index 10c7a6ec7551..000000000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.procedure;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.junit.ClassRule;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-
-/**
- * Test of the HBCK-version of SCP. The HBCKSCP is an SCP only it reads hbase:meta for list of
- * Regions that were on the server-to-process rather than consult Master in-memory-state.
- */
-@Category({ MasterTests.class, LargeTests.class })
-public class TestHBCKSCPUnknown extends TestHBCKSCP {
-
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestHBCKSCPUnknown.class);
-
- @Override
- protected long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException {
- MasterProtos.ScheduleSCPsForUnknownServersResponse response =
- master.getMasterRpcServices().scheduleSCPsForUnknownServers(null,
- MasterProtos.ScheduleSCPsForUnknownServersRequest.newBuilder().build());
- assertEquals(1, response.getPidCount());
- long pid = response.getPid(0);
- return pid;
- }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 9b421cb93b4c..2f4bfbe7dbe8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -224,7 +224,7 @@ public void testCleanMergeReference() throws Exception {
RegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(tableName);
Result mergedRegionResult =
- MetaTableAccessor.getRegionResult(MASTER.getConnection(), mergedRegionInfo.getRegionName());
+ MetaTableAccessor.getRegionResult(MASTER.getConnection(), mergedRegionInfo);
// contains merge reference in META
assertTrue(MetaTableAccessor.hasMergeRegions(mergedRegionResult.rawCells()));
@@ -299,8 +299,8 @@ public void testCleanMergeReference() throws Exception {
// Wait around a bit to give stuff a chance to complete.
while (true) {
- mergedRegionResult = MetaTableAccessor.getRegionResult(TEST_UTIL.getConnection(),
- mergedRegionInfo.getRegionName());
+ mergedRegionResult =
+ MetaTableAccessor.getRegionResult(TEST_UTIL.getConnection(), mergedRegionInfo);
if (MetaTableAccessor.hasMergeRegions(mergedRegionResult.rawCells())) {
LOG.info("Waiting on cleanup of merge columns {}",
Arrays.asList(mergedRegionResult.rawCells()).stream().map(c -> c.toString())