diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index 0c94a20e3586..eff6ecb05dba 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -128,7 +128,6 @@ protected List balanceTable(TableName tableName, public void testBulkAssignment() throws Exception { List tmp = getListOfServerNames(randomServers(5, 0)); List hris = randomRegions(20); - hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO); tmp.add(master); Map> plans = loadBalancer.roundRobinAssignment(hris, tmp); int totalRegion = 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 0aa301c4c8cd..e2acd5e333d6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -110,14 +110,6 @@ private static int checkReplicaId(int regionId) { return regionId; } - /** - * Package private constructor used constructing MutableRegionInfo for the first meta regions - */ - MutableRegionInfo(long regionId, TableName tableName, int replicaId) { - this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, - replicaId, false); - } - MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, final boolean split, final long regionId, final int replicaId, boolean offLine) { this.tableName = checkTableName(tableName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index e691bb792a21..fc3da7473ad0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2426,13 +2426,6 @@ private CompletableFuture getRegionInfo(byte[] regionNameOrEncodedRe return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - if (Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || - Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { - return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO); - } - CompletableFuture future = new CompletableFuture<>(); addListener(getRegionLocation(regionNameOrEncodedRegionName), (location, err) -> { if (err != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 55a91db93979..f27f3ee355ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -73,8 +73,9 @@ public interface RegionInfo extends Comparable { @Deprecated @InterfaceAudience.Private // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 - RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), - RegionInfo.DEFAULT_REPLICA_ID); + RegionInfo UNDEFINED = + 
new MutableRegionInfo(TableName.valueOf("__UNDEFINED__"), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, 0, RegionInfo.DEFAULT_REPLICA_ID, false); /** * Separator used to demarcate the encodedName in a region name diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index cc42b96fb165..03434f3036b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -34,18 +34,6 @@ public class RegionInfoBuilder { public static final RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); - /** - * RegionInfo for first meta region - * You cannot use this builder to make an instance of the {@link #FIRST_META_REGIONINFO}. - * Just refer to this instance. Also, while the instance is actually a MutableRI, its type is - * just RI so the mutable methods are not available (unless you go casting); it appears - * as immutable (I tried adding Immutable type but it just makes a mess). - */ - // TODO: How come Meta regions still do not have encoded region names? Fix. - // hbase:meta,,1.1588230740 should be the hbase:meta first region name. - public static final RegionInfo FIRST_META_REGIONINFO = - new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID); - private final TableName tableName; private byte[] startKey = HConstants.EMPTY_START_ROW; private byte[] endKey = HConstants.EMPTY_END_ROW; @@ -111,5 +99,4 @@ public RegionInfo build() { return new MutableRegionInfo(tableName, startKey, endKey, split, regionId, replicaId, offLine); } - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index bf93776f5577..c79e9d677c7d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -18,9 +18,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; -import static org.apache.hadoop.hbase.client.RegionInfoBuilder.FIRST_META_REGIONINFO; -import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForDefaultReplica; -import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForReplica; import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic; import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; @@ -36,6 +33,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Pair; @@ -161,7 +159,8 @@ private void getMetaRegionLocation(CompletableFuture future, LOG.warn("Meta region is in state " + stateAndServerName.getFirst()); } locs[DEFAULT_REPLICA_ID] = new HRegionLocation( - getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond()); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build(), + 
stateAndServerName.getSecond()); tryComplete(remaining, locs, future); }); } else { @@ -183,8 +182,8 @@ private void getMetaRegionLocation(CompletableFuture future, locs[replicaId] = null; } else { locs[replicaId] = - new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), - stateAndServerName.getSecond()); + new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setRegionId(1).setReplicaId(replicaId).build(), stateAndServerName.getSecond()); } } tryComplete(remaining, locs, future); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b6918ca23a62..138b469cc35a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLoadStats; import org.apache.hadoop.hbase.client.RegionLocateType; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; @@ -3237,8 +3236,8 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic if (serverName == null) { state = RegionState.State.OFFLINE; } - return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); + return new RegionState(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1) + .setReplicaId(replicaId).build(), state, serverName); } /** @@ -3354,9 +3353,6 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; int replicaId = proto.hasReplicaId()? 
proto.getReplicaId(): defaultReplicaId; - if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { - return RegionInfoBuilder.FIRST_META_REGIONINFO; - } byte[] startKey = null; byte[] endKey = null; if (proto.hasStartKey()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 180d29479c7d..55084c0ae677 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -130,7 +130,7 @@ public static void setUpBeforeClass() { @Before public void setUp() throws IOException { - RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); locs = new RegionLocations( new HRegionLocation(metaRegionInfo, ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())), diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index 3b66f7eb2e60..1c3af9e8ab47 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -84,10 +84,10 @@ public void testBuilder() { @Test public void testPb() throws DeserializationException { - RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo ri = RegionInfoBuilder.newBuilder(name.getTableName()).build(); byte[] bytes = RegionInfo.toByteArray(ri); RegionInfo pbri = RegionInfo.parseFrom(bytes); - assertTrue(RegionInfo.COMPARATOR.compare(ri, pbri) == 0); + assertEquals(0, RegionInfo.COMPARATOR.compare(ri, pbri)); } @Test @@ -183,7 +183,7 @@ public void testLastRegionCompare() { @Test public void testMetaTables() { - assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isMetaRegion()); + assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isMetaRegion()); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 45da1e8560df..46107d126017 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -30,6 +30,8 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; @@ -106,13 +108,12 @@ public void testWrapConnectionException() throws Exception { if (exception instanceof TimeoutException) { assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class)); } else { - IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, - exception); + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + IOException ioe = IPCUtil.wrapException(addr, ri, exception); // Assert that the 
exception contains the Region name if supplied. HBASE-25735. // Not all exceptions get the region stuffed into it. if (ioe.getMessage() != null) { - assertTrue(ioe.getMessage(). - contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); + assertTrue(ioe.getMessage().contains(ri.getRegionNameAsString())); } assertThat(ioe, instanceOf(exception.getClass())); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 9fb7db9666ce..885729cff806 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -306,6 +306,19 @@ public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { return false; } + @Override + public ServerName getServerHoldingMeta() throws IOException { + HRegionLocation regionLoc = null; + try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + regionLoc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true); + } + if (regionLoc == null) { + LOG.warn("Cannot find region server holding first meta region"); + return null; + } + return regionLoc.getServerName(); + } + @Override public ServerName getServerHoldingRegion(TableName tn, byte[] regionName) throws IOException { byte[] startKey = RegionInfo.getStartKey(regionName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 171966f41876..4e849604f350 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -89,8 +89,8 @@ import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocateType; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -816,13 +816,16 @@ private void tryMigrateRootTableFromZooKeeper() throws IOException, KeeperExcept } } // start migrating - byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); - Put put = new Put(row); + Put put = null; List metaReplicaNodes = zooKeeper.getMetaReplicaNodes(); StringBuilder info = new StringBuilder("Migrating meta location:"); for (String metaReplicaNode : metaReplicaNodes) { int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode); RegionState state = getMetaRegionState(zooKeeper, replicaId); + if (put == null) { + byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(state.getRegion()); + put = new Put(row); + } info.append(" ").append(state); put.setTimestamp(state.getStamp()); MetaTableAccessor.addRegionInfo(put, state.getRegion()); @@ -834,9 +837,10 @@ private void tryMigrateRootTableFromZooKeeper() throws IOException, KeeperExcept .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp()) .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build()); } - if (!put.isEmpty()) { + if (put != null) { LOG.info(info.toString()); - masterRegion.update(r -> r.put(put)); + final Put p = put; 
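+ // copy to an effectively final local so the lambda passed to masterRegion.update below can capture it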
+ masterRegion.update(r -> r.put(p)); } else { LOG.info("No meta location avaiable on zookeeper, skip migrating..."); } @@ -1276,11 +1280,14 @@ && getMasterProcedureExecutor().isRunning() && tries > 0) { /** * Check hbase:meta is up and ready for reading. For use during Master startup only. * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online - * and we will hold here until operator intervention. + * and we will hold here until operator intervention. */ @InterfaceAudience.Private - public boolean waitForMetaOnline() { - return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO); + public boolean waitForMetaOnline() throws InterruptedException { + Optional firstMetaRegion = + this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).stream() + .filter(RegionInfo::isFirst).filter(RegionReplicaUtil::isDefaultReplica).findFirst(); + return firstMetaRegion.isPresent() ? isRegionOnline(firstMetaRegion.get()) : false; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 73b6aa6c5e7d..1786afe5a02e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -45,8 +45,6 @@ import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -153,7 +151,6 @@ public class AssignmentManager { private static final int DEFAULT_RIT_STUCK_WARNING_THRESHOLD = 60 * 1000; public static final String UNEXPECTED_STATE_REGION = "Unexpected state for "; - private final ProcedureEvent metaAssignEvent = new ProcedureEvent<>("meta assign"); private final ProcedureEvent metaLoadEvent = new ProcedureEvent<>("meta load"); private final MetricsAssignmentManager metrics; @@ -279,10 +276,6 @@ public void start() throws IOException, KeeperException { if (regionLocation != null) { regionStates.addRegionToServer(regionNode); } - if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { - setMetaAssigned(regionInfo, state == State.OPEN); - } - if (regionInfo.isFirst()) { // for compatibility, mirror the meta region state to zookeeper try { @@ -360,9 +353,6 @@ public void stop() { // Update meta events (for testing) if (hasProcExecutor) { metaLoadEvent.suspend(); - for (RegionInfo hri: getMetaRegionSet()) { - setMetaAssigned(hri, false); - } } } @@ -424,6 +414,14 @@ public List getRegionsOnServer(ServerName serverName) { return serverInfo.getRegionInfoList(); } + public List getDefaultMetaRegionsOnServer(ServerName serverName) { + ServerStateNode serverInfo = regionStates.getServerNode(serverName); + if (serverInfo == null) { + return Collections.emptyList(); + } + return serverInfo.getDefaultMetaRegionInfoList(); + } + public RegionStateStore getRegionStateStore() { return regionStateStore; } @@ -453,95 +451,22 @@ private boolean isTableDisabled(final TableName tableName) { // ============================================================================================ // META Helpers // 
============================================================================================ - private boolean isMetaRegion(final RegionInfo regionInfo) { - return regionInfo.isMetaRegion(); - } - - public boolean isMetaRegion(final byte[] regionName) { - return getMetaRegionFromName(regionName) != null; - } - - public RegionInfo getMetaRegionFromName(final byte[] regionName) { - for (RegionInfo hri: getMetaRegionSet()) { - if (Bytes.equals(hri.getRegionName(), regionName)) { - return hri; - } - } - return null; - } - - public boolean isCarryingMeta(final ServerName serverName) { - // TODO: handle multiple meta - return isCarryingRegion(serverName, RegionInfoBuilder.FIRST_META_REGIONINFO); - } - - private boolean isCarryingRegion(final ServerName serverName, final RegionInfo regionInfo) { - // TODO: check for state? - final RegionStateNode node = regionStates.getRegionStateNode(regionInfo); - return(node != null && serverName.equals(node.getRegionLocation())); - } - - private RegionInfo getMetaForRegion(final RegionInfo regionInfo) { - //if (regionInfo.isMetaRegion()) return regionInfo; - // TODO: handle multiple meta. if the region provided is not meta lookup - // which meta the region belongs to. - return RegionInfoBuilder.FIRST_META_REGIONINFO; - } - - // TODO: handle multiple meta. - private static final Set META_REGION_SET = - Collections.singleton(RegionInfoBuilder.FIRST_META_REGIONINFO); - public Set getMetaRegionSet() { - return META_REGION_SET; + public boolean isCarryingMeta(ServerName serverName) { + return regionStates.getTableRegionStateNodes(TableName.META_TABLE_NAME).stream() + .map(RegionStateNode::getRegionLocation).anyMatch(serverName::equals); } // ============================================================================================ // META Event(s) helpers // ============================================================================================ - /** - * Notice that, this only means the meta region is available on a RS, but the AM may still be - * loading the region states from meta, so usually you need to check {@link #isMetaLoaded()} first - * before checking this method, unless you can make sure that your piece of code can only be - * executed after AM builds the region states. - * @see #isMetaLoaded() - */ - public boolean isMetaAssigned() { - return metaAssignEvent.isReady(); - } - public boolean isMetaRegionInTransition() { - return !isMetaAssigned(); - } - - /** - * Notice that this event does not mean the AM has already finished region state rebuilding. See - * the comment of {@link #isMetaAssigned()} for more details. - * @see #isMetaAssigned() - */ - public boolean waitMetaAssigned(Procedure proc, RegionInfo regionInfo) { - return getMetaAssignEvent(getMetaForRegion(regionInfo)).suspendIfNotReady(proc); - } - - private void setMetaAssigned(RegionInfo metaRegionInfo, boolean assigned) { - assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo; - ProcedureEvent metaAssignEvent = getMetaAssignEvent(metaRegionInfo); - if (assigned) { - metaAssignEvent.wake(getProcedureScheduler()); - } else { - metaAssignEvent.suspend(); - } - } - - private ProcedureEvent getMetaAssignEvent(RegionInfo metaRegionInfo) { - assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo; - // TODO: handle multiple meta. 
- return metaAssignEvent; + return regionStates.getRegionsInTransition().stream().map(RegionStateNode::getRegionInfo) + .anyMatch(RegionInfo::isMetaRegion); } /** * Wait until AM finishes the meta loading, i.e, the region states rebuilding. * @see #isMetaLoaded() - * @see #waitMetaAssigned(Procedure, RegionInfo) */ public boolean waitMetaLoaded(Procedure proc) { return metaLoadEvent.suspendIfNotReady(proc); @@ -562,7 +487,6 @@ public void wakeMetaLoadedEvent() { /** * Return whether AM finishes the meta loading, i.e, the region states rebuilding. - * @see #isMetaAssigned() * @see #waitMetaLoaded(Procedure) */ public boolean isMetaLoaded() { @@ -1695,7 +1619,7 @@ private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException { if (!isRunning()) { throw new PleaseHoldException("AssignmentManager not running"); } - boolean meta = isMetaRegion(hri); + boolean meta = hri.isMetaRegion(); boolean metaLoaded = isMetaLoaded(); if (!meta && !metaLoaded) { throw new PleaseHoldException( @@ -1927,12 +1851,6 @@ void regionFailedOpen(RegionStateNode regionNode, boolean giveUp) throws IOExcep // should be called under the RegionStateNode lock void regionClosing(RegionStateNode regionNode) throws IOException { transitStateAndUpdate(regionNode, State.CLOSING, STATES_EXPECTED_ON_CLOSING); - - RegionInfo hri = regionNode.getRegionInfo(); - // Set meta has not initialized early. so people trying to create/edit tables will wait - if (isMetaRegion(hri)) { - setMetaAssigned(hri, false); - } regionStates.addRegionToServer(regionNode); // update the operation count metrics metrics.incrementOperationCounter(); @@ -1988,14 +1906,6 @@ public void regionClosedAbnormally(RegionStateNode regionNode) throws IOExceptio void persistToMeta(RegionStateNode regionNode) throws IOException { regionStateStore.updateRegionLocation(regionNode); - RegionInfo regionInfo = regionNode.getRegionInfo(); - if (isMetaRegion(regionInfo) && regionNode.getState() == State.OPEN) { - // Usually we'd set a table ENABLED at this stage but hbase:meta is ALWAYs enabled, it - // can't be disabled -- so skip the RPC (besides... enabled is managed by TableStateManager - // which is backed by hbase:meta... Avoid setting ENABLED to avoid having to update state - // on table that contains state. 
- setMetaAssigned(regionInfo, true); - } } // ============================================================================================ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 805b51caebec..8eebc4e40014 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -148,13 +149,7 @@ public TableName getTableName() { @Override protected boolean waitInitialized(MasterProcedureEnv env) { - if (TableName.isMetaTableName(getTableName())) { - return false; - } - // First we need meta to be loaded, and second, if meta is not online then we will likely to - // fail when updating meta so we wait until it is assigned. - AssignmentManager am = env.getAssignmentManager(); - return am.waitMetaLoaded(this) || am.waitMetaAssigned(this, region); + return MasterProcedureUtil.waitInitialized(this, env, getTableName()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java index 33f6b1a07d84..30feab432f82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; /** @@ -73,6 +74,11 @@ public List getRegionInfoList() { return regions.stream().map(RegionStateNode::getRegionInfo).collect(Collectors.toList()); } + public List getDefaultMetaRegionInfoList() { + return regions.stream().map(RegionStateNode::getRegionInfo).filter(RegionInfo::isMetaRegion) + .filter(RegionReplicaUtil::isDefaultReplica).collect(Collectors.toList()); + } + public List getSystemRegionInfoList() { return regions.stream().filter(RegionStateNode::isSystemTable) .map(RegionStateNode::getRegionInfo).collect(Collectors.toList()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index 8ca1ee482e81..69b48aa117e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -21,7 +21,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RetriesExhaustedException; @@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; @@ -163,13 +163,7 @@ public TableOperationType getTableOperationType() { @Override protected boolean waitInitialized(MasterProcedureEnv env) { - if (TableName.isMetaTableName(getTableName())) { - return false; - } - // First we need meta to be loaded, and second, if meta is not online then we will likely to - // fail when updating meta so we wait until it is assigned. - AssignmentManager am = env.getAssignmentManager(); - return am.waitMetaLoaded(this) || am.waitMetaAssigned(this, getRegion()); + return MasterProcedureUtil.waitInitialized(this, env, getTableName()); } private void queueAssign(MasterProcedureEnv env, RegionStateNode regionNode) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java index 3d00e491b9b4..caf68589a49f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java @@ -26,8 +26,11 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionLocateType; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; @@ -81,9 +84,13 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) tmpl.render(response.getWriter(), master); } - private ServerName getMetaLocationOrNull(HMaster master) { - return master.getAssignmentManager().getRegionStates() - .getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO).getServerName(); + private ServerName getMetaLocationOrNull(HMaster master) throws IOException { + RegionLocations locs = master.locateMeta(HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT); + if (locs == null) { + return null; + } + HRegionLocation loc = locs.getDefaultRegionLocation(); + return loc != null ? 
loc.getServerName() : null; } private Map getFragmentationInfo( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index 725a138ca91d..28e1901baa90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -84,7 +84,7 @@ public HBCKServerCrashProcedure() {} @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH_EXCEPTION", justification="FindBugs seems confused on ps in below.") - List getRegionsOnCrashedServer(MasterProcedureEnv env) { + protected List getRegionsOnCrashedServer(MasterProcedureEnv env) { // Super will return an immutable list (empty if nothing on this server). List ris = super.getRegionsOnCrashedServer(env); if (!ris.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index e92fc110aba2..05c5b8537ba0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; @@ -57,6 +58,14 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure + * Setting region id to 1 is for keeping compatible with old clients. 
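+ * With region id 1 the bootstrap region keeps the well-known hbase:meta,,1.1588230740 name that the removed FIRST_META_REGIONINFO constant carried.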
+ */ + private static final RegionInfo BOOTSTRAP_META_REGIONINFO = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build(); + private CountDownLatch latch = new CountDownLatch(1); private RetryCounter retryCounter; @@ -85,7 +94,7 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) t TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir); HRegion - .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null) + .createHRegion(BOOTSTRAP_META_REGIONINFO, rootDir, conf, metaDescriptor, null) .close(); return metaDescriptor; } @@ -106,7 +115,7 @@ protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state) case INIT_META_ASSIGN_META: LOG.info("Going to assign meta"); addChildProcedure(env.getAssignmentManager() - .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO))); + .createAssignProcedures(Arrays.asList(BOOTSTRAP_META_REGIONINFO))); setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES); return Flow.HAS_MORE_STATE; case INIT_META_CREATE_NAMESPACES: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java index c6e77fd4af2f..0766b5c7caea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -241,4 +242,14 @@ public static Optional getNamespaceGroup(NamespaceDescriptor namespaceDe return Optional .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)); } + + public static boolean waitInitialized(Procedure proc, MasterProcedureEnv env, + TableName tableName) { + if (TableName.isMetaTableName(tableName)) { + return false; + } + // we need meta to be loaded + AssignmentManager am = env.getAssignmentManager(); + return am.waitMetaLoaded(proc); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 49449e397f01..3722391b8347 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -20,12 +20,10 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterServices; @@ -184,7 +182,9 @@ protected Flow 
executeFromState(MasterProcedureEnv env, ServerCrashState state) } break; case SERVER_CRASH_ASSIGN_META: - assignRegions(env, Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)); + // notice that, here we will only assign the primary meta regions, secondary meta replicas + // will be assigned below + assignRegions(env, env.getAssignmentManager().getDefaultMetaRegionsOnServer(serverName)); setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS); break; case SERVER_CRASH_GET_REGIONS: @@ -263,7 +263,7 @@ protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) /** * @return List of Regions on crashed server. */ - List getRegionsOnCrashedServer(MasterProcedureEnv env) { + protected List getRegionsOnCrashedServer(MasterProcedureEnv env) { return env.getMasterServices().getAssignmentManager().getRegionsOnServer(serverName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ae602a61b7a1..bd0b423d8bbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -92,7 +92,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.client.locking.LockServiceClient; import org.apache.hadoop.hbase.conf.ConfigurationManager; @@ -1232,7 +1232,8 @@ public void run() { } private boolean containsMetaTableRegions() { - return onlineRegions.containsKey(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + return onlineRegions.values().stream().map(Region::getRegionInfo) + .anyMatch(ri -> ri.isMetaRegion() && RegionReplicaUtil.isDefaultReplica(ri)); } private boolean areAllUserRegionsOffline() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index b32d4979c480..d87611785cdb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.HFileLink; @@ -392,7 +391,7 @@ public static void checkVersion(FileSystem fs, Path rootdir, String version = getVersion(fs, rootdir); String msg; if (version == null) { - if (!metaRegionExists(fs, rootdir)) { + if (!metaTableExists(fs, rootdir)) { // rootDir is empty (no version file and no root region) // just create new version file (HBASE-1195) setVersion(fs, rootdir, wait, retries); @@ -693,14 +692,14 @@ public static void waitOnSafeMode(final Configuration conf, } /** - * Checks if meta region exists + * Checks if meta table exists * @param fs file system * @param rootDir root directory of HBase installation * @return true if exists */ - public static boolean metaRegionExists(FileSystem fs, Path 
rootDir) throws IOException { - Path metaRegionDir = getRegionDirFromRootDir(rootDir, RegionInfoBuilder.FIRST_META_REGIONINFO); - return fs.exists(metaRegionDir); + private static boolean metaTableExists(FileSystem fs, Path rootDir) throws IOException { + Path metaTableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); + return fs.exists(metaTableDir); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 28f0d5eb887b..343ddcc00252 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -2717,19 +2717,10 @@ private void unassignMetaReplica(HbckRegionInfo hi) zkw.getZNodePaths().getZNodeForReplica(hi.getMetaEntry().getRegionInfo().getReplicaId())); } - private void assignMetaReplica(int replicaId) - throws IOException, KeeperException, InterruptedException { - errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " + - replicaId +" is not found on any region."); - if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta.."); - setShouldRerun(); - // try to fix it (treat it as unassigned region) - RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); - HBaseFsckRepair.fixUnassigned(admin, h); - HBaseFsckRepair.waitUntilAssigned(admin, h); - } + private void assignMetaReplica(int replicaId) { + errors.reportError(ERROR_CODE.NO_META_REGION, + "hbase:meta, replicaId " + replicaId + " is not found on any region."); + throw new UnsupportedOperationException("fix meta region is not allowed"); } /** diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 29913b52b614..df631c06a8aa 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -312,7 +312,7 @@ // NOTE: Presumes meta with one or more replicas for (int j = 0; j < numMetaReplicas; j++) { RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, j); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j); RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta); // If a metaLocation is null, All of its info would be empty here to be displayed. ServerName metaLocation = regionState != null ? regionState.getServerName() : null; @@ -380,7 +380,7 @@ // NOTE: Presumes meta with one or more replicas for (int j = 0; j < numMetaReplicas; j++) { RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, j); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j); RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta); // If a metaLocation is null, All of its info would be empty here to be displayed. ServerName metaLocation = regionState != null ? 
regionState.getServerName() : null; @@ -431,9 +431,8 @@ // NOTE: Presumes meta with one or more replicas for (int j = 0; j < numMetaReplicas; j++) { RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, j); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j); RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta); - // If a metaLocation is null, All of its info would be empty here to be displayed. ServerName metaLocation = regionState != null ? regionState.getServerName() : null; for (int i = 0; i < 1; i++) { //If metaLocation is null, default value below would be displayed in UI. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java index 0584be85e72b..ece5b5f5a23c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java @@ -338,10 +338,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce /** * Get the ServerName of region server serving the first hbase:meta region */ - public ServerName getServerHoldingMeta() throws IOException { - return getServerHoldingRegion(TableName.META_TABLE_NAME, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); - } + public abstract ServerName getServerHoldingMeta() throws IOException; /** * Get the ServerName of region server serving the specified region diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java index a405f7b24a93..7b44c2734dee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java @@ -26,7 +26,8 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; @@ -775,11 +776,24 @@ public List getRegions(TableName tableName) { } /** - * Returns index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()} of HRS - * carrying regionName. Returns -1 if none found. + * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} + * of HRS carrying regionName. Returns -1 if none found. 
*/ public int getServerWithMeta() { - return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); + int index = 0; + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { + HRegionServer hrs = rst.getRegionServer(); + if (!hrs.isStopped()) { + for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) { + RegionInfo ri = region.getRegionInfo(); + if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) { + return index; + } + } + } + index++; + } + return -1; } /** @@ -803,6 +817,22 @@ public int getServerWith(byte[] regionName) { return -1; } + @Override + public ServerName getServerHoldingMeta() throws IOException { + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { + HRegionServer hrs = rst.getRegionServer(); + if (!hrs.isStopped()) { + for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) { + RegionInfo ri = region.getRegionInfo(); + if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) { + return hrs.getServerName(); + } + } + } + } + return null; + } + @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java index 6aef56b2ca1e..ee116432ced4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java @@ -25,7 +25,6 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.Waiter.Predicate; @@ -37,7 +36,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -395,11 +393,12 @@ public void testRegionStatesWithSplit() throws Exception { private RegionMetrics getMetaMetrics() throws IOException { for (ServerMetrics serverMetrics : ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().values()) { - RegionMetrics metaMetrics = serverMetrics.getRegionMetrics() - .get(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); - if (metaMetrics != null) { - return metaMetrics; + .getLiveServerMetrics().values()) { + for (RegionMetrics metrics : serverMetrics.getRegionMetrics().values()) { + if (CatalogFamilyFormat.parseRegionInfoFromRegionName(metrics.getRegionName()) + .isMetaRegion()) { + return metrics; + } } } Assert.fail("Should have find meta metrics"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java index fb3ea93fe5b3..448757e15c41 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import 
org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -116,11 +115,12 @@ public void testEditMeta() throws IOException { String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration(). get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING); assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString()); - Region r = UTIL.getHBaseCluster().getRegionServer(0). - getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + Region r = + UTIL.getHBaseCluster().getRegionServer(0).getRegions(TableName.META_TABLE_NAME).get(0); assertEquals(oldVersions + 1, - r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions()); - encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor(). + r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions()); + encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor() + . getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING); assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString()); assertTrue(r.getStore(extraColumnFamilyName) != null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java index 038ced66d8e6..9f1e54d80432 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java @@ -22,16 +22,21 @@ import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertTrue; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category({ MiscTests.class, SmallTests.class }) public class TestHRegionLocation { + private static final Logger LOG = LoggerFactory.getLogger(TestHRegionLocation.class); + @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHRegionLocation.class); @@ -43,17 +48,18 @@ public class TestHRegionLocation { @Test public void testHashAndEqualsCode() { ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L); - HRegionLocation hrl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1); - HRegionLocation hrl2 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1); + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + HRegionLocation hrl1 = new HRegionLocation(ri, hsa1); + HRegionLocation hrl2 = new HRegionLocation(ri, hsa1); assertEquals(hrl1.hashCode(), hrl2.hashCode()); assertTrue(hrl1.equals(hrl2)); - HRegionLocation hrl3 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1); + HRegionLocation hrl3 = new HRegionLocation(ri, hsa1); assertNotSame(hrl1, hrl3); // They are equal because they have same location even though they are // carrying different regions or timestamp. 
assertTrue(hrl1.equals(hrl3)); ServerName hsa2 = ServerName.valueOf("localhost", 12345, -1L); - HRegionLocation hrl4 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa2); + HRegionLocation hrl4 = new HRegionLocation(ri, hsa2); // These have same HRI but different locations so should be different. assertFalse(hrl3.equals(hrl4)); HRegionLocation hrl5 = @@ -64,17 +70,19 @@ public void testHashAndEqualsCode() { @Test public void testToString() { ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L); - HRegionLocation hrl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1); - System.out.println(hrl1.toString()); + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + HRegionLocation hrl1 = new HRegionLocation(ri, hsa1); + LOG.info(hrl1.toString()); } @SuppressWarnings("SelfComparison") @Test public void testCompareTo() { + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L); - HRegionLocation hsl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1); + HRegionLocation hsl1 = new HRegionLocation(ri, hsa1); ServerName hsa2 = ServerName.valueOf("localhost", 1235, -1L); - HRegionLocation hsl2 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa2); + HRegionLocation hsl2 = new HRegionLocation(ri, hsa2); assertEquals(0, hsl1.compareTo(hsl1)); assertEquals(0, hsl2.compareTo(hsl2)); int compare1 = hsl1.compareTo(hsl2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 3b0fbe88aaa9..899e41765a23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; @@ -65,16 +66,18 @@ protected static void startCluster() throws Exception { HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); + RegionInfo metaRegionInfo; ServerName hbaseMetaServerName; try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { - hbaseMetaServerName = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getServerName(); + HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW); + metaRegionInfo = loc.getRegion(); + hbaseMetaServerName = loc.getServerName(); } LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName); sns.add(hbaseMetaServerName); for (int replicaId = 1; replicaId < 3; replicaId++) { - RegionInfo h = RegionReplicaUtil - .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); + RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, replicaId); AssignmentTestingUtil.waitForAssignment(am, h); ServerName sn = am.getRegionStates().getRegionServerOfRegion(h); assertNotNull(sn); @@ -98,8 +101,7 @@ protected static void startCluster() throws Exception { ServerName metaServerName = 
TEST_UTIL.getHBaseCluster().getRegionServer(metaServerIndex).getServerName(); assertNotEquals(destinationServerName, metaServerName); - TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - destinationServerName); + TEST_UTIL.getAdmin().move(metaRegionInfo.getEncodedNameAsBytes(), destinationServerName); } // Disable the balancer LoadBalancerTracker l = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index f96770910541..fca03949344f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -704,7 +704,7 @@ public void testGetRegionInfo() throws Exception { testGetWithRegionName(sn, ri, ri.getEncodedNameAsBytes()); testGetWithRegionName(sn, ri, ri.getRegionName()); // Try querying meta encoded name. - ri = RegionInfoBuilder.FIRST_META_REGIONINFO; + ri = ADMIN.getRegions(TableName.META_TABLE_NAME).get(0); testGetWithRegionName(sn, ri, ri.getEncodedNameAsBytes()); testGetWithRegionName(sn, ri, ri.getRegionName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java index ca11ea69b966..377f30fdd8dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java @@ -204,7 +204,8 @@ public String explainFailure() throws Exception { .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer)) .findAny().get(); LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer); - admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer); + RegionInfo meta = admin.getRegions(TableName.META_TABLE_NAME).get(0); + admin.move(meta.getEncodedNameAsBytes(), newMetaServer); LOG.info("====== Move meta done ======"); Thread.sleep(5000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java index fde362c916d5..4409d29ffad8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -81,9 +82,9 @@ public void testFailedReplicaAssignment() throws InterruptedException { TEST_UTIL.waitFor(30000, () -> master.isInitialized()); AssignmentManager am = master.getAssignmentManager(); + RegionInfo metaHri = am.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).get(0); // showing one of the replicas got assigned - RegionInfo metaReplicaHri = - RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 1); + RegionInfo metaReplicaHri = RegionReplicaUtil.getRegionInfoForReplica(metaHri, 1); // we use 
assignAsync so we need to wait a bit TEST_UTIL.waitFor(30000, () -> { RegionStateNode metaReplicaRegionNode = @@ -91,8 +92,7 @@ public void testFailedReplicaAssignment() throws InterruptedException { return metaReplicaRegionNode.getRegionLocation() != null; }); // showing one of the replicas failed to be assigned - RegionInfo metaReplicaHri2 = - RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 2); + RegionInfo metaReplicaHri2 = RegionReplicaUtil.getRegionInfoForReplica(metaHri, 2); RegionStateNode metaReplicaRegionNode2 = am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri2); // wait for several seconds to make sure that it is not assigned diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java index fe105848c865..f2bf5c9e3e5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java @@ -77,8 +77,8 @@ public void testMetaAddressChange() throws Exception { final TableName tableName = name.getTableName(); TEST_UTIL.createTable(tableName, "f"); assertTrue(TEST_UTIL.getAdmin().tableExists(tableName)); - TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - moveToServer); + RegionInfo metaRegionInfo = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0); + TEST_UTIL.getAdmin().move(metaRegionInfo.getEncodedNameAsBytes(), moveToServer); assertNotEquals(currentServer, moveToServer); LOG.debug("CurrentServer={}, moveToServer={}", currentServer, moveToServer); TEST_UTIL.waitFor(60000, () -> { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java index e859e72ca83f..283a99c4436a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.After; @@ -80,10 +81,11 @@ public void testGetHRegionInfo() throws IOException { assertTrue(hri == null); // OK, give it what it expects kvs.clear(); + RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); + RegionInfo.toByteArray(metaRegionInfo))); hri = CatalogFamilyFormat.getRegionInfo(Result.create(kvs)); assertNotNull(hri); - assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); + assertTrue(RegionInfo.COMPARATOR.compare(hri, metaRegionInfo) == 0); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 24a9c51aa655..372dade59fc2 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -177,7 +177,8 @@ public void testMetaRegionMove() throws Exception { break; } } - admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), destServerName); + RegionInfo metaRegion = admin.getRegions(TableName.META_TABLE_NAME).get(0); + admin.move(metaRegion.getEncodedNameAsBytes(), destServerName); LOG.debug("Finished moving meta"); // invalidate client cache RegionInfo region = locator.getRegionLocation(row).getRegion(); @@ -215,6 +216,7 @@ public void testMetaMoveDuringClientZkClusterRestart() throws Exception { Put put = new Put(row); put.addColumn(family, qualifier, value); table.put(put); + RegionInfo metaRegion = admin.getRegions(TableName.META_TABLE_NAME).get(0); // invalid connection cache conn.clearRegionLocationCache(); // stop client zk cluster @@ -230,7 +232,7 @@ public void testMetaMoveDuringClientZkClusterRestart() throws Exception { } // wait for meta region online AssignmentTestingUtil.waitForAssignment(cluster.getMaster().getAssignmentManager(), - RegionInfoBuilder.FIRST_META_REGIONINFO); + metaRegion); // wait some long time to make sure we will retry sync data to client ZK until data set Thread.sleep(10000); clientZkCluster.startup(clientZkDir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java index 734d4e0f913e..ef8bc1b0fb41 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -109,7 +110,7 @@ private RegionCoprocessorHost getRegionCoprocessorHost() { // Make up an HRegion instance. Use the hbase:meta first region as our RegionInfo. Use // hbase:meta table name for building the TableDescriptor our mock returns when asked schema // down inside RegionCoprocessorHost. Pass in mocked RegionServerServices too. 
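(A minimal sketch of the mock wiring described in the comment above, assuming Mockito plus the HBase classes named in this hunk; the helper class and method are hypothetical, not code from the patch.)

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.mockito.Mockito;

final class MockMetaRegionSketch {
  static HRegion mockMetaRegion() {
    // Build a meta RegionInfo with the public builder instead of the removed constant.
    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
    HRegion region = Mockito.mock(HRegion.class);
    Mockito.when(region.getRegionInfo()).thenReturn(ri);
    // Derive the TableDescriptor from the same RegionInfo so schema lookups stay consistent.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(ri.getTable()).build();
    Mockito.when(region.getTableDescriptor()).thenReturn(td);
    return region;
  }
}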
- RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); HRegion mockedHRegion = Mockito.mock(HRegion.class); Mockito.when(mockedHRegion.getRegionInfo()).thenReturn(ri); TableDescriptor td = TableDescriptorBuilder.newBuilder(ri.getTable()).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index 230b5cd7b285..4d7741946cb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -49,7 +50,6 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; @@ -78,6 +78,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -206,7 +207,7 @@ public boolean evaluate() throws Exception { LOG.info("Current Open Regions After Master Node Starts Up:" + HBaseTestingUtil.getAllOnlineRegions(cluster).size()); - assertEquals(numLogLines, TEST_UTIL.countRows(ht)); + assertEquals(numLogLines, HBaseTestingUtil.countRows(ht)); } } @@ -242,7 +243,7 @@ public String explainFailure() throws Exception { TEST_UTIL.waitUntilAllRegionsAssigned(tableName); int rows; try { - rows = TEST_UTIL.countRows(table); + rows = HBaseTestingUtil.countRows(table); } catch (Exception e) { Threads.printThreadInfo(System.out, "Thread dump before fail"); throw e; @@ -414,9 +415,7 @@ public void makeWAL(HRegionServer hrs, List regions, int num_edits, public void makeWAL(HRegionServer hrs, List regions, int numEdits, int editSize, boolean cleanShutdown) throws IOException { - // remove root and meta region - regions.remove(RegionInfoBuilder.FIRST_META_REGIONINFO); - + // remove meta and system regions for (Iterator iter = regions.iterator(); iter.hasNext();) { RegionInfo regionInfo = iter.next(); if (regionInfo.getTable().isSystemTable()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 69a7a79644e2..ddb21f00d9af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -430,10 +430,11 @@ public ClientProtos.MultiResponse multi( } @Override - public GetRegionInfoResponse getRegionInfo(RpcController controller, - GetRegionInfoRequest request) throws ServiceException { + public GetRegionInfoResponse getRegionInfo(RpcController controller, GetRegionInfoRequest request) + throws ServiceException { GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); - builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO)); + builder.setRegionInfo(ProtobufUtil.toRegionInfo( + 
RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build())); return builder.build(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 0f091729c955..3a46d62f59d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -177,7 +177,8 @@ public void testMoveRegionWhenNotInitialized() { HMaster m = cluster.getMaster(); try { m.setInitialized(false); // fake it, set back later - RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo meta = m.getAssignmentManager().getRegionStates() + .getRegionsOfTable(TableName.META_TABLE_NAME).get(0); m.move(meta.getEncodedNameAsBytes(), null); fail("Region should not be moved since master is not initialized"); } catch (IOException ioe) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java index 9f46ca2c46fd..cf2e99a73cf1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java @@ -67,7 +67,7 @@ public void setUp() { public void testRegionInTransition() throws IOException { // Check ReportRegionInTransition HBaseProtos.RegionInfo meta_ri = - ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO); + ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); HBaseProtos.RegionInfo normal_ri = ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.valueOf("test:table")) .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index 29bafe3f567d..d8d35d8b906e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -28,8 +28,9 @@ import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer; import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -71,13 +72,10 @@ public static void tearDownAfterClass() throws Exception { } /** - * This test will test the expire handling of a meta-carrying - * region server. - * After HBaseMiniCluster is up, we will delete the ephemeral - * node of the meta-carrying region server, which will trigger - * the expire of this region server on the master. - * On the other hand, we will slow down the abort process on - * the region server so that it is still up during the master SSH. + * This test will test the expire handling of a meta-carrying region server. 
After + * HBaseMiniCluster is up, we will delete the ephemeral node of the meta-carrying region server, + * which will trigger the expire of this region server on the master. On the other hand, we will + * slow down the abort process on the region server so that it is still up during the master SSH. * We will check that the master SSH is still successfully done. */ @Test @@ -85,27 +83,25 @@ public void testExpireMetaRegionServer() throws Exception { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); RegionStates regionStates = master.getAssignmentManager().getRegionStates(); - ServerName metaServerName = - regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); + RegionInfo firstMetaRegion = regionStates.getRegionsOfTable(TableName.META_TABLE_NAME).get(0); + ServerName metaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion); if (master.getServerName().equals(metaServerName) || metaServerName == null || !metaServerName.equals(cluster.getServerHoldingMeta())) { // Move meta off master metaServerName = cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName(); - master.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), + master.move(firstMetaRegion.getEncodedNameAsBytes(), Bytes.toBytes(metaServerName.getServerName())); TEST_UTIL.waitUntilNoRegionsInTransition(60000); - metaServerName = - regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); + metaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion); } assertNotEquals("Meta is on master!", metaServerName, master.getServerName()); HRegionServer metaRegionServer = cluster.getRegionServer(metaServerName); // Delete the ephemeral node of the meta-carrying region server. // This is trigger the expire of this region server on the master. 
- String rsEphemeralNodePath = - ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode, - metaServerName.toString()); + String rsEphemeralNodePath = ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode, + metaServerName.toString()); ZKUtil.deleteNode(master.getZooKeeper(), rsEphemeralNodePath); LOG.info("Deleted the znode for the RegionServer hosting hbase:meta; waiting on SSH"); // Wait for SSH to finish @@ -115,18 +111,16 @@ public void testExpireMetaRegionServer() throws Exception { TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return !serverManager.isServerOnline(priorMetaServerName) - && !serverManager.areDeadServersInProgress(); + return !serverManager.isServerOnline(priorMetaServerName) && + !serverManager.areDeadServersInProgress(); } }); LOG.info("Past wait on RIT"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); // Now, make sure meta is assigned - assertTrue("Meta should be assigned", - regionStates.isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO)); + assertTrue("Meta should be assigned", regionStates.isRegionOnline(firstMetaRegion)); // Now, make sure meta is registered in zk - ServerName newMetaServerName = - regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); + ServerName newMetaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion); assertNotEquals("Meta should be assigned on a different server", newMetaServerName, metaServerName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java index cd13905855c5..7d66dcf46fa3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNotNull; import java.io.IOException; +import java.util.HashSet; import java.util.Set; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; @@ -115,16 +117,16 @@ public static ServerName getServerHoldingRegion(final HBaseTestingUtil util, public static boolean isServerHoldingMeta(final HBaseTestingUtil util, final ServerName serverName) throws Exception { - for (RegionInfo hri: getMetaRegions(util)) { - if (serverName.equals(getServerHoldingRegion(util, hri))) { - return true; - } + HRegionServer server = util.getMiniHBaseCluster().getRegionServer(serverName); + if (server == null) { + return false; } - return false; + return !server.getRegions(TableName.META_TABLE_NAME).isEmpty(); } public static Set getMetaRegions(final HBaseTestingUtil util) { - return getMaster(util).getAssignmentManager().getMetaRegionSet(); + return new HashSet<>(getMaster(util).getAssignmentManager().getRegionStates() + .getTableRegionsInfo(TableName.META_TABLE_NAME)); } private static HMaster getMaster(final HBaseTestingUtil util) { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index be3bb24d2df0..259cf85de214 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.util.StringUtils; @@ -238,10 +237,7 @@ public void testAssignMetaAndCrashBeforeResponse() throws Exception { am = master.getAssignmentManager(); // Assign meta - rsDispatcher.setMockRsExecutor(new HangThenRSRestartExecutor()); - am.assign(RegionInfoBuilder.FIRST_META_REGIONINFO); - assertEquals(true, am.isMetaAssigned()); - + setUpMeta(new HangThenRSRestartExecutor()); // set it back as default, see setUpMeta() am.wakeMetaLoadedEvent(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java index 02e8600ae4e4..300fdab8ad25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java @@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; -import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.util.Bytes; @@ -141,8 +140,6 @@ protected int getAssignMaxAttempts() { protected void setupConfiguration(Configuration conf) throws Exception { CommonFSUtils.setRootDir(conf, util.getDataTestDir()); - conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false); - conf.setInt(WALProcedureStore.SYNC_WAIT_MSEC_CONF_KEY, 10); conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, PROC_NTHREADS); conf.setInt(RSProcedureDispatcher.RS_RPC_STARTUP_WAIT_TIME_CONF_KEY, 1000); conf.setInt(AssignmentManager.ASSIGN_MAX_ATTEMPTS, getAssignMaxAttempts()); @@ -168,12 +165,12 @@ public void setUp() throws Exception { reopenProcMetrics = am.getAssignmentManagerMetrics().getReopenProcMetrics(); openProcMetrics = am.getAssignmentManagerMetrics().getOpenProcMetrics(); closeProcMetrics = am.getAssignmentManagerMetrics().getCloseProcMetrics(); - setUpMeta(); + setUpMeta(new GoodRsExecutor()); } - protected void setUpMeta() throws Exception { - rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); - am.assign(RegionInfoBuilder.FIRST_META_REGIONINFO); + protected final void setUpMeta(MockRSExecutor mockRsExec) throws Exception { + rsDispatcher.setMockRsExecutor(mockRsExec); + am.assign(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build()); am.wakeMetaLoadedEvent(); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java index 9ec5110df910..6c282280d271 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java @@ -41,8 +41,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Category({MasterTests.class, LargeTests.class}) public class TestAssignmentOnRSCrash { @@ -51,8 +49,6 @@ public class TestAssignmentOnRSCrash { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAssignmentOnRSCrash.class); - private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentOnRSCrash.class); - private static final TableName TEST_TABLE = TableName.valueOf("testb"); private static final String FAMILY_STR = "f"; private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java index 6c5a81177833..c06959e125b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java @@ -64,8 +64,9 @@ public void setUp() throws Exception { @Test public void testForMeta() { - byte[] metaRegionNameAsBytes = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(); - String metaRegionName = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString(); + RegionInfo meta = am.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).get(0); + byte[] metaRegionNameAsBytes = meta.getRegionName(); + String metaRegionName = meta.getRegionNameAsString(); List serverNames = master.getServerManager().getOnlineServersList(); assertEquals(NSERVERS, serverNames.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java index 614385ec04d6..d94b9b934b4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java @@ -43,28 +43,33 @@ public class TestMetaFixerNoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class); - private static byte[] A = Bytes.toBytes("a"); - private static byte[] B = Bytes.toBytes("b"); - private static byte[] C = Bytes.toBytes("c"); - private static byte[] D = Bytes.toBytes("d"); - private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO; - private static RegionInfo _ARI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build(); - private static RegionInfo _BRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build(); - private static RegionInfo ABRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build(); - private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - 
.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build(); - private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build(); - private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build(); - private static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build(); - private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build(); + HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class); + private static byte [] A = Bytes.toBytes("a"); + private static byte [] B = Bytes.toBytes("b"); + private static byte [] C = Bytes.toBytes("c"); + private static byte [] D = Bytes.toBytes("d"); + private static RegionInfo ALL = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + private static RegionInfo _ARI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setEndKey(A).build(); + private static RegionInfo _BRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setEndKey(B).build(); + private static RegionInfo ABRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setStartKey(A).setEndKey(B).build(); + private static RegionInfo ACRI = + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setStartKey(A).setEndKey(C).build(); + private static RegionInfo CDRI = + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setStartKey(C).setEndKey(D).build(); + private static RegionInfo ADRI = + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setStartKey(A).setEndKey(D).build(); + private static RegionInfo D_RI = + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). + setStartKey(D).build(); + private static RegionInfo C_RI = + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). 
+ setStartKey(C).build(); @Test public void testGetRegionInfoWithLargestEndKey() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java index f8d59dec3903..98acb3cde923 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -64,8 +64,8 @@ public void testCleanupMetaWAL() throws Exception { TEST_UTIL.createTable(TableName.valueOf("test"), "cf"); HRegionServer serverWithMeta = TEST_UTIL.getMiniHBaseCluster() .getRegionServer(TEST_UTIL.getMiniHBaseCluster().getServerWithMeta()); - TEST_UTIL.getAdmin() - .move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); + RegionInfo metaInfo = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0); + TEST_UTIL.getAdmin().move(metaInfo.getEncodedNameAsBytes()); LOG.info("KILL"); TEST_UTIL.getMiniHBaseCluster().killRegionServer(serverWithMeta.getServerName()); LOG.info("WAIT"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 74bf075f83d8..13ac5c17ffe6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -954,9 +954,9 @@ public void testShouldFlushMeta() throws Exception { WALFactory wFactory = new WALFactory(conf, "1234"); TableDescriptors tds = new FSTableDescriptors(conf); FSTableDescriptors.tryUpdateMetaTableDescriptor(conf); - HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, - conf, tds.get(TableName.META_TABLE_NAME), - wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); + RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + HRegion meta = HRegion.createHRegion(metaRegionInfo, testDir, conf, + tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(metaRegionInfo)); // parameterized tests add [#] suffix get rid of [ and ]. 
TableDescriptor desc = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_"))) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java index e832c47aac81..49d9d66f1eaf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; @@ -62,11 +63,11 @@ public void testCustomParts() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName()); conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, - DummyCompactionPolicy.class.getName()); - conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, - DummyStoreFlusher.class.getName()); + DummyCompactionPolicy.class.getName()); + conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName()); HStore mockStore = Mockito.mock(HStore.class); - Mockito.when(mockStore.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + Mockito.when(mockStore.getRegionInfo()) + .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); StoreEngine se = StoreEngine.create(mockStore, conf, CellComparatorImpl.COMPARATOR); Assert.assertTrue(se instanceof DefaultStoreEngine); Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index a435b9d9b239..350876ee6424 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -94,8 +94,8 @@ public void testUsingMetaAndBinary() throws IOException { FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration()); TableDescriptor td = tds.get(TableName.META_TABLE_NAME); td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build(); - HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO, - rootdir, conf, td); + HRegion mr = HBaseTestingUtil.createRegionAndWAL( + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), rootdir, conf, td); try { // Write rows for three tables 'A', 'B', and 'C'. 
for (char c = 'A'; c < 'D'; c++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java index 6d202bd3facc..17a5b4b5c397 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java @@ -58,6 +58,9 @@ public class TestPriorityRpc { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPriorityRpc.class); + private static final RegionInfo FIRST_META_REGIONINFO = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static HRegionServer RS = null; @@ -87,8 +90,7 @@ public void testQosFunctionForMeta() throws IOException { GetRequest.Builder getRequestBuilder = GetRequest.newBuilder(); RegionSpecifier.Builder regionSpecifierBuilder = RegionSpecifier.newBuilder(); regionSpecifierBuilder.setType(RegionSpecifierType.REGION_NAME); - ByteString name = UnsafeByteOperations.unsafeWrap( - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); + ByteString name = UnsafeByteOperations.unsafeWrap(FIRST_META_REGIONINFO.getRegionName()); regionSpecifierBuilder.setValue(name); RegionSpecifier regionSpecifier = regionSpecifierBuilder.build(); getRequestBuilder.setRegion(regionSpecifier); @@ -104,8 +106,7 @@ public void testQosFunctionForMeta() throws IOException { RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class); Mockito.when(mockRpc.getRegion(Mockito.any())).thenReturn(mockRegion); Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo); - Mockito.when(mockRegionInfo.getTable()) - .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); + Mockito.when(mockRegionInfo.getTable()).thenReturn(FIRST_META_REGIONINFO.getTable()); // Presume type. ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS); assertEquals( @@ -159,8 +160,7 @@ public void testQosFunctionForScanMethod() throws IOException { Mockito.when(mockRegionScanner.getRegionInfo()).thenReturn(mockRegionInfo); Mockito.when(mockRpc.getRegion((RegionSpecifier)Mockito.any())).thenReturn(mockRegion); Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo); - Mockito.when(mockRegionInfo.getTable()) - .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); + Mockito.when(mockRegionInfo.getTable()).thenReturn(FIRST_META_REGIONINFO.getTable()); // Presume type. 
((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java index 9a2456d207d8..db4752760b24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java @@ -22,6 +22,7 @@ import java.net.UnknownHostException; import java.util.Optional; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -63,7 +64,8 @@ public void testRegionScannerHolderToString() throws UnknownHostException { String userNameTest = RSRpcServices.getUserName(); assertEquals("test", userNameTest); HRegion region = Mockito.mock(HRegion.class); - Mockito.when(region.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + Mockito.when(region.getRegionInfo()) + .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); RSRpcServices.RegionScannerHolder rsh = new RSRpcServices.RegionScannerHolder(null, region, null, null, false, false, clientIpAndPort, userNameTest); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index c0e72cbf94e9..f37352fe047d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -68,7 +68,7 @@ public static void tearDown() { @Test public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedException { - RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR); FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java index 56a8ea061e0a..6242eae4b1d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java @@ -57,70 +57,58 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRegionInfo { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfo.class); + HBaseClassTestRule.forClass(TestRegionInfo.class); @Rule public TestName name = new TestName(); @Test public void testIsStart() { - assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). 
- setStartKey(Bytes.toBytes("not_start")).build(); + assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isFirst()); + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(Bytes.toBytes("not_start")).build(); assertFalse(ri.isFirst()); } @Test public void testIsEnd() { - assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(Bytes.toBytes("not_end")).build(); + assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isLast()); + RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setEndKey(Bytes.toBytes("not_end")).build(); assertFalse(ri.isLast()); } @Test public void testIsNext() { - byte [] bytes = Bytes.toBytes("row"); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(bytes).build(); - org.apache.hadoop.hbase.client.RegionInfo ri2 = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(bytes).build(); - assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO)); + byte[] bytes = Bytes.toBytes("row"); + RegionInfo ri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build(); + RegionInfo ri2 = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build(); + assertFalse(ri.isNext(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build())); assertTrue(ri.isNext(ri2)); } @Test public void testIsOverlap() { - byte [] a = Bytes.toBytes("a"); - byte [] b = Bytes.toBytes("b"); - byte [] c = Bytes.toBytes("c"); - byte [] d = Bytes.toBytes("d"); - org.apache.hadoop.hbase.client.RegionInfo all = - RegionInfoBuilder.FIRST_META_REGIONINFO; - org.apache.hadoop.hbase.client.RegionInfo ari = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(a).build(); - org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(b).build(); - org.apache.hadoop.hbase.client.RegionInfo adri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(d).build(); - org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(c).setEndKey(d).build(); - org.apache.hadoop.hbase.client.RegionInfo dri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). 
- setStartKey(d).build(); + byte[] a = Bytes.toBytes("a"); + byte[] b = Bytes.toBytes("b"); + byte[] c = Bytes.toBytes("c"); + byte[] d = Bytes.toBytes("d"); + RegionInfo all = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + RegionInfo ari = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + RegionInfo abri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(b).build(); + RegionInfo adri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(d).build(); + RegionInfo cdri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(c).setEndKey(d).build(); + RegionInfo dri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build(); assertTrue(all.isOverlap(all)); assertTrue(all.isOverlap(abri)); assertFalse(abri.isOverlap(cdri)); @@ -146,21 +134,14 @@ public void testIsOverlaps() { byte[] d = Bytes.toBytes("d"); byte[] e = Bytes.toBytes("e"); byte[] f = Bytes.toBytes("f"); - org.apache.hadoop.hbase.client.RegionInfo ari = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(a).build(); - org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(b).build(); - org.apache.hadoop.hbase.client.RegionInfo eri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(e).build(); - org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(c).setEndKey(d).build(); - org.apache.hadoop.hbase.client.RegionInfo efri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(e).setEndKey(f).build(); + RegionInfo ari = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + RegionInfo abri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(b).build(); + RegionInfo eri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build(); + RegionInfo cdri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(c).setEndKey(d).build(); + RegionInfo efri = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(e).setEndKey(f).build(); assertFalse(ari.isOverlap(abri)); assertTrue(abri.isOverlap(eri)); assertFalse(cdri.isOverlap(efri)); @@ -169,8 +150,9 @@ public void testIsOverlaps() { @Test public void testPb() throws DeserializationException { - RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; - byte [] bytes = RegionInfo.toByteArray(hri); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf("test")) + .setStartKey(Bytes.toBytes("start")).build(); + byte[] bytes = RegionInfo.toByteArray(hri); RegionInfo pbhri = RegionInfo.parseFrom(bytes); assertTrue(hri.equals(pbhri)); } @@ -178,28 +160,26 @@ public void testPb() throws DeserializationException { @Test public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException { HBaseTestingUtil htu = new HBaseTestingUtil(); - RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); Path basedir = htu.getDataTestDir(); - // Create a region. That'll write the .regioninfo file. + // Create a region. That'll write the .regioninfo file. 
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), - null, htu.getConfiguration()); + r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, + htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); assertEquals(modtime, modtime2); // Now load the file. - org.apache.hadoop.hbase.client.RegionInfo deserializedHri = - HRegionFileSystem.loadRegionInfoFileContent( - r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); - assertEquals(0, - org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri)); + RegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent( + r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); + assertEquals(0, RegionInfo.COMPARATOR.compare(hri, deserializedHri)); HBaseTestingUtil.closeRegionAndWAL(r); } @@ -219,19 +199,16 @@ public void testCreateHRegionInfoName() throws Exception { String id = "id"; // old format region name - byte [] name = RegionInfo.createRegionName(tn, sk, id, false); + byte[] name = RegionInfo.createRegionName(tn, sk, id, false); String nameStr = Bytes.toString(name); assertEquals(tableName + "," + startKey + "," + id, nameStr); - // new format region name. String md5HashInHex = MD5Hash.getMD5AsHex(name); assertEquals(RegionInfo.MD5_HEX_LENGTH, md5HashInHex.length()); name = RegionInfo.createRegionName(tn, sk, id, true); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," - + id + "." + md5HashInHex + ".", - nameStr); + assertEquals(tableName + "," + startKey + "," + id + "." 
+ md5HashInHex + ".", nameStr); } @Test @@ -309,7 +286,8 @@ public void testLastRegionCompare() { @Test public void testMetaTables() { - assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isMetaRegion()); + assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isMetaRegion()); + assertFalse(RegionInfoBuilder.newBuilder(TableName.valueOf("test")).build().isMetaRegion()); } @SuppressWarnings("SelfComparison") @@ -327,7 +305,7 @@ public void testComparator() { RegionInfo b = RegionInfoBuilder.newBuilder(TableName.valueOf("b")).build(); assertNotEquals(0, a.compareTo(b)); TableName t = TableName.valueOf("t"); - byte [] midway = Bytes.toBytes("midway"); + byte[] midway = Bytes.toBytes("midway"); a = RegionInfoBuilder.newBuilder(t).setEndKey(midway).build(); b = RegionInfoBuilder.newBuilder(t).setStartKey(midway).build(); assertTrue(a.compareTo(b) < 0); @@ -363,21 +341,22 @@ public void testRegionNameForRegionReplicas() throws Exception { // assert with only the region name without encoding // primary, replicaId = 0 - byte [] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false); + byte[] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false); String nameStr = Bytes.toString(name); assertEquals(tableName + "," + startKey + "," + id, nameStr); // replicaId = 1 name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 1, false); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," + id + "_" + - String.format(RegionInfo.REPLICA_ID_FORMAT, 1), nameStr); + assertEquals( + tableName + "," + startKey + "," + id + "_" + String.format(RegionInfo.REPLICA_ID_FORMAT, 1), + nameStr); // replicaId = max name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0xFFFF, false); nameStr = Bytes.toString(name); assertEquals(tableName + "," + startKey + "," + id + "_" + - String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr); + String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr); } @Test @@ -391,21 +370,20 @@ public void testParseName() throws IOException { byte[] regionName = RegionInfo.createRegionName(tableName, startKey, regionId, false); byte[][] fields = RegionInfo.parseRegionName(regionName); - assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]); - assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]); - assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]); + assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]); + assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]); + assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]); assertEquals(3, fields.length); // test with replicaId - regionName = RegionInfo.createRegionName(tableName, startKey, regionId, - replicaId, false); + regionName = RegionInfo.createRegionName(tableName, startKey, regionId, replicaId, false); fields = RegionInfo.parseRegionName(regionName); - assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]); - assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]); - assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]); - assertArrayEquals(Bytes.toString(fields[3]), Bytes.toBytes( - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]); + assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]); + assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]); + 
assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]); + assertArrayEquals(Bytes.toString(fields[3]), + Bytes.toBytes(String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]); } @Test @@ -441,10 +419,11 @@ public void testConvert() { assertEquals(expectedHri, convertedHri); } + @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo h = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) @@ -507,4 +486,3 @@ private void checkEquality(RegionInfo h, Configuration conf) throws IOException } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java index 48729faae3ef..6d767aa61bca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java @@ -51,7 +51,7 @@ public void testParallelStaticInitialization() throws Exception { // RegionInfoBuilder. final Supplier retrieveUNDEFINED = () -> RegionInfo.UNDEFINED; final Supplier retrieveMetaRegionInfo = - () -> RegionInfoBuilder.FIRST_META_REGIONINFO; + () -> RegionInfoBuilder.UNDEFINED; // The test runs multiple threads that reference these mutually dependent symbols. In order to // express this bug, these threads need to access these symbols at roughly the same time, so diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index f5330f6faa42..dbab4e75e211 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; @@ -119,7 +120,8 @@ private static HStoreFile createFile() throws Exception { private static TestStoreEngine createEngine(Configuration conf) throws Exception { HStore store = mock(HStore.class); - when(store.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + when(store.getRegionInfo()) + .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); CellComparatorImpl kvComparator = mock(CellComparatorImpl.class); return (TestStoreEngine) StoreEngine.create(store, conf, kvComparator); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 31b95ee14d6e..c3503848df68 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.regionserver.HStore; @@ -206,7 +207,8 @@ private HStore createMockStore() { HStore s = mock(HStore.class); when(s.getStoreFileTtl()).thenReturn(Long.MAX_VALUE); when(s.getBlockingFileCount()).thenReturn(7L); - when(s.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + when(s.getRegionInfo()) + .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); return s; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 5eb94ac1d448..6636770579ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; @@ -100,6 +101,8 @@ public class TestStripeCompactionPolicy { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestStripeCompactionPolicy.class); + private static final RegionInfo FIRST_META_REGIONINFO = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); private static final byte[] KEY_A = Bytes.toBytes("aaa"); private static final byte[] KEY_B = Bytes.toBytes("bbb"); private static final byte[] KEY_C = Bytes.toBytes("ccc"); @@ -169,7 +172,7 @@ public void testSingleStripeCompaction() throws Exception { conf.setInt(StripeStoreConfig.MAX_FILES_KEY, 4); conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY, 1000); // make sure the are no splits StoreConfigInformation sci = mock(StoreConfigInformation.class); - when(sci.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + when(sci.getRegionInfo()).thenReturn(FIRST_META_REGIONINFO); StripeStoreConfig ssc = new StripeStoreConfig(conf, sci); StripeCompactionPolicy policy = new StripeCompactionPolicy(conf, sci, ssc) { @Override @@ -515,7 +518,7 @@ private static StripeCompactionPolicy createPolicy(Configuration conf, conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialCount); StoreConfigInformation sci = mock(StoreConfigInformation.class); when(sci.getStoreFileTtl()).thenReturn(hasTtl ? 
defaultTtl : Long.MAX_VALUE); - when(sci.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); + when(sci.getRegionInfo()).thenReturn(FIRST_META_REGIONINFO); StripeStoreConfig ssc = new StripeStoreConfig(conf, sci); return new StripeCompactionPolicy(conf, sci, ssc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index c2206399872d..0131944cb4be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -159,9 +158,8 @@ public void run() { this.log.info(getName() +" started"); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); try { - TableDescriptors tds = new FSTableDescriptors(TEST_UTIL.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration()); - TableDescriptor htd = tds.get(TableName.META_TABLE_NAME); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); for (int i = 0; i < this.count; i++) { long now = EnvironmentEdgeManager.currentTime(); // Roll every ten edits @@ -171,7 +169,6 @@ public void run() { WALEdit edit = new WALEdit(); byte[] bytes = Bytes.toBytes(i); edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); - RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam: this.metaTableDescriptor.getColumnFamilyNames()) { scopes.put(fam, 0); @@ -199,8 +196,4 @@ public void run() { } } } - - //@org.junit.Rule - //public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = - // new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index a0d5cc961453..26ed0032670f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -67,9 +67,9 @@ public void testSystemTableWALEntryFilter() { SystemTableWALEntryFilter filter = new SystemTableWALEntryFilter(); // meta - WALKeyImpl key1 = - new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime()); + WALKeyImpl key1 = new WALKeyImpl( + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().getEncodedNameAsBytes(), + TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime()); Entry metaEntry = new Entry(key1, null); assertNull(filter.filter(metaEntry)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index 16d4456fc18f..26d69b5e4f42 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.util.List; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; @@ -232,8 +232,8 @@ public void testVersion() throws DeserializationException, IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); assertTrue(CommonFSUtils.isExists(fs, versionFile)); assertTrue(CommonFSUtils.delete(fs, versionFile, true)); - Path metaRegionDir = - FSUtils.getRegionDirFromRootDir(rootdir, RegionInfoBuilder.FIRST_META_REGIONINFO); + Path metaRegionDir = FSUtils.getRegionDirFromRootDir(rootdir, + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()); FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf, HConstants.DATA_FILE_UMASK_KEY); CommonFSUtils.create(fs, metaRegionDir, defaultPerms, false); @@ -297,7 +297,7 @@ public void testPermMask() throws Exception { assertEquals(new FsPermission("700"), filePerm); // then that the correct file is created - Path p = new Path("target" + File.separator + htu.getRandomUUID().toString()); + Path p = new Path("target" + File.separator + HBaseTestingUtil.getRandomUUID().toString()); try { FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null); out.close(); @@ -316,7 +316,7 @@ public void testDeleteAndExists() throws Exception { conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // then that the correct file is created - String file = htu.getRandomUUID().toString(); + String file = HBaseTestingUtil.getRandomUUID().toString(); Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file); Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file); try { @@ -357,7 +357,7 @@ public void testRenameAndSetModifyTime() throws Exception { FileSystem fs = FileSystem.get(conf); Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile"); - String file = htu.getRandomUUID().toString(); + String file = HBaseTestingUtil.getRandomUUID().toString(); Path p = new Path(testDir, file); FSDataOutputStream out = fs.create(p); @@ -371,7 +371,7 @@ public void testRenameAndSetModifyTime() throws Exception { mockEnv.setValue(expect); EnvironmentEdgeManager.injectEdge(mockEnv); try { - String dstFile = htu.getRandomUUID().toString(); + String dstFile = HBaseTestingUtil.getRandomUUID().toString(); Path dst = new Path(testDir , dstFile); assertTrue(CommonFSUtils.renameAndSetModifyTime(fs, p, dst)); @@ -453,7 +453,7 @@ private void verifyFileInDirWithStoragePolicy(final String policy) throws Except conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(fs, testDir, storagePolicy); - String file =htu.getRandomUUID().toString(); + String file = HBaseTestingUtil.getRandomUUID().toString(); Path p = new Path(testDir, file); WriteDataToHDFS(fs, p, 
4096); HFileSystem hfs = new HFileSystem(fs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index 8bfd4dd9530b..50a28ab8055f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Method; @@ -91,10 +92,12 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; @@ -135,6 +138,8 @@ public class TestWALSplit { private static String ROBBER; private static String ZOMBIE; private static String [] GROUP = new String [] {"supergroup"}; + private static RegionInfo FIRST_META_REGIONINFO = + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); static enum Corruptions { INSERT_GARBAGE_ON_FIRST_LINE, @@ -371,7 +376,7 @@ private void loop(final Writer writer) { public void testRecoveredEditsPathForMeta() throws IOException { Path p = createRecoveredEditsPathForRegion(); String parentOfParent = p.getParent().getParent().getName(); - assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + assertEquals(parentOfParent, FIRST_META_REGIONINFO.getEncodedName()); } /** @@ -383,18 +388,18 @@ public void testOldRecoveredEditsFileSidelined() throws IOException { Path p = createRecoveredEditsPathForRegion(); Path tdir = CommonFSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); Path regiondir = new Path(tdir, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); Path parent = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); assertEquals(HConstants.RECOVERED_EDITS_DIR, parent.getName()); fs.createNewFile(parent); // create a recovered.edits file String parentOfParent = p.getParent().getParent().getName(); - assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + assertEquals(parentOfParent, FIRST_META_REGIONINFO.getEncodedName()); WALFactory.createRecoveredEditsWriter(fs, p, conf).close(); } private Path createRecoveredEditsPathForRegion() throws IOException { - byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); + byte[] encoded = FIRST_META_REGIONINFO.getEncodedNameAsBytes(); long now = EnvironmentEdgeManager.currentTime(); Entry entry = new Entry( new WALKeyImpl(encoded, TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), @@ -408,10 +413,10 @@ private Path createRecoveredEditsPathForRegion() throws IOException { @Test public void testHasRecoveredEdits() throws IOException { Path p = createRecoveredEditsPathForRegion(); - assertFalse(WALSplitUtil.hasRecoveredEdits(conf, RegionInfoBuilder.FIRST_META_REGIONINFO)); + assertFalse(WALSplitUtil.hasRecoveredEdits(conf, FIRST_META_REGIONINFO)); String renamedEdit = 
p.getName().split("-")[0]; fs.createNewFile(new Path(p.getParent(), renamedEdit)); - assertTrue(WALSplitUtil.hasRecoveredEdits(conf, RegionInfoBuilder.FIRST_META_REGIONINFO)); + assertTrue(WALSplitUtil.hasRecoveredEdits(conf, FIRST_META_REGIONINFO)); } private void useDifferentDFSClient() throws IOException { diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java index a98bd6e00f58..22c7d77db7d9 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -21,7 +21,6 @@ import java.io.IOException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; @@ -357,10 +356,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce /** * Get the ServerName of region server serving the first hbase:meta region */ - public ServerName getServerHoldingMeta() throws IOException { - return getServerHoldingRegion(TableName.META_TABLE_NAME, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); - } + public abstract ServerName getServerHoldingMeta() throws IOException; /** * Get the ServerName of region server serving the specified region diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index f8dce2563867..7a9b397a824c 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -26,7 +26,8 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; @@ -832,7 +833,20 @@ public List getRegions(TableName tableName) { * of HRS carrying regionName. Returns -1 if none found. 
*/ public int getServerWithMeta() { - return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); + int index = 0; + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { + HRegionServer hrs = rst.getRegionServer(); + if (!hrs.isStopped()) { + for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) { + RegionInfo ri = region.getRegionInfo(); + if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) { + return index; + } + } + } + index++; + } + return -1; } /** @@ -856,9 +870,25 @@ public int getServerWith(byte[] regionName) { return -1; } + @Override + public ServerName getServerHoldingMeta() throws IOException { + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { + HRegionServer hrs = rst.getRegionServer(); + if (!hrs.isStopped()) { + for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) { + RegionInfo ri = region.getRegionInfo(); + if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) { + return hrs.getServerName(); + } + } + } + } + return null; + } + @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException { + throws IOException { // Assume there is only one master thread which is the active master. // If there are multiple master threads, the backup master threads // should hold some regions. Please refer to #countServedRegions
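Taken together, the hunks above replace each use of the removed RegionInfoBuilder.FIRST_META_REGIONINFO with one of two patterns: build a meta RegionInfo on demand via the builder, or recognize the first hbase:meta region by its properties instead of comparing against a fixed region name. The sketch below is illustrative only and is not part of the patch; the helper class and method names are invented here, and it assumes the builder's default regionId, so two independently built instances need not share a region name (presumably why TestWALSplit and TestStripeCompactionPolicy cache a single instance in a static field).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

// Hypothetical helper, not part of the patch.
final class MetaRegionInfoExample {

  // Stand-in for the removed constant: a meta RegionInfo built on demand.
  // The builder assigns its own regionId, so callers needing a stable
  // identity should build once and reuse the instance.
  static RegionInfo buildMetaRegionInfo() {
    return RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
  }

  // The predicate MiniHBaseCluster now applies when locating the server
  // hosting meta: a region of hbase:meta whose start key is empty (isFirst)
  // and which is the default replica, rather than a region-name comparison
  // against the removed constant.
  static boolean isFirstMetaRegion(RegionInfo ri) {
    return TableName.META_TABLE_NAME.equals(ri.getTable())
      && ri.isFirst()
      && RegionReplicaUtil.isDefaultReplica(ri);
  }
}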