Skip to content

Commit

Permalink
HBASE-22807 HBCK Report showed wrong orphan regions on FileSystem (apache#461)
Browse files Browse the repository at this point in the history

Signed-off-by: Sakthi <[email protected]>
  • Loading branch information
infraio committed Aug 8, 2019
1 parent 08b1f15 commit 6ec251c
Show file tree
Hide file tree
Showing 4 changed files with 45 additions and 22 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,13 @@

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ScheduledChore;
Expand All @@ -40,8 +40,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
* Used to do the hbck checking job at master side.
*/
Expand Down Expand Up @@ -69,7 +67,7 @@ public class HbckChore extends ScheduledChore {
/**
* The regions have directory on FileSystem, but no region info in meta.
*/
private final List<String> orphanRegionsOnFS = new LinkedList<>();
private final Set<String> orphanRegionsOnFS = new HashSet<>();
/**
* The inconsistent regions. There are three case:
* case 1. Master thought this region opened, but no regionserver reported it.
Expand All @@ -83,7 +81,7 @@ public class HbckChore extends ScheduledChore {
* The "snapshot" is used to save the last round's HBCK checking report.
*/
private final Map<String, ServerName> orphanRegionsOnRSSnapshot = new HashMap<>();
private final List<String> orphanRegionsOnFSSnapshot = new LinkedList<>();
private final Set<String> orphanRegionsOnFSSnapshot = new HashSet<>();
private final Map<String, Pair<ServerName, List<ServerName>>> inconsistentRegionsSnapshot =
new HashMap<>();

Expand Down Expand Up @@ -153,9 +151,11 @@ private void loadRegionsFromInMemoryState() {
regionState.getStamp());
regionInfoMap.put(regionInfo.getEncodedName(), new HbckRegionInfo(metaEntry));
}
LOG.info("Loaded {} regions from in-memory state of AssignmentManager", regionStates.size());
}

private void loadRegionsFromRSReport() {
int numRegions = 0;
Map<ServerName, Set<byte[]>> rsReports = master.getAssignmentManager().getRSReports();
for (Map.Entry<ServerName, Set<byte[]>> entry : rsReports.entrySet()) {
ServerName serverName = entry.getKey();
Expand All @@ -168,7 +168,10 @@ private void loadRegionsFromRSReport() {
}
hri.addServer(hri.getMetaEntry(), serverName);
}
numRegions += entry.getValue().size();
}
LOG.info("Loaded {} regions from {} regionservers' reports and found {} orphan regions",
numRegions, rsReports.size(), orphanRegionsOnFS.size());

for (Map.Entry<String, HbckRegionInfo> entry : regionInfoMap.entrySet()) {
String encodedRegionName = entry.getKey();
Expand All @@ -191,27 +194,24 @@ private void loadRegionsFromFS() throws IOException {
Path rootDir = master.getMasterFileSystem().getRootDir();
FileSystem fs = master.getMasterFileSystem().getFileSystem();

// list all tables from HDFS
List<FileStatus> tableDirs = Lists.newArrayList();
List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
for (Path path : paths) {
tableDirs.add(fs.getFileStatus(path));
}

for (FileStatus tableDir : tableDirs) {
FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
for (FileStatus regionDir : regionDirs) {
String encodedRegionName = regionDir.getPath().getName();
int numRegions = 0;
List<Path> tableDirs = FSUtils.getTableDirs(fs, rootDir);
for (Path tableDir : tableDirs) {
List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
for (Path regionDir : regionDirs) {
String encodedRegionName = regionDir.getName();
HbckRegionInfo hri = regionInfoMap.get(encodedRegionName);
if (hri == null) {
orphanRegionsOnFS.add(encodedRegionName);
continue;
}
HbckRegionInfo.HdfsEntry hdfsEntry =
new HbckRegionInfo.HdfsEntry(regionDir.getPath(), regionDir.getModificationTime());
HbckRegionInfo.HdfsEntry hdfsEntry = new HbckRegionInfo.HdfsEntry(regionDir);
hri.setHdfsEntry(hdfsEntry);
}
numRegions += regionDirs.size();
}
LOG.info("Loaded {} tables {} regions from filesyetem and found {} orphan regions",
tableDirs.size(), numRegions, orphanRegionsOnFS.size());
}

/**
Expand All @@ -237,7 +237,7 @@ public Map<String, ServerName> getOrphanRegionsOnRS() {
/**
* @return the regions have directory on FileSystem, but no region info in meta.
*/
public List<String> getOrphanRegionsOnFS() {
public Set<String> getOrphanRegionsOnFS() {
// Need synchronized here, as this "snapshot" may be changed after checking.
rwLock.readLock().lock();
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -330,9 +330,8 @@ public static class HdfsEntry {
HdfsEntry() {
}

public HdfsEntry(Path regionDir, long regionDirModTime) {
public HdfsEntry(Path regionDir) {
this.regionDir = regionDir;
this.regionDirModTime = regionDirModTime;
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import="java.util.Date"
import="java.util.List"
import="java.util.Map"
import="java.util.Set"
import="java.util.stream.Collectors"
import="java.time.ZonedDateTime"
import="java.time.format.DateTimeFormatter"
Expand All @@ -41,7 +42,7 @@
HbckChore hbckChore = master.getHbckChore();
Map<String, Pair<ServerName, List<ServerName>>> inconsistentRegions = null;
Map<String, ServerName> orphanRegionsOnRS = null;
List<String> orphanRegionsOnFS = null;
Set<String> orphanRegionsOnFS = null;
long startTimestamp = 0;
long endTimestamp = 0;
if (hbckChore != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,18 @@
import java.util.Map;
import java.util.concurrent.Future;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.HbckChore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Before;
import org.junit.ClassRule;
Expand Down Expand Up @@ -141,4 +145,23 @@ public void testForUserTable() throws Exception {
inconsistentRegions = hbckChore.getInconsistentRegions();
assertFalse(inconsistentRegions.containsKey(regionName));
}

@Test
public void testOrphanRegionsOnFS() throws Exception {
// Verifies that HbckChore reports a region directory that exists on the
// filesystem but has no corresponding entry in meta as an "orphan region
// on FS", and that the report clears once the directory is removed.
TableName tableName = TableName.valueOf("testOrphanRegionsOnFS");
RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
Configuration conf = util.getConfiguration();

// Baseline: before creating any stray directory, a chore run must find
// no orphan regions on the filesystem.
hbckChore.choreForTesting();
assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());

// Create only the region directory on FS (no meta registration); the next
// chore run should flag exactly this region, by encoded name, as orphaned.
HRegion.createRegionDir(conf, regionInfo, FSUtils.getRootDir(conf));
hbckChore.choreForTesting();
assertEquals(1, hbckChore.getOrphanRegionsOnFS().size());
assertTrue(hbckChore.getOrphanRegionsOnFS().contains(regionInfo.getEncodedName()));

// Delete the stray directory; a subsequent chore run should report no
// orphans again, proving the report reflects current FS state.
FSUtils.deleteRegionDir(conf, new HRegionInfo(regionInfo));
hbckChore.choreForTesting();
assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());
}
}

0 comments on commit 6ec251c

Please sign in to comment.