From a3153bfde530f804046378001af78dab5a0f116a Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 10 Jun 2022 15:47:23 +0200 Subject: [PATCH] HBASE-27095 HbckChore should produce a report In #4470 for HBASE-26192, it was noted that the HbckChore is kind of a pain to use and test because it maintains a bunch of local state. By contrast, the CatalogJanitorChore makes a nice self-contained report. Let's update HbckChore to do the same. Signed-off-by: Andrew Purtell --- .../apache/hadoop/hbase/master/HMaster.java | 1 + .../hbase/master/MasterRpcServices.java | 1 + .../hbase/master/{ => hbck}/HbckChore.java | 213 +++++------------- .../hadoop/hbase/master/hbck/HbckReport.java | 110 +++++++++ .../hbase/master/janitor/CatalogJanitor.java | 10 +- ...{Report.java => CatalogJanitorReport.java} | 2 +- .../hbase/master/janitor/MetaFixer.java | 6 +- .../master/janitor/ReportMakingVisitor.java | 4 +- .../resources/hbase-webapps/master/hbck.jsp | 125 +++++----- .../apache/hadoop/hbase/client/TestHbck.java | 20 +- .../master/TestMasterChoreScheduled.java | 1 + .../master/assignment/TestHbckChore.java | 48 ++-- .../master/janitor/TestCatalogJanitor.java | 2 +- .../janitor/TestCatalogJanitorCluster.java | 8 +- .../hbase/master/janitor/TestMetaFixer.java | 32 +-- 15 files changed, 304 insertions(+), 279 deletions(-) rename hbase-server/src/main/java/org/apache/hadoop/hbase/master/{ => hbck}/HbckChore.java (56%) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckReport.java rename hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/{Report.java => CatalogJanitorReport.java} (99%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 195a504fb84f..b5e2c1c878d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -116,6 
+116,7 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner; import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore; +import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 6beb5361da76..401ed0d6b803 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.MetaFixer; import org.apache.hadoop.hbase.master.locking.LockProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java similarity index 56% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java index 41e0d3addffe..0a3d310c98c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java @@ -15,16 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master; +package org.apache.hadoop.hbase.master.hbck; import java.io.IOException; +import java.time.Instant; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HbckRegionInfo; @@ -55,51 +57,14 @@ public class HbckChore extends ScheduledChore { private final MasterServices master; /** - * This map contains the state of all hbck items. It maps from encoded region name to - * HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and - * correct consistency (hdfs/meta/deployment) problems. + * Saved report from last time this chore ran. Check its date. */ - private final Map regionInfoMap = new HashMap<>(); - - private final Set disabledTableRegions = new HashSet<>(); - private final Set splitParentRegions = new HashSet<>(); - - /** - * The regions only opened on RegionServers, but no region info in meta. - */ - private final Map orphanRegionsOnRS = new HashMap<>(); - /** - * The regions have directory on FileSystem, but no region info in meta. - */ - private final Map orphanRegionsOnFS = new HashMap<>(); - /** - * The inconsistent regions. There are three case: case 1. Master thought this region opened, but - * no regionserver reported it. case 2. 
Master thought this region opened on Server1, but - * regionserver reported Server2 case 3. More than one regionservers reported opened this region - */ - private final Map>> inconsistentRegions = - new HashMap<>(); - - /** - * The "snapshot" is used to save the last round's HBCK checking report. - */ - private final Map orphanRegionsOnRSSnapshot = new HashMap<>(); - private final Map orphanRegionsOnFSSnapshot = new HashMap<>(); - private final Map>> inconsistentRegionsSnapshot = - new HashMap<>(); - - /** - * The "snapshot" may be changed after checking. And this checking report "snapshot" may be - * accessed by web ui. Use this rwLock to synchronize. - */ - ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(); + private volatile HbckReport lastReport = null; /** * When running, the "snapshot" may be changed when this round's checking finish. */ private volatile boolean running = false; - private volatile long checkingStartTimestamp = 0; - private volatile long checkingEndTimestamp = 0; private boolean disabled = false; @@ -115,39 +80,47 @@ public HbckChore(MasterServices master) { } } + /** + * @return Returns last published Report that comes of last successful execution of this chore. + */ + public HbckReport getLastReport() { + return lastReport; + } + @Override protected synchronized void chore() { if (isDisabled() || isRunning()) { LOG.warn("hbckChore is either disabled or is already running. 
Can't run the chore"); return; } - regionInfoMap.clear(); - disabledTableRegions.clear(); - splitParentRegions.clear(); - orphanRegionsOnRS.clear(); - orphanRegionsOnFS.clear(); - inconsistentRegions.clear(); - checkingStartTimestamp = EnvironmentEdgeManager.currentTime(); running = true; + final HbckReport report = new HbckReport(); + report.setCheckingStartTimestamp(Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime())); try { - loadRegionsFromInMemoryState(); - loadRegionsFromRSReport(); + loadRegionsFromInMemoryState(report); + loadRegionsFromRSReport(report); try { - loadRegionsFromFS(scanForMergedParentRegions()); + loadRegionsFromFS(scanForMergedParentRegions(), report); } catch (IOException e) { LOG.warn("Failed to load the regions from filesystem", e); } - saveCheckResultToSnapshot(); } catch (Throwable t) { LOG.warn("Unexpected", t); } + report.setCheckingEndTimestamp(Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime())); + this.lastReport = report; running = false; - updateAssignmentManagerMetrics(); + updateAssignmentManagerMetrics(report); } - // This function does the sanity checks of making sure the chore is not run when it is - // disabled or when it's already running. It returns whether the chore was actually run or not. - protected boolean runChore() { + /** + * Request execution of this chore's action. + * @return {@code true} if the chore was executed, {@code false} if the chore is disabled or + * already running. + */ + public boolean runChore() { + // This function does the sanity checks of making sure the chore is not run when it is + // disabled or when it's already running. It returns whether the chore was actually run or not. if (isDisabled() || isRunning()) { if (isDisabled()) { LOG.warn("hbck chore is disabled! 
Set " + HBCK_CHORE_INTERVAL + " > 0 to enable it."); @@ -168,25 +141,6 @@ public boolean isDisabled() { return this.disabled; } - private void saveCheckResultToSnapshot() { - // Need synchronized here, as this "snapshot" may be access by web ui. - rwLock.writeLock().lock(); - try { - orphanRegionsOnRSSnapshot.clear(); - orphanRegionsOnRS.entrySet() - .forEach(e -> orphanRegionsOnRSSnapshot.put(e.getKey(), e.getValue())); - orphanRegionsOnFSSnapshot.clear(); - orphanRegionsOnFS.entrySet() - .forEach(e -> orphanRegionsOnFSSnapshot.put(e.getKey(), e.getValue())); - inconsistentRegionsSnapshot.clear(); - inconsistentRegions.entrySet() - .forEach(e -> inconsistentRegionsSnapshot.put(e.getKey(), e.getValue())); - checkingEndTimestamp = EnvironmentEdgeManager.currentTime(); - } finally { - rwLock.writeLock().unlock(); - } - } - /** * Scan hbase:meta to get set of merged parent regions, this is a very heavy scan. * @return Return generated {@link HashSet} @@ -209,7 +163,7 @@ private HashSet scanForMergedParentRegions() throws IOException { return mergedParentRegions; } - private void loadRegionsFromInMemoryState() { + private void loadRegionsFromInMemoryState(final HbckReport report) { List regionStates = master.getAssignmentManager().getRegionStates().getRegionStates(); for (RegionState regionState : regionStates) { @@ -217,18 +171,19 @@ private void loadRegionsFromInMemoryState() { if ( master.getTableStateManager().isTableState(regionInfo.getTable(), TableState.State.DISABLED) ) { - disabledTableRegions.add(regionInfo.getRegionNameAsString()); + report.getDisabledTableRegions().add(regionInfo.getRegionNameAsString()); } // Check both state and regioninfo for split status, see HBASE-26383 if (regionState.isSplit() || regionInfo.isSplit()) { - splitParentRegions.add(regionInfo.getRegionNameAsString()); + report.getSplitParentRegions().add(regionInfo.getRegionNameAsString()); } HbckRegionInfo.MetaEntry metaEntry = new HbckRegionInfo.MetaEntry(regionInfo, 
regionState.getServerName(), regionState.getStamp()); - regionInfoMap.put(regionInfo.getEncodedName(), new HbckRegionInfo(metaEntry)); + report.getRegionInfoMap().put(regionInfo.getEncodedName(), new HbckRegionInfo(metaEntry)); } LOG.info("Loaded {} regions ({} disabled, {} split parents) from in-memory state", - regionStates.size(), disabledTableRegions.size(), splitParentRegions.size()); + regionStates.size(), report.getDisabledTableRegions().size(), + report.getSplitParentRegions().size()); if (LOG.isDebugEnabled()) { Map stateCountMap = new HashMap<>(); for (RegionState regionState : regionStates) { @@ -246,22 +201,23 @@ private void loadRegionsFromInMemoryState() { } if (LOG.isTraceEnabled()) { for (RegionState regionState : regionStates) { - LOG.trace("{}: {}, serverName=", regionState.getRegion(), regionState.getState(), + LOG.trace("{}: {}, serverName={}", regionState.getRegion(), regionState.getState(), regionState.getServerName()); } } } - private void loadRegionsFromRSReport() { + private void loadRegionsFromRSReport(final HbckReport report) { int numRegions = 0; Map> rsReports = master.getAssignmentManager().getRSReports(); for (Map.Entry> entry : rsReports.entrySet()) { ServerName serverName = entry.getKey(); for (byte[] regionName : entry.getValue()) { String encodedRegionName = RegionInfo.encodeRegionName(regionName); - HbckRegionInfo hri = regionInfoMap.get(encodedRegionName); + HbckRegionInfo hri = report.getRegionInfoMap().get(encodedRegionName); if (hri == null) { - orphanRegionsOnRS.put(RegionInfo.getRegionNameAsString(regionName), serverName); + report.getOrphanRegionsOnRS().put(RegionInfo.getRegionNameAsString(regionName), + serverName); continue; } hri.addServer(hri.getMetaEntry(), serverName); @@ -269,9 +225,9 @@ private void loadRegionsFromRSReport() { numRegions += entry.getValue().size(); } LOG.info("Loaded {} regions from {} regionservers' reports and found {} orphan regions", - numRegions, rsReports.size(), orphanRegionsOnRS.size()); + 
numRegions, rsReports.size(), report.getOrphanRegionsOnRS().size()); - for (Map.Entry entry : regionInfoMap.entrySet()) { + for (Map.Entry entry : report.getRegionInfoMap().entrySet()) { HbckRegionInfo hri = entry.getValue(); ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); if (locationInMeta == null) { @@ -279,29 +235,30 @@ private void loadRegionsFromRSReport() { } if (hri.getDeployedOn().size() == 0) { // skip the offline region which belong to disabled table. - if (disabledTableRegions.contains(hri.getRegionNameAsString())) { + if (report.getDisabledTableRegions().contains(hri.getRegionNameAsString())) { continue; } // skip the split parent regions - if (splitParentRegions.contains(hri.getRegionNameAsString())) { + if (report.getSplitParentRegions().contains(hri.getRegionNameAsString())) { continue; } // Master thought this region opened, but no regionserver reported it. - inconsistentRegions.put(hri.getRegionNameAsString(), + report.getInconsistentRegions().put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, new LinkedList<>())); } else if (hri.getDeployedOn().size() > 1) { // More than one regionserver reported opened this region - inconsistentRegions.put(hri.getRegionNameAsString(), + report.getInconsistentRegions().put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, hri.getDeployedOn())); } else if (!hri.getDeployedOn().get(0).equals(locationInMeta)) { // Master thought this region opened on Server1, but regionserver reported Server2 - inconsistentRegions.put(hri.getRegionNameAsString(), + report.getInconsistentRegions().put(hri.getRegionNameAsString(), new Pair<>(locationInMeta, hri.getDeployedOn())); } } } - private void loadRegionsFromFS(final HashSet mergedParentRegions) throws IOException { + private void loadRegionsFromFS(final HashSet mergedParentRegions, final HbckReport report) + throws IOException { Path rootDir = master.getMasterFileSystem().getRootDir(); FileSystem fs = 
master.getMasterFileSystem().getFileSystem(); @@ -315,27 +272,27 @@ private void loadRegionsFromFS(final HashSet mergedParentRegions) throws LOG.warn("Failed get of encoded name from {}", regionDir); continue; } - HbckRegionInfo hri = regionInfoMap.get(encodedRegionName); + HbckRegionInfo hri = report.getRegionInfoMap().get(encodedRegionName); // If it is not in in-memory database and not a merged region, // report it as an orphan region. if (hri == null && !mergedParentRegions.contains(encodedRegionName)) { - orphanRegionsOnFS.put(encodedRegionName, regionDir); + report.getOrphanRegionsOnFS().put(encodedRegionName, regionDir); continue; } } numRegions += regionDirs.size(); } LOG.info("Loaded {} tables {} regions from filesystem and found {} orphan regions", - tableDirs.size(), numRegions, orphanRegionsOnFS.size()); + tableDirs.size(), numRegions, report.getOrphanRegionsOnFS().size()); } - private void updateAssignmentManagerMetrics() { + private void updateAssignmentManagerMetrics(final HbckReport report) { master.getAssignmentManager().getAssignmentManagerMetrics() - .updateOrphanRegionsOnRs(getOrphanRegionsOnRS().size()); + .updateOrphanRegionsOnRs(report.getOrphanRegionsOnRS().size()); master.getAssignmentManager().getAssignmentManagerMetrics() - .updateOrphanRegionsOnFs(getOrphanRegionsOnFS().size()); + .updateOrphanRegionsOnFs(report.getOrphanRegionsOnFS().size()); master.getAssignmentManager().getAssignmentManagerMetrics() - .updateInconsistentRegions(getInconsistentRegions().size()); + .updateInconsistentRegions(report.getInconsistentRegions().size()); } /** @@ -344,62 +301,4 @@ private void updateAssignmentManagerMetrics() { public boolean isRunning() { return running; } - - /** - * @return the regions only opened on RegionServers, but no region info in meta. - */ - public Map getOrphanRegionsOnRS() { - // Need synchronized here, as this "snapshot" may be changed after checking. 
- rwLock.readLock().lock(); - try { - return this.orphanRegionsOnRSSnapshot; - } finally { - rwLock.readLock().unlock(); - } - } - - /** - * @return the regions have directory on FileSystem, but no region info in meta. - */ - public Map getOrphanRegionsOnFS() { - // Need synchronized here, as this "snapshot" may be changed after checking. - rwLock.readLock().lock(); - try { - return this.orphanRegionsOnFSSnapshot; - } finally { - rwLock.readLock().unlock(); - } - } - - /** - * Found the inconsistent regions. There are three case: case 1. Master thought this region - * opened, but no regionserver reported it. case 2. Master thought this region opened on Server1, - * but regionserver reported Server2 case 3. More than one regionservers reported opened this - * region - * @return the map of inconsistent regions. Key is the region name. Value is a pair of location in - * meta and the regionservers which reported opened this region. - */ - public Map>> getInconsistentRegions() { - // Need synchronized here, as this "snapshot" may be changed after checking. - rwLock.readLock().lock(); - try { - return this.inconsistentRegionsSnapshot; - } finally { - rwLock.readLock().unlock(); - } - } - - /** - * Used for web ui to show when the HBCK checking started. - */ - public long getCheckingStartTimestamp() { - return this.checkingStartTimestamp; - } - - /** - * Used for web ui to show when the HBCK checking report generated. - */ - public long getCheckingEndTimestamp() { - return this.checkingEndTimestamp; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckReport.java new file mode 100644 index 000000000000..6971edcd053e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckReport.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.hbck; + +import java.time.Instant; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.HbckRegionInfo; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * The result of an {@link HbckChore} execution. + */ +@InterfaceAudience.Private +public class HbckReport { + + private final Map regionInfoMap = new HashMap<>(); + private final Set disabledTableRegions = new HashSet<>(); + private final Set splitParentRegions = new HashSet<>(); + private final Map orphanRegionsOnRS = new HashMap<>(); + private final Map orphanRegionsOnFS = new HashMap<>(); + private final Map>> inconsistentRegions = + new HashMap<>(); + + private Instant checkingStartTimestamp = null; + private Instant checkingEndTimestamp = null; + + /** + * Used for web ui to show when the HBCK checking started. 
+ */ + public Instant getCheckingStartTimestamp() { + return checkingStartTimestamp; + } + + public void setCheckingStartTimestamp(Instant checkingStartTimestamp) { + this.checkingStartTimestamp = checkingStartTimestamp; + } + + /** + * Used for web ui to show when the HBCK checking report generated. + */ + public Instant getCheckingEndTimestamp() { + return checkingEndTimestamp; + } + + public void setCheckingEndTimestamp(Instant checkingEndTimestamp) { + this.checkingEndTimestamp = checkingEndTimestamp; + } + + /** + * This map contains the state of all hbck items. It maps from encoded region name to + * HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and + * correct consistency (hdfs/meta/deployment) problems. + */ + public Map getRegionInfoMap() { + return regionInfoMap; + } + + public Set getDisabledTableRegions() { + return disabledTableRegions; + } + + public Set getSplitParentRegions() { + return splitParentRegions; + } + + /** + * The regions only opened on RegionServers, but no region info in meta. + */ + public Map getOrphanRegionsOnRS() { + return orphanRegionsOnRS; + } + + /** + * The regions have directory on FileSystem, but no region info in meta. + */ + public Map getOrphanRegionsOnFS() { + return orphanRegionsOnFS; + } + + /** + * The inconsistent regions. There are three case: case 1. Master thought this region opened, but + * no regionserver reported it. case 2. Master thought this region opened on Server1, but + * regionserver reported Server2 case 3. 
More than one regionservers reported opened this region + */ + public Map>> getInconsistentRegions() { + return inconsistentRegions; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java index 064b2ef66425..d5d8f9e0a62b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java @@ -85,7 +85,7 @@ public class CatalogJanitor extends ScheduledChore { * Saved report from last hbase:meta scan to completion. May be stale if having trouble completing * scan. Check its date. */ - private volatile Report lastReport; + private volatile CatalogJanitorReport lastReport; public CatalogJanitor(final MasterServices services) { super("CatalogJanitor-" + services.getServerName().toShortString(), services, @@ -229,10 +229,10 @@ && cleanParent(e.getKey(), e.getValue()) /** * Scan hbase:meta. - * @return Return generated {@link Report} + * @return Return generated {@link CatalogJanitorReport} */ // will be override in tests. - protected Report scanForReport() throws IOException { + protected CatalogJanitorReport scanForReport() throws IOException { ReportMakingVisitor visitor = new ReportMakingVisitor(this.services); // Null tablename means scan all of meta. MetaTableAccessor.scanMetaForTableRegions(this.services.getConnection(), visitor, null); @@ -242,7 +242,7 @@ protected Report scanForReport() throws IOException { /** * @return Returns last published Report that comes of last successful scan of hbase:meta. 
*/ - public Report getLastReport() { + public CatalogJanitorReport getLastReport() { return this.lastReport; } @@ -495,7 +495,7 @@ public static void main(String[] args) throws IOException { t.put(p); } MetaTableAccessor.scanMetaForTableRegions(connection, visitor, null); - Report report = visitor.getReport(); + CatalogJanitorReport report = visitor.getReport(); LOG.info(report != null ? report.toString() : "empty"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/Report.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitorReport.java similarity index 99% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/Report.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitorReport.java index b16429c229aa..6a9eb7e1b4ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/Report.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitorReport.java @@ -34,7 +34,7 @@ * Report made by ReportMakingVisitor */ @InterfaceAudience.Private -public class Report { +public class CatalogJanitorReport { private final long now = EnvironmentEdgeManager.currentTime(); // Keep Map of found split parents. These are candidates for cleanup. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 1e4ae5873adc..77410c3d91ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -76,7 +76,7 @@ public MetaFixer(MasterServices masterServices) { } public void fix() throws IOException { - Report report = this.masterServices.getCatalogJanitor().getLastReport(); + CatalogJanitorReport report = this.masterServices.getCatalogJanitor().getLastReport(); if (report == null) { LOG.info("CatalogJanitor has not generated a report yet; run 'catalogjanitor_run' in " + "shell or wait until CatalogJanitor chore runs."); @@ -93,7 +93,7 @@ public void fix() throws IOException { * If hole, it papers it over by adding a region in the filesystem and to hbase:meta. Does not * assign. */ - void fixHoles(Report report) { + void fixHoles(CatalogJanitorReport report) { final List> holes = report.getHoles(); if (holes.isEmpty()) { LOG.info("CatalogJanitor Report contains no holes to fix. Skipping."); @@ -229,7 +229,7 @@ private static List createMetaEntries(final MasterServices masterSer /** * Fix overlaps noted in CJ consistency report. 
*/ - List fixOverlaps(Report report) throws IOException { + List fixOverlaps(CatalogJanitorReport report) throws IOException { List pidList = new ArrayList<>(); for (Set regions : calculateMerges(maxMergeCount, report.getOverlaps())) { RegionInfo[] regionsArray = regions.toArray(new RegionInfo[] {}); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java index 3fb45f99c9e8..a61c90106854 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java @@ -53,7 +53,7 @@ class ReportMakingVisitor implements MetaTableAccessor.CloseableVisitor { /** * Report is not done until after the close has been called. */ - private Report report = new Report(); + private CatalogJanitorReport report = new CatalogJanitorReport(); /** * RegionInfo from previous row. @@ -76,7 +76,7 @@ class ReportMakingVisitor implements MetaTableAccessor.CloseableVisitor { /** * Do not call until after {@link #close()}. Will throw a {@link RuntimeException} if you do. 
*/ - Report getReport() { + CatalogJanitorReport getReport() { if (!this.closed) { throw new RuntimeException("Report not ready until after close()"); } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 69b95e1a118e..21128cea0471 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -20,7 +20,6 @@ <%@ page contentType="text/html;charset=UTF-8" import="java.time.Instant" import="java.time.ZoneId" - import="java.util.Date" import="java.util.List" import="java.util.Map" import="java.util.stream.Collectors" @@ -29,14 +28,17 @@ %> <%@ page import="org.apache.hadoop.fs.Path" %> <%@ page import="org.apache.hadoop.hbase.client.RegionInfo" %> -<%@ page import="org.apache.hadoop.hbase.master.HbckChore" %> +<%@ page import="org.apache.hadoop.hbase.master.hbck.HbckChore" %> +<%@ page import="org.apache.hadoop.hbase.master.hbck.HbckReport" %> <%@ page import="org.apache.hadoop.hbase.master.HMaster" %> <%@ page import="org.apache.hadoop.hbase.master.ServerManager" %> <%@ page import="org.apache.hadoop.hbase.ServerName" %> <%@ page import="org.apache.hadoop.hbase.util.Bytes" %> <%@ page import="org.apache.hadoop.hbase.util.Pair" %> <%@ page import="org.apache.hadoop.hbase.master.janitor.CatalogJanitor" %> -<%@ page import="org.apache.hadoop.hbase.master.janitor.Report" %> +<%@ page import="org.apache.hadoop.hbase.master.janitor.CatalogJanitorReport" %> +<%@ page import="java.util.Optional" %> +<%@ page import="org.apache.hadoop.hbase.util.EnvironmentEdgeManager" %> <% final String cacheParameterValue = request.getParameter("cache"); final HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); @@ -55,26 +57,19 @@ } } HbckChore hbckChore = master.getHbckChore(); - Map>> inconsistentRegions = null; - Map orphanRegionsOnRS = null; - Map orphanRegionsOnFS = null; - long 
startTimestamp = 0; - long endTimestamp = 0; - if (hbckChore != null) { - inconsistentRegions = hbckChore.getInconsistentRegions(); - orphanRegionsOnRS = hbckChore.getOrphanRegionsOnRS(); - orphanRegionsOnFS = hbckChore.getOrphanRegionsOnFS(); - startTimestamp = hbckChore.getCheckingStartTimestamp(); - endTimestamp = hbckChore.getCheckingEndTimestamp(); - } - ZonedDateTime zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimestamp), - ZoneId.systemDefault()); - String iso8601start = startTimestamp == 0? "-1": zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); - zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(endTimestamp), - ZoneId.systemDefault()); - String iso8601end = startTimestamp == 0? "-1": zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); + HbckReport hbckReport = hbckChore == null ? null : hbckChore.getLastReport(); + String hbckReportStartTime = Optional.ofNullable(hbckReport) + .map(HbckReport::getCheckingStartTimestamp) + .map(start -> ZonedDateTime.ofInstant(start, ZoneId.systemDefault())) + .map(zdt -> zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)) + .orElse(null); + String hbckReportEndTime = Optional.ofNullable(hbckReport) + .map(HbckReport::getCheckingEndTimestamp) + .map(start -> ZonedDateTime.ofInstant(start, ZoneId.systemDefault())) + .map(zdt -> zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)) + .orElse(null); CatalogJanitor cj = master.getCatalogJanitor(); - Report report = cj == null? null: cj.getLastReport(); + CatalogJanitorReport cjReport = cj == null? null: cj.getLastReport(); final ServerManager serverManager = master.getServerManager(); %> @@ -109,20 +104,22 @@ - <% if (inconsistentRegions != null && inconsistentRegions.size() > 0) { %> + <% if (hbckReport != null && hbckReport.getInconsistentRegions().size() > 0) { %>