diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
index 149dec431e0f..5febcc8daa19 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
@@ -84,7 +84,7 @@
- * An asynchronous HDFS output stream implementation which fans out data to datanode and only
- * supports writing file with only one block.
+ * An asynchronous HDFS output stream implementation which fans out data to datanodes and only
+ * supports writing files with a single block.
*
- * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
- * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the
+ * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The main
+ * usage of this class is implementing WAL, so we only expose a few HDFS configurations in the
* method. And we place it here under io package because we want to make it independent of WAL
* implementation thus easier to move it to HDFS project finally.
@@ -104,8 +104,8 @@
@InterfaceAudience.Private
public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
- // The MAX_PACKET_SIZE is 16MB but it include the header size and checksum size. So here we set a
- // smaller limit for data size.
+ // The MAX_PACKET_SIZE is 16MB, but it includes the header size and checksum size. So here we set
+ // a smaller limit for data size.
private static final int MAX_DATA_LEN = 12 * 1024 * 1024;
private final Configuration conf;
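To make the comment's arithmetic concrete: with 12 MB of data per packet, the checksum overhead stays far below the 16 MB packet ceiling. A minimal sketch, assuming the usual HDFS defaults of 512-byte checksum chunks and 4-byte CRCs (neither value appears in this diff):

```java
public class PacketBudgetSketch {
  public static void main(String[] args) {
    final int maxPacketSize = 16 * 1024 * 1024; // the MAX_PACKET_SIZE ceiling
    final int maxDataLen = 12 * 1024 * 1024;    // MAX_DATA_LEN from the diff
    final int bytesPerChecksum = 512;           // assumed dfs.bytes-per-checksum default
    final int checksumSize = 4;                 // assumed per-chunk CRC size

    int chunks = (maxDataLen + bytesPerChecksum - 1) / bytesPerChecksum; // 24576 chunks
    int checksumBytes = chunks * checksumSize;  // 96 KB of checksums for 12 MB of data
    System.out.println("data + checksums = " + (maxDataLen + checksumBytes)
        + " bytes of " + maxPacketSize + ", leaving room for the packet header");
  }
}
```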
@@ -173,7 +173,7 @@ public Callback(CompletableFuture<Long> future, long ackedLength,
private long nextPacketOffsetInBlock = 0L;
// the length of the trailing partial chunk, this is because the packet start offset must be
- // aligned with the length of checksum chunk so we need to resend the same data.
+ // aligned with the length of checksum chunk, so we need to resend the same data.
private int trailingPartialChunkLength = 0;
private long nextPacketSeqno = 0L;
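The alignment the comment describes can be sketched as below. Assuming 512-byte checksum chunks (an HDFS default, not stated in this diff), a packet whose data ends mid-chunk leaves a trailing partial chunk that must be resent at the start of the next packet:

```java
public class PartialChunkSketch {
  public static void main(String[] args) {
    final int bytesPerChecksum = 512; // assumed checksum chunk size
    long packetDataEnd = 1000;        // illustrative offset where a packet's data ends
    // The tail of the last, incomplete chunk has to be resent with the next packet.
    int trailingPartialChunkLength = (int) (packetDataEnd % bytesPerChecksum); // 488
    // So the next packet starts back at the previous chunk boundary.
    long nextPacketOffsetInBlock = packetDataEnd - trailingPartialChunkLength; // 512
    System.out.println(trailingPartialChunkLength + " bytes resent, next packet at offset "
        + nextPacketOffsetInBlock);
  }
}
```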
@@ -437,7 +437,7 @@ private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
checksumBuf.release();
headerBuf.release();
- // This method takes ownership of the dataBuf so we need release it before returning.
+ // This method takes ownership of the dataBuf, so we need to release it before returning.
dataBuf.release();
return;
}
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
index 0014185b85c0..ff457cb5074e 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
@@ -72,14 +72,14 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf,
* file's primary node. If all is well, it should return near immediately. But, as is common, it
* is the very primary node that has crashed and so the namenode will be stuck waiting on a socket
* timeout before it will ask another datanode to start the recovery. It does not help if we call
- * recoverLease in the meantime and in particular, subsequent to the socket timeout, a
- * recoverLease invocation will cause us to start over from square one (possibly waiting on socket
- * timeout against primary node). So, in the below, we do the following: 1. Call recoverLease. 2.
- * If it returns true, break. 3. If it returns false, wait a few seconds and then call it again.
- * 4. If it returns true, break. 5. If it returns false, wait for what we think the datanode
- * socket timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it
- * returns false, repeat starting at step 5. above. If HDFS-4525 is available, call it every
- * second and we might be able to exit early.
+ * recoverLease in the meantime; in particular, after the socket timeout, a recoverLease
+ * invocation will cause us to start over from square one (possibly waiting on socket timeout
+ * against primary node). So, in the below, we do the following: 1. Call recoverLease. 2. If it
+ * returns true, break. 3. If it returns false, wait a few seconds and then call it again. 4. If
+ * it returns true, break. 5. If it returns false, wait for what we think the datanode socket
+ * timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it returns
+ * false, repeat starting at step 5 above. If HDFS-4525 is available, call it every second, and
+ * we might be able to exit early.
*/
private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
final Configuration conf, final CancelableProgressable reporter) throws IOException {
@@ -89,10 +89,10 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina
// usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
// beyond that limit 'to be safe'.
long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
- // This setting should be a little bit above what the cluster dfs heartbeat is set to.
+ // This setting should be a little above what the cluster dfs heartbeat is set to.
long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
// This should be set to how long it'll take for us to timeout against primary datanode if it
- // is dead. We set it to 64 seconds, 4 second than the default READ_TIMEOUT in HDFS, the
+ // is dead. We set it to 64 seconds, 4 seconds more than the default READ_TIMEOUT in HDFS, the
// default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this
- // timeout, then further recovery will take liner backoff with this base, to avoid endless
+ // timeout, then further recovery will take linear backoff with this base, to avoid endless
// preemptions when this value is not properly configured.
@@ -118,7 +118,7 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina
Thread.sleep(firstPause);
} else {
// Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
- // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
+ // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though).
long localStartWaiting = EnvironmentEdgeManager.currentTime();
while (
(EnvironmentEdgeManager.currentTime() - localStartWaiting)
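A minimal sketch of the retry schedule that the reworded javadoc spells out in steps 1-7. recoverLease and isFileClosed are real DistributedFileSystem methods; the class name and the "hbase.lease.recovery.dfs.timeout" key are assumptions here, and the real logic (reporter callbacks, logging, interrupt handling) lives in recoverDFSFileLease:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Steps 1-7 from the javadoc above, with the HDFS-4525 isFileClosed poll. */
  static boolean recoverLease(DistributedFileSystem dfs, Path p, Configuration conf)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + start;
    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 64000); // assumed key
    for (int nbAttempt = 0; System.currentTimeMillis() < recoveryTimeout; nbAttempt++) {
      if (dfs.recoverLease(p)) { // steps 1, 2, 4, 6: break out as soon as it returns true
        return true;
      }
      if (nbAttempt == 0) {
        Thread.sleep(firstPause); // step 3: short first pause
      } else {
        // Steps 5 and 7: wait out the datanode socket timeout (linear backoff),
        // polling isFileClosed every second so we can exit early.
        long waitUntil = System.currentTimeMillis() + subsequentPause * nbAttempt;
        while (System.currentTimeMillis() < waitUntil) {
          if (dfs.isFileClosed(p)) {
            return true;
          }
          Thread.sleep(1000);
        }
      }
    }
    return false;
  }
}
```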
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index b6abdd5c7f6e..a4560cc595a2 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -50,13 +50,13 @@
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
/**
- * The base class for load balancers. It provides the the functions used to by
- * {@code AssignmentManager} to assign regions in the edge cases. It doesn't provide an
- * implementation of the actual balancing algorithm.
+ * The base class for load balancers. It provides the functions used by {@code AssignmentManager}
+ * to assign regions in the edge cases. It doesn't provide an implementation of the actual balancing
+ * algorithm.
*
- * Since 3.0.0, all the balancers will be wrapped inside a {@code RSGroupBasedLoadBalancer}, it will
+ * Since 3.0.0, all the balancers will be wrapped inside a {@code RSGroupBasedLoadBalancer}, which will
* be in charge of the synchronization of balancing and configuration changing, so we do not need to
- * synchronized by ourselves.
+ * synchronize by ourselves.
*/
@InterfaceAudience.Private
public abstract class BaseLoadBalancer implements LoadBalancer {
@@ -297,7 +297,7 @@ public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName>
assignments.put(server, new ArrayList<>(regions.keySet()));
return assignments;
}
- // Group all of the old assignments by their hostname.
+ // Group all the old assignments by their hostname.
// We can't group directly by ServerName since the servers all have
// new start-codes.
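The grouping step can be sketched as follows: restarted servers come back with new start-codes, so old ServerName keys cannot be matched directly and are bucketed by hostname first. A simplified illustration, not the method body (ServerName is the real HBase type; everything else is pared down):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ServerName;

public class RetainGroupingSketch {
  /** Bucket old region owners by hostname, since start-codes change on restart. */
  static Map<String, List<ServerName>> groupByHostname(List<ServerName> oldServers) {
    Map<String, List<ServerName>> byHost = new HashMap<>();
    for (ServerName sn : oldServers) {
      byHost.computeIfAbsent(sn.getHostname(), k -> new ArrayList<>()).add(sn);
    }
    return byHost;
  }
}
```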
@@ -484,7 +484,7 @@ private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo reg
}
/**
- * Round robin a list of regions to a list of servers
+ * Round-robin a list of regions to a list of servers
*/
private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions,
List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
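For reference, a minimal generic sketch of what "round-robin a list of regions to a list of servers" means: deal the regions out to the servers in turn. The real method also updates BalancerClusterState and respects prior assignments; this shows only the core dealing loop:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinSketch {
  /** Deal each region to the next server in turn. */
  static <R, S> Map<S, List<R>> roundRobin(List<R> regions, List<S> servers) {
    Map<S, List<R>> assignments = new HashMap<>();
    for (int i = 0; i < regions.size(); i++) {
      S server = servers.get(i % servers.size());
      assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(regions.get(i));
    }
    return assignments;
  }
}
```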