diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java
index 017586df582a..bc76a9df37e6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java
@@ -20,11 +20,11 @@
import java.util.Comparator;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
-import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
/**
* A comparator for case where {@link ByteBufferKeyValue} is prevalent type (BBKV
@@ -71,7 +71,6 @@ public BBKVComparator(Comparator fallback) {
@Override
public int compare(Object l, Object r) {
- // LOG.info("ltype={} rtype={}", l, r);
if ((l instanceof ByteBufferKeyValue) && (r instanceof ByteBufferKeyValue)) {
return compare((ByteBufferKeyValue)l, (ByteBufferKeyValue)r, false);
}
@@ -81,7 +80,7 @@ public int compare(Object l, Object r) {
// TODO: Come back here. We get a few percentage points extra of throughput if this is a
// private method.
- static final int compare(ByteBufferKeyValue left, ByteBufferKeyValue right,
+ static int compare(ByteBufferKeyValue left, ByteBufferKeyValue right,
boolean ignoreSequenceid) {
// NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not
// sharing gets us a few percent more throughput in compares. If changes here or there, make
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 3529d54f7dfc..83a868d9caf1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -54,7 +54,7 @@ static CellComparator getInstance() {
/**
* Compare cells.
* @param ignoreSequenceid True if we are to compare the key portion only and ignore
- * the sequenceid. Set to false to compare key and consider sequenceid.
+ * the sequenceid. Set to false to compare key and consider sequenceid.
* @return 0 if equal, -1 if a < b, and +1 if a > b.
*/
int compare(Cell leftCell, Cell rightCell, boolean ignoreSequenceid);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index 707d919c2804..c647318716f1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase;
import java.util.Comparator;
@@ -23,13 +22,12 @@
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
+import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
/**
* Compare two HBase cells. Do not use this method comparing <code>-ROOT-</code> or
@@ -52,11 +50,13 @@
@InterfaceStability.Evolving
public class CellComparatorImpl implements CellComparator {
static final Logger LOG = LoggerFactory.getLogger(CellComparatorImpl.class);
+
/**
* Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
* of KeyValue only.
*/
public static final CellComparatorImpl COMPARATOR = new CellComparatorImpl();
+
/**
* A {@link CellComparatorImpl} for <code>hbase:meta</code> catalog table
* {@link KeyValue}s.
@@ -342,7 +342,7 @@ private static int compareRows(byte[] left, int loffset, int llength, byte[] rig
return -1;
} else if (rightDelimiter < 0 && leftDelimiter >= 0) {
return 1;
- } else if (leftDelimiter < 0 && rightDelimiter < 0) {
+ } else if (leftDelimiter < 0) {
return 0;
}
}
@@ -365,7 +365,7 @@ private static int compareRows(byte[] left, int loffset, int llength, byte[] rig
return -1;
} else if (rightDelimiter < 0 && leftDelimiter >= 0) {
return 1;
- } else if (leftDelimiter < 0 && rightDelimiter < 0) {
+ } else if (leftDelimiter < 0) {
return 0;
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
index ea4ba12f6661..d76ef9fd4443 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
@@ -31,7 +31,7 @@
* We cannot have Guava classes in our API hence this Type.
*/
@InterfaceAudience.Public
-public class Address implements Comparable<Address> {
+public final class Address implements Comparable<Address> {
private HostAndPort hostAndPort;
private Address(HostAndPort hostAndPort) {
@@ -62,7 +62,7 @@ public String toString() {
/**
* If hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123.
* @return if host looks like it is resolved -- not an IP -- then strip the domain portion
- * otherwise returns same as {@link #toString()}}
+ * otherwise returns same as {@link #toString()}}
*/
public String toStringWithoutDomain() {
String hostname = getHostname();
@@ -100,7 +100,10 @@ public int hashCode() {
@Override
public int compareTo(Address that) {
int compare = this.getHostname().compareTo(that.getHostname());
- if (compare != 0) return compare;
+ if (compare != 0) {
+ return compare;
+ }
+
return this.getPort() - that.getPort();
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index 14ef945d75a1..b967db7f27dc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -40,7 +40,7 @@ public class SpanReceiverHost {
private Configuration conf;
private boolean closed = false;
- private static enum SingletonHolder {
+ private enum SingletonHolder {
INSTANCE;
final transient Object lock = new Object();
transient SpanReceiverHost host = null;
@@ -78,7 +78,6 @@ public static Configuration getConfiguration(){
/**
* Reads the names of classes specified in the {@code hbase.trace.spanreceiver.classes} property
* and instantiates and registers them with the Tracer.
- *
*/
public void loadSpanReceivers() {
String[] receiverNames = conf.getStrings(SPAN_RECEIVERS_CONF_KEY);
@@ -93,7 +92,7 @@ public void loadSpanReceivers() {
SpanReceiver receiver = builder.className(className).build();
if (receiver != null) {
receivers.add(receiver);
- LOG.info("SpanReceiver " + className + " was loaded successfully.");
+ LOG.info("SpanReceiver {} was loaded successfully.", className);
}
}
for (SpanReceiver rcvr : receivers) {
@@ -105,7 +104,10 @@ public void loadSpanReceivers() {
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
*/
public synchronized void closeReceivers() {
- if (closed) return;
+ if (closed) {
+ return;
+ }
+
closed = true;
for (SpanReceiver rcvr : receivers) {
try {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
index 89386f4f6b7a..10665d898265 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
@@ -38,7 +38,7 @@ private TraceUtil() {
}
public static void initTracer(Configuration c) {
- if(c != null) {
+ if (c != null) {
conf = new HBaseHTraceConfiguration(c);
}
@@ -62,7 +62,9 @@ public static TraceScope createTrace(String description) {
* @return TraceScope or null when not tracing
*/
public static TraceScope createTrace(String description, Span span) {
- if(span == null) return createTrace(description);
+ if (span == null) {
+ return createTrace(description);
+ }
return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
index 90f3bf38e760..fb0b33669f3a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.util;
import java.io.IOException;
@@ -31,12 +30,16 @@
* Utility methods for working with {@link ByteRange}.
*/
@InterfaceAudience.Public
-public class ByteRangeUtils {
+public final class ByteRangeUtils {
+ private ByteRangeUtils() {
+ }
public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int rightInnerOffset) {
int maxCompares = Math.min(left.getLength(), right.getLength() - rightInnerOffset);
- final byte[] lbytes = left.getBytes(), rbytes = right.getBytes();
- final int loffset = left.getOffset(), roffset = right.getOffset();
+ final byte[] lbytes = left.getBytes();
+ final byte[] rbytes = right.getBytes();
+ final int loffset = left.getOffset();
+ final int roffset = right.getOffset();
for (int i = 0; i < maxCompares; ++i) {
if (lbytes[loffset + i] != rbytes[roffset + rightInnerOffset + i]) {
return i;
@@ -76,5 +79,4 @@ public static void write(OutputStream os, ByteRange byteRange, int byteRangeInne
os.write(byteRange.getBytes(), byteRange.getOffset() + byteRangeInnerOffset,
byteRange.getLength() - byteRangeInnerOffset);
}
-
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 6a0c1cd55c93..89ad8973fe22 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -64,7 +64,8 @@ public abstract class CommonFSUtils {
public static final String HBASE_WAL_DIR = "hbase.wal.dir";
/** Parameter to disable stream capability enforcement checks */
- public static final String UNSAFE_STREAM_CAPABILITY_ENFORCE = "hbase.unsafe.stream.capability.enforce";
+ public static final String UNSAFE_STREAM_CAPABILITY_ENFORCE =
+ "hbase.unsafe.stream.capability.enforce";
/** Full access permissions (starting point for a umask) */
public static final String FULL_RWX_PERMISSIONS = "777";
@@ -140,8 +141,7 @@ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTai
* @return True if deleted dir
* @throws IOException e
*/
- public static boolean deleteDirectory(final FileSystem fs, final Path dir)
- throws IOException {
+ public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
return fs.exists(dir) && fs.delete(dir, true);
}
@@ -160,7 +160,7 @@ public static long getDefaultBlockSize(final FileSystem fs, final Path path) thr
Method m = null;
Class<? extends FileSystem> cls = fs.getClass();
try {
- m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
+ m = cls.getMethod("getDefaultBlockSize", Path.class);
} catch (NoSuchMethodException e) {
LOG.info("FileSystem doesn't support getDefaultBlockSize");
} catch (SecurityException e) {
@@ -195,7 +195,7 @@ public static short getDefaultReplication(final FileSystem fs, final Path path)
Method m = null;
Class<? extends FileSystem> cls = fs.getClass();
try {
- m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
+ m = cls.getMethod("getDefaultReplication", Path.class);
} catch (NoSuchMethodException e) {
LOG.info("FileSystem doesn't support getDefaultReplication");
} catch (SecurityException e) {
@@ -248,7 +248,7 @@ public static int getDefaultBufferSize(final FileSystem fs) {
public static FSDataOutputStream create(FileSystem fs, Path path,
FsPermission perm, boolean overwrite) throws IOException {
if (LOG.isTraceEnabled()) {
- LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
+ LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite);
}
return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
@@ -361,11 +361,11 @@ public static Path getRootDir(final Configuration c) throws IOException {
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
- public static void setRootDir(final Configuration c, final Path root) throws IOException {
+ public static void setRootDir(final Configuration c, final Path root) {
c.set(HConstants.HBASE_DIR, root.toString());
}
- public static void setFsDefault(final Configuration c, final Path root) throws IOException {
+ public static void setFsDefault(final Configuration c, final Path root) {
c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+
}
@@ -390,7 +390,7 @@ public static Path getWALRootDir(final Configuration c) throws IOException {
}
@VisibleForTesting
- public static void setWALRootDir(final Configuration c, final Path root) throws IOException {
+ public static void setWALRootDir(final Configuration c, final Path root) {
c.set(HBASE_WAL_DIR, root.toString());
}
@@ -499,8 +499,7 @@ public static Path getNamespaceDir(Path rootdir, final String namespace) {
// this mapping means that under a federated FileSystem implementation, we'll
// only log the first failure from any of the underlying FileSystems at WARN and all others
// will be at DEBUG.
- private static final Map<FileSystem, Boolean> warningMap =
- new ConcurrentHashMap<FileSystem, Boolean>();
+ private static final Map<FileSystem, Boolean> warningMap = new ConcurrentHashMap<>();
/**
* Sets storage policy for given path.
@@ -572,8 +571,7 @@ private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
Method m = null;
Exception toThrow = null;
try {
- m = fs.getClass().getDeclaredMethod("setStoragePolicy",
- new Class<?>[] { Path.class, String.class });
+ m = fs.getClass().getDeclaredMethod("setStoragePolicy", Path.class, String.class);
m.setAccessible(true);
} catch (NoSuchMethodException e) {
toThrow = e;
@@ -605,7 +603,7 @@ private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
try {
m.invoke(fs, path, storagePolicy);
if (LOG.isDebugEnabled()) {
- LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
+ LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
}
} catch (Exception e) {
toThrow = e;
@@ -677,8 +675,7 @@ public static boolean isRecoveredEdits(Path path) {
* @return Returns the filesystem of the hbase rootdir.
* @throws IOException from underlying FileSystem
*/
- public static FileSystem getCurrentFileSystem(Configuration conf)
- throws IOException {
+ public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
return getRootDir(conf).getFileSystem(conf);
}
@@ -696,7 +693,7 @@ public static FileSystem getCurrentFileSystem(Configuration conf)
* @param filter path filter
* @return null if dir is empty or doesn't exist, otherwise FileStatus array
*/
- public static FileStatus [] listStatus(final FileSystem fs,
+ public static FileStatus[] listStatus(final FileSystem fs,
final Path dir, final PathFilter filter) throws IOException {
FileStatus [] status = null;
try {
@@ -704,7 +701,7 @@ public static FileSystem getCurrentFileSystem(Configuration conf)
} catch (FileNotFoundException fnfe) {
// if directory doesn't exist, return null
if (LOG.isTraceEnabled()) {
- LOG.trace(dir + " doesn't exist");
+ LOG.trace("{} doesn't exist", dir);
}
}
if (status == null || status.length < 1) {
@@ -747,7 +744,7 @@ public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs,
} catch (FileNotFoundException fnfe) {
// if directory doesn't exist, return null
if (LOG.isTraceEnabled()) {
- LOG.trace(dir + " doesn't exist");
+ LOG.trace("{} doesn't exist", dir);
}
}
return status;
@@ -783,13 +780,13 @@ public static boolean isExists(final FileSystem fs, final Path path) throws IOEx
* Log the current state of the filesystem from a certain root directory
* @param fs filesystem to investigate
* @param root root file/directory to start logging from
- * @param LOG log to output information
+ * @param log log to output information
* @throws IOException if an unexpected exception occurs
*/
- public static void logFileSystemState(final FileSystem fs, final Path root, Logger LOG)
+ public static void logFileSystemState(final FileSystem fs, final Path root, Logger log)
throws IOException {
- LOG.debug("File system contents for path " + root);
- logFSTree(LOG, fs, root, "|-");
+ log.debug("File system contents for path {}", root);
+ logFSTree(log, fs, root, "|-");
}
/**
@@ -797,7 +794,7 @@ public static void logFileSystemState(final FileSystem fs, final Path root, Logg
*
* @see #logFileSystemState(FileSystem, Path, Logger)
*/
- private static void logFSTree(Logger LOG, final FileSystem fs, final Path root, String prefix)
+ private static void logFSTree(Logger log, final FileSystem fs, final Path root, String prefix)
throws IOException {
FileStatus[] files = listStatus(fs, root, null);
if (files == null) {
@@ -806,10 +803,10 @@ private static void logFSTree(Logger LOG, final FileSystem fs, final Path root,
for (FileStatus file : files) {
if (file.isDirectory()) {
- LOG.debug(prefix + file.getPath().getName() + "/");
- logFSTree(LOG, fs, file.getPath(), prefix + "---");
+ log.debug(prefix + file.getPath().getName() + "/");
+ logFSTree(log, fs, file.getPath(), prefix + "---");
} else {
- LOG.debug(prefix + file.getPath().getName());
+ log.debug(prefix + file.getPath().getName());
}
}
}
@@ -821,25 +818,6 @@ public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src
return fs.rename(src, dest);
}
- /**
- * Do our short circuit read setup.
- * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
- * @param conf must not be null
- */
- public static void setupShortCircuitRead(final Configuration conf) {
- // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
- boolean shortCircuitSkipChecksum =
- conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
- boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
- if (shortCircuitSkipChecksum) {
- LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
- "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
- "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
- assert !shortCircuitSkipChecksum; //this will fail if assertions are on
- }
- checkShortCircuitReadBufferSize(conf);
- }
-
/**
* Check if short circuit read buffer size is set and if not, set it to hbase value.
* @param conf must not be null
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
index 3922a6db142c..50c0d63701d1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
@@ -79,7 +79,10 @@ public T next() {
if (!components.isEmpty()) {
this.nextWasCalled = true;
List<T> src = components.get(currentComponent);
- if (++indexWithinComponent < src.size()) return src.get(indexWithinComponent);
+ if (++indexWithinComponent < src.size()) {
+ return src.get(indexWithinComponent);
+ }
+
if (++currentComponent < components.size()) {
indexWithinComponent = 0;
src = components.get(currentComponent);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
index b37142a2f30d..9d864ce6cfc2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
@@ -27,22 +27,31 @@
*/
@InterfaceAudience.Public
public enum Order {
-
ASCENDING {
@Override
- public int cmp(int cmp) { /* noop */ return cmp; }
+ public int cmp(int cmp) {
+ /* noop */ return cmp;
+ }
@Override
- public byte apply(byte val) { /* noop */ return val; }
+ public byte apply(byte val) {
+ /* noop */ return val;
+ }
@Override
- public void apply(byte[] val) { /* noop */ }
+ public void apply(byte[] val) {
+ /* noop */
+ }
@Override
- public void apply(byte[] val, int offset, int length) { /* noop */ }
+ public void apply(byte[] val, int offset, int length) {
+ /* noop */
+ }
@Override
- public String toString() { return "ASCENDING"; }
+ public String toString() {
+ return "ASCENDING";
+ }
},
DESCENDING {
@@ -53,23 +62,33 @@ public void apply(byte[] val, int offset, int length) { /* noop */ }
private static final byte MASK = (byte) 0xff;
@Override
- public int cmp(int cmp) { return -1 * cmp; }
+ public int cmp(int cmp) {
+ return -1 * cmp;
+ }
@Override
- public byte apply(byte val) { return (byte) (val ^ MASK); }
+ public byte apply(byte val) {
+ return (byte) (val ^ MASK);
+ }
@Override
public void apply(byte[] val) {
- for (int i = 0; i < val.length; i++) { val[i] ^= MASK; }
+ for (int i = 0; i < val.length; i++) {
+ val[i] ^= MASK;
+ }
}
@Override
public void apply(byte[] val, int offset, int length) {
- for (int i = 0; i < length; i++) { val[offset + i] ^= MASK; }
+ for (int i = 0; i < length; i++) {
+ val[offset + i] ^= MASK;
+ }
}
@Override
- public String toString() { return "DESCENDING"; }
+ public String toString() {
+ return "DESCENDING";
+ }
};
/**