public Optional<ExtendedCell> midKey() throws IOException {
return Optional.ofNullable(dataBlockIndexReader.midkey(this));
}
@@ -1552,7 +1553,7 @@ public boolean next() throws IOException {
}
@Override
- public Cell getKey() {
+ public ExtendedCell getKey() {
assertValidSeek();
return seeker.getKey();
}
@@ -1564,7 +1565,7 @@ public ByteBuffer getValue() {
}
@Override
- public Cell getCell() {
+ public ExtendedCell getCell() {
if (this.curBlock == null) {
return null;
}
@@ -1589,13 +1590,13 @@ private void assertValidSeek() {
}
@Override
- protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
+ protected ExtendedCell getFirstKeyCellInBlock(HFileBlock curBlock) {
return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock));
}
@Override
- protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind,
- Cell key, boolean seekBefore) throws IOException {
+ protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, ExtendedCell nextIndexedKey,
+ boolean rewind, ExtendedCell key, boolean seekBefore) throws IOException {
if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) {
updateCurrentBlock(seekToBlock);
} else if (rewind) {
@@ -1606,7 +1607,7 @@ protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey,
}
@Override
- public int compareKey(CellComparator comparator, Cell key) {
+ public int compareKey(CellComparator comparator, ExtendedCell key) {
return seeker.compareKey(comparator, key);
}
}
@@ -1669,9 +1670,9 @@ public boolean prefetchStarted() {
/**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
- * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up
- * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this
- * overload of getScanner for compactions. See
+ * {@link HFileScanner#seekTo(ExtendedCell)} to position and start the read. There is nothing to
+ * clean up in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not
+ * use this overload of getScanner for compactions. See
* {@link #getScanner(Configuration, boolean, boolean, boolean)}
* @param conf Store configuration.
* @param cacheBlocks True if we should cache blocks read in by this scanner.
@@ -1686,8 +1687,8 @@ public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final bo
/**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
- * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up
- * in a Scanner. Letting go of your references to the scanner is sufficient.
+ * {@link HFileScanner#seekTo(ExtendedCell)} to position and start the read. There is nothing to
+ * clean up in a Scanner. Letting go of your references to the scanner is sufficient.
* @param conf Store configuration.
* @param cacheBlocks True if we should cache blocks read in by this scanner.
* @param pread Use positional read rather than seek+read if true (pread is better for
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index 0393d3b788a7..79ed7a22016f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -21,7 +21,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.function.IntConsumer;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.Shipper;
import org.apache.yetus.audience.InterfaceAudience;
@@ -30,7 +30,7 @@
* reposition yourself as well.
*
* A scanner doesn't always have a key/value that it is pointing to when it is first created and
- * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case,
+ * before {@link #seekTo()}/{@link #seekTo(ExtendedCell)} are called. In this case,
* {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be
* available. The general pattern is that you position the Scanner using the seekTo variants and
* then getKey and getValue.
@@ -48,7 +48,7 @@ public interface HFileScanner extends Shipper, Closeable {
* will position itself at the end of the file and next() will return false when it is
* called.
*/
- int seekTo(Cell cell) throws IOException;
+ int seekTo(ExtendedCell cell) throws IOException;
/**
* Reseek to or just before the passed <code>cell</code>. Similar to seekTo except that this can
@@ -63,7 +63,7 @@ public interface HFileScanner extends Shipper, Closeable {
* @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in
* position i; and 1, such that c[i] < cell, and scanner is left in position i.
*/
- int reseekTo(Cell cell) throws IOException;
+ int reseekTo(ExtendedCell cell) throws IOException;
/**
* Consider the cell stream of all the cells in the file, <code>c[0] .. c[n]</code>, where there
@@ -73,7 +73,7 @@ public interface HFileScanner extends Shipper, Closeable {
* cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but
* there may also NOT be a c[i+1], and next() will return false (EOF).
*/
- boolean seekBefore(Cell cell) throws IOException;
+ boolean seekBefore(ExtendedCell cell) throws IOException;
/**
* Positions this scanner at the start of the file.
@@ -89,26 +89,26 @@ public interface HFileScanner extends Shipper, Closeable {
boolean next() throws IOException;
/**
- * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this
- * method.
+ * Gets the current key in the form of a cell. You must call {@link #seekTo(ExtendedCell)} before
+ * this method.
* @return gets the current key as a Cell.
*/
- Cell getKey();
+ ExtendedCell getKey();
/**
- * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this
- * method.
+ * Gets a buffer view to the current value. You must call {@link #seekTo(ExtendedCell)} before
+ * this method.
* @return byte buffer for the value. The limit is set to the value size, and the position is 0,
* the start of the buffer view.
*/
ByteBuffer getValue();
- /** Returns Instance of {@link org.apache.hadoop.hbase.Cell}. */
- Cell getCell();
+ /** Returns Instance of {@link ExtendedCell}. */
+ ExtendedCell getCell();
/**
* Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You
- * must call {@link #seekTo(Cell)} before this method.
+ * must call {@link #seekTo(ExtendedCell)} before this method.
* @return key as a string
* @deprecated Since hbase-2.0.0
*/
@@ -117,7 +117,7 @@ public interface HFileScanner extends Shipper, Closeable {
/**
* Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You
- * must call {@link #seekTo(Cell)} before this method.
+ * must call {@link #seekTo(ExtendedCell)} before this method.
* @return value as a string
* @deprecated Since hbase-2.0.0
*/
@@ -128,13 +128,14 @@ public interface HFileScanner extends Shipper, Closeable {
HFile.Reader getReader();
/**
- * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)}
- * or {@link #seekTo()} or {@link #seekTo(Cell)}. Otherwise returns false.
+ * @return True if scanner has had one of the seek calls invoked; i.e.
+ * {@link #seekBefore(ExtendedCell)} or {@link #seekTo()} or
+ * {@link #seekTo(ExtendedCell)}. Otherwise returns false.
*/
boolean isSeeked();
/** Returns the next key in the index (the key to seek to the next block) */
- Cell getNextIndexedKey();
+ ExtendedCell getNextIndexedKey();
/**
* Close this HFile scanner and do necessary cleanup.
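
Reviewer note: the seekTo contract above (-1 / 0 / 1) is easiest to read next to a typical scan loop. A minimal sketch, not part of this patch; `reader`, `conf` and `key` are assumed to exist, and only the overloads documented in this diff are used:

    HFileScanner scanner = reader.getScanner(conf, true /* cacheBlocks */, true /* pread */);
    try {
      int res = scanner.seekTo(key);
      if (res == -1) {
        // key sorts before the first cell in the file; start from the beginning
        if (!scanner.seekTo()) {
          return; // empty file
        }
      } else if (res == 1) {
        // scanner sits on the last cell strictly less than key; advance once
        if (!scanner.next()) {
          return; // key sorts after the last cell
        }
      }
      do {
        ExtendedCell cell = scanner.getCell();
        // ... consume cell ...
      } while (scanner.next());
    } finally {
      scanner.close();
    }
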
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index d2dfaf62106a..0f54fafba954 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -75,7 +76,7 @@ public class HFileWriterImpl implements HFile.Writer {
private final int encodedBlockSizeLimit;
/** The Cell previously appended. Becomes the last cell in the file. */
- protected Cell lastCell = null;
+ protected ExtendedCell lastCell = null;
/** FileSystem stream to write into. */
protected FSDataOutputStream outputStream;
@@ -112,7 +113,7 @@ public class HFileWriterImpl implements HFile.Writer {
/**
* First cell in a block. This reference should be short-lived since we write hfiles in a burst.
*/
- protected Cell firstCellInBlock = null;
+ protected ExtendedCell firstCellInBlock = null;
/** May be null if we were passed a stream. */
protected final Path path;
@@ -163,7 +164,7 @@ public class HFileWriterImpl implements HFile.Writer {
* The last(stop) Cell of the previous data block. This reference should be short-lived since we
* write hfiles in a burst.
*/
- private Cell lastCellOfPreviousBlock = null;
+ private ExtendedCell lastCellOfPreviousBlock = null;
/** Additional data items to be written to the "load-on-open" section. */
private List<BlockWritable> additionalLoadOnOpenData = new ArrayList<>();
@@ -360,7 +361,7 @@ private void finishBlock() throws IOException {
lastDataBlockOffset = outputStream.getPos();
blockWriter.writeHeaderAndData(outputStream);
int onDiskSize = blockWriter.getOnDiskSizeWithHeader();
- Cell indexEntry =
+ ExtendedCell indexEntry =
getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock);
dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry),
lastDataBlockOffset, onDiskSize);
@@ -377,8 +378,8 @@ private void finishBlock() throws IOException {
* cell.
* @return A cell that sorts between <code>left</code> and <code>right</code>.
*/
- public static Cell getMidpoint(final CellComparator comparator, final Cell left,
- final Cell right) {
+ public static ExtendedCell getMidpoint(final CellComparator comparator, final ExtendedCell left,
+ final ExtendedCell right) {
if (right == null) {
throw new IllegalArgumentException("right cell can not be null");
}
@@ -733,7 +734,7 @@ public HFileContext getFileContext() {
* construction. Cell to add. Cannot be empty nor null.
*/
@Override
- public void append(final Cell cell) throws IOException {
+ public void append(final ExtendedCell cell) throws IOException {
// checkKey uses comparator to check we are writing in order.
boolean dupKey = checkKey(cell);
if (!dupKey) {
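
Reviewer note: getMidpoint (retyped above) exists so the block index can store a short separator key instead of the full first key of the next block. The flavor of the optimization, reduced to raw row bytes; an illustration only, not the patched implementation, which works on cells through CellComparator:

    // Assumes left sorts strictly before right; returns m with left < m <= right.
    static byte[] shortestSeparator(byte[] left, byte[] right) {
      int i = 0;
      while (i < left.length && i < right.length && left[i] == right[i]) {
        i++; // skip the common prefix
      }
      // Keep the common prefix plus one differing byte of right; e.g.
      // left = "the quick brown fox", right = "the who"  ->  "the w".
      return java.util.Arrays.copyOf(right, Math.min(i + 1, right.length));
    }
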
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
index d64f0e4ce53d..002b26295f33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
@@ -20,7 +20,7 @@
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.EncodingState;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
@@ -47,7 +47,7 @@ private NoOpDataBlockEncoder() {
}
@Override
- public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
+ public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException {
NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState();
NoneEncoder encoder = state.encoder;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
index 4162fca6afe5..0d9767f62210 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding;
@@ -138,12 +139,12 @@ protected static class NoOpEncodedSeeker implements EncodedSeeker {
protected int midLeafBlockOnDiskSize = -1;
protected int midKeyEntry = -1;
- private Cell[] blockKeys;
+ private ExtendedCell[] blockKeys;
private CellComparator comparator;
protected int searchTreeLevel;
/** Pre-computed mid-key */
- private AtomicReference<Cell> midKey = new AtomicReference<>();
+ private AtomicReference<ExtendedCell> midKey = new AtomicReference<>();
@Override
public long heapSize() {
@@ -184,7 +185,7 @@ public boolean isEmpty() {
}
@Override
- public Cell getRootBlockKey(int i) {
+ public ExtendedCell getRootBlockKey(int i) {
return blockKeys[i];
}
@@ -238,7 +239,7 @@ private void readRootIndex(DataInput in, final int numEntries) throws IOExceptio
}
private void initialize(int numEntries) {
- blockKeys = new Cell[numEntries];
+ blockKeys = new ExtendedCell[numEntries];
}
private void add(final byte[] key, final long offset, final int dataSize) {
@@ -250,10 +251,12 @@ private void add(final byte[] key, final long offset, final int dataSize) {
}
@Override
- public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException {
- if (rootCount == 0) throw new IOException("HFile empty");
+ public ExtendedCell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException {
+ if (rootCount == 0) {
+ throw new IOException("HFile empty");
+ }
- Cell targetMidKey = this.midKey.get();
+ ExtendedCell targetMidKey = this.midKey.get();
if (targetMidKey != null) {
return targetMidKey;
}
@@ -285,7 +288,7 @@ public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOExcepti
}
@Override
- public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
+ public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock,
boolean cacheBlocks, boolean pread, boolean isCompaction,
DataBlockEncoding expectedDataBlockEncoding, HFile.CachingBlockReader cachingBlockReader)
throws IOException {
@@ -295,7 +298,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB
}
// the next indexed key
- Cell nextIndexedKey = null;
+ ExtendedCell nextIndexedKey = null;
// Read the next-level (intermediate or leaf) index block.
long currentOffset = blockOffsets[rootLevelIndex];
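
Reviewer note: the midkey() rewrite above keeps the lazily computed mid-key in an AtomicReference so concurrent readers never block; a duplicate computation is possible but harmless because the result is immutable. The pattern in isolation (computeMidKey() is a hypothetical stand-in for the root-index walk):

    import java.util.concurrent.atomic.AtomicReference;

    private final AtomicReference<ExtendedCell> midKey = new AtomicReference<>();

    ExtendedCell midkey() throws IOException {
      ExtendedCell cached = midKey.get();
      if (cached != null) {
        return cached; // fast path: computed earlier
      }
      ExtendedCell computed = computeMidKey(); // idempotent, side-effect free
      midKey.set(computed); // racing writers store equivalent values
      return computed;
    }
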
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 82224851375c..bd64ca1ec51c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -34,7 +34,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -325,7 +325,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo());
// Since scanner.next() can return 'false' but still be delivering data,
// we have to use a do/while loop.
- List<Cell> cells = new ArrayList<>();
+ List<ExtendedCell> cells = new ArrayList<>();
// Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
long currentTime = EnvironmentEdgeManager.currentTime();
long lastMillis = 0;
@@ -355,7 +355,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
long shippedCallSizeLimit =
(long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize();
- Cell mobCell = null;
+ ExtendedCell mobCell = null;
List<String> committedMobWriterFileNames = new ArrayList<>();
try {
@@ -363,9 +363,9 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
do {
- hasMore = scanner.next(cells, scannerContext);
+ hasMore = scanner.next((List) cells, scannerContext);
now = EnvironmentEdgeManager.currentTime();
- for (Cell c : cells) {
+ for (ExtendedCell c : cells) {
if (compactMOBs) {
if (MobUtils.isMobReferenceCell(c)) {
String fName = MobUtils.getMobFileName(c);
@@ -516,7 +516,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
mobCells++;
// append the original keyValue in the mob file.
mobFileWriter.append(c);
- Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
+ ExtendedCell reference =
+ MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
// write the cell whose value is the path of a mob file to the store file.
writer.append(reference);
cellsCountCompactedToMob++;
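
Reviewer note: the reference cells written here carry, in place of the mob value, the original value's length followed by the mob file name (see createMobRefCell in the MobUtils hunk below: Bytes.add(Bytes.toBytes(cell.getValueLength()), fileName)). Decoding that layout, as a sketch:

    import org.apache.hadoop.hbase.util.Bytes;

    // refValue = [4-byte valueLen][fileName bytes]
    static String mobFileName(byte[] refValue) {
      int valueLen = Bytes.toInt(refValue, 0); // size of the real value in the mob file
      return Bytes.toString(refValue, Bytes.SIZEOF_INT, refValue.length - Bytes.SIZEOF_INT);
    }
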
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index e7b0f8260822..f8a55abde115 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -27,7 +27,7 @@
import java.util.function.Consumer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
@@ -192,7 +192,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
ScannerContext scannerContext =
ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
- List<Cell> cells = new ArrayList<>();
+ List<ExtendedCell> cells = new ArrayList<>();
boolean hasMore;
String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
boolean control =
@@ -205,9 +205,9 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
mobRefSet.get().clear();
try {
do {
- hasMore = scanner.next(cells, scannerContext);
+ hasMore = scanner.next((List) cells, scannerContext);
if (!cells.isEmpty()) {
- for (Cell c : cells) {
+ for (ExtendedCell c : cells) {
// If we know that this KV is going to be included always, then let us
// set its memstoreTS to 0. This will help us save space when writing to
// disk.
@@ -223,7 +223,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
mobCount++;
// append the tags to the KeyValue.
// The key is same, the value is the filename of the mob file
- Cell reference =
+ ExtendedCell reference =
MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
writer.append(reference);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java
index f55088ea6be5..fe66535ee55e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java
@@ -20,6 +20,7 @@
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.yetus.audience.InterfaceAudience;
@@ -45,20 +46,20 @@
@InterfaceAudience.Private
public class MobCell implements Closeable {
- private final Cell cell;
+ private final ExtendedCell cell;
private final StoreFileScanner sfScanner;
- public MobCell(Cell cell) {
+ public MobCell(ExtendedCell cell) {
this.cell = cell;
this.sfScanner = null;
}
- public MobCell(Cell cell, StoreFileScanner sfScanner) {
+ public MobCell(ExtendedCell cell, StoreFileScanner sfScanner) {
this.cell = cell;
this.sfScanner = sfScanner;
}
- public Cell getCell() {
+ public ExtendedCell getCell() {
return cell;
}
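
Reviewer note: since a MobCell read out of a mob file keeps its StoreFileScanner open until close(), callers should scope it with try-with-resources. A usage sketch (`mobFile` and `search` assumed to exist):

    try (MobCell mobCell = mobFile.readCell(search, false /* cacheMobBlocks */)) {
      if (mobCell != null) {
        ExtendedCell cell = mobCell.getCell();
        // copy out whatever must outlive this block; close() releases the scanner
      }
    }
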
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
index 3293208771ac..102617ae74df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
@@ -66,7 +66,7 @@ public StoreFileScanner getScanner() throws IOException {
* @param cacheMobBlocks Should this scanner cache blocks.
* @return The cell in the mob file.
*/
- public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException {
+ public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks) throws IOException {
return readCell(search, cacheMobBlocks, sf.getMaxMemStoreTS());
}
@@ -77,7 +77,8 @@ public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException
* @param readPt the read point.
* @return The cell in the mob file.
*/
- public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
+ public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks, long readPt)
+ throws IOException {
StoreFileScanner scanner = null;
boolean succ = false;
try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index b6b8be9d1791..2a1428196e6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -497,7 +498,8 @@ public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableNam
* snapshot.
* @return The mob reference KeyValue.
*/
- public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) {
+ public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName,
+ Tag tableNameTag) {
// Append the tags to the KeyValue.
// The key is same, the value is the filename of the mob file
List<Tag> tags = new ArrayList<>();
@@ -512,7 +514,8 @@ public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag
return createMobRefCell(cell, fileName, TagUtil.fromList(tags));
}
- public static Cell createMobRefCell(Cell cell, byte[] fileName, byte[] refCellTags) {
+ public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName,
+ byte[] refCellTags) {
byte[] refValue = Bytes.add(Bytes.toBytes(cell.getValueLength()), fileName);
return PrivateCellUtil.createCell(cell, refValue, TagUtil.concatTags(refCellTags, cell));
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java
index b76680d0fdbe..24598c12bd1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
@@ -102,7 +103,7 @@ protected int doWork() throws Exception {
out.println(
String.format(KEY_TMPL, sequenceId, FORMATTER.format(Instant.ofEpochMilli(writeTime))));
for (Cell cell : edit.getCells()) {
- Map<String, Object> op = WALPrettyPrinter.toStringMap(cell);
+ Map<String, Object> op = WALPrettyPrinter.toStringMap((ExtendedCell) cell);
if (
!Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(),
cell.getFamilyOffset(), cell.getFamilyLength())
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 9a88cab450af..9e15358c3673 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -22,7 +22,6 @@
import java.util.NavigableSet;
import java.util.SortedSet;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@@ -111,14 +110,14 @@ protected void resetTimeOfOldestEdit() {
public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent);
@Override
- public void add(Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
- for (Cell cell : cells) {
+ public void add(Iterable<ExtendedCell> cells, MemStoreSizing memstoreSizing) {
+ for (ExtendedCell cell : cells) {
add(cell, memstoreSizing);
}
}
@Override
- public void add(Cell cell, MemStoreSizing memstoreSizing) {
+ public void add(ExtendedCell cell, MemStoreSizing memstoreSizing) {
doAddOrUpsert(cell, 0, memstoreSizing, true);
}
@@ -131,11 +130,11 @@ public void add(Cell cell, MemStoreSizing memstoreSizing) {
* @param readpoint readpoint below which we can safely remove duplicate KVs
* @param memstoreSizing object to accumulate changed size
*/
- private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) {
+ private void upsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing) {
doAddOrUpsert(cell, readpoint, memstoreSizing, false);
}
- private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing,
+ private void doAddOrUpsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing,
boolean doAdd) {
MutableSegment currentActive;
boolean succ = false;
@@ -153,8 +152,9 @@ private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSiz
}
}
- protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) {
- Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false);
+ protected void doAdd(MutableSegment currentActive, ExtendedCell cell,
+ MemStoreSizing memstoreSizing) {
+ ExtendedCell toAdd = maybeCloneWithAllocator(currentActive, cell, false);
boolean mslabUsed = (toAdd != cell);
// This cell data is backed by the same byte[] where we read request in RPC(See
// HBASE-15180). By default, MSLAB is ON and we might have copied cell to MSLAB area. If
@@ -171,14 +171,14 @@ protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing mem
internalAdd(currentActive, toAdd, mslabUsed, memstoreSizing);
}
- private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint,
+ private void doUpsert(MutableSegment currentActive, ExtendedCell cell, long readpoint,
MemStoreSizing memstoreSizing) {
// Add the Cell to the MemStore
- // Use the internalAdd method here since we (a) already have a lock
- // and (b) cannot safely use the MSLAB here without potentially
- // hitting OOME - see TestMemStore.testUpsertMSLAB for a
- // test that triggers the pathological case if we don't avoid MSLAB
- // here.
+ // Use the internalAdd method here since we
+ // (a) already have a lock and
+ // (b) cannot safely use the MSLAB here without potentially hitting OOME
+ // - see TestMemStore.testUpsertMSLAB for a test that triggers the pathological case if we don't
+ // avoid MSLAB here.
// This cell data is backed by the same byte[] where we read request in RPC(See
// HBASE-15180). We must do below deep copy. Or else we will keep referring to the bigger
// chunk of memory and prevent it from getting GCed.
@@ -195,7 +195,7 @@ private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint,
* @param memstoreSizing object to accumulate region size changes
* @return true iff can proceed with applying the update
*/
- protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell,
+ protected abstract boolean preUpdate(MutableSegment currentActive, ExtendedCell cell,
MemStoreSizing memstoreSizing);
/**
@@ -204,16 +204,13 @@ protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell,
*/
protected abstract void postUpdate(MutableSegment currentActive);
- private static Cell deepCopyIfNeeded(Cell cell) {
- if (cell instanceof ExtendedCell) {
- return ((ExtendedCell) cell).deepClone();
- }
- return cell;
+ private static ExtendedCell deepCopyIfNeeded(ExtendedCell cell) {
+ return cell.deepClone();
}
@Override
- public void upsert(Iterable<Cell> cells, long readpoint, MemStoreSizing memstoreSizing) {
- for (Cell cell : cells) {
+ public void upsert(Iterable<ExtendedCell> cells, long readpoint, MemStoreSizing memstoreSizing) {
+ for (ExtendedCell cell : cells) {
upsert(cell, readpoint, memstoreSizing);
}
}
@@ -281,10 +278,8 @@ protected void dump(Logger log) {
snapshot.dump(log);
}
- /*
- * @return Return lowest of a or b or null if both a and b are null
- */
- protected Cell getLowest(final Cell a, final Cell b) {
+ /** Returns the lowest of a or b, or null if both a and b are null */
+ protected ExtendedCell getLowest(final ExtendedCell a, final ExtendedCell b) {
if (a == null) {
return b;
}
@@ -294,17 +289,17 @@ protected Cell getLowest(final Cell a, final Cell b) {
return comparator.compareRows(a, b) <= 0 ? a : b;
}
- /*
+ /**
* @param key Find row that follows this one. If null, return first.
* @param set Set to look in for a row beyond <code>row</code>.
* @return Next row or null if none found. If one found, will be a new KeyValue -- can be
- * destroyed by subsequent calls to this method.
+ * destroyed by subsequent calls to this method.
*/
- protected Cell getNextRow(final Cell key, final NavigableSet<Cell> set) {
- Cell result = null;
- SortedSet<Cell> tail = key == null ? set : set.tailSet(key);
+ protected ExtendedCell getNextRow(final ExtendedCell key, final NavigableSet<ExtendedCell> set) {
+ ExtendedCell result = null;
+ SortedSet<ExtendedCell> tail = key == null ? set : set.tailSet(key);
// Iterate until we fall into the next row; i.e. move off current row
- for (Cell cell : tail) {
+ for (ExtendedCell cell : tail) {
if (comparator.compareRows(cell, key) <= 0) {
continue;
}
@@ -326,20 +321,20 @@ protected Cell getNextRow(final Cell key, final NavigableSet<Cell> set) {
* @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap.
* @return either the given cell or its clone
*/
- private Cell maybeCloneWithAllocator(MutableSegment currentActive, Cell cell,
+ private ExtendedCell maybeCloneWithAllocator(MutableSegment currentActive, ExtendedCell cell,
boolean forceCloneOfBigCell) {
return currentActive.maybeCloneWithAllocator(cell, forceCloneOfBigCell);
}
- /*
+ /**
* Internal version of add() that doesn't clone Cells with the allocator, and doesn't take the
* lock. Callers should ensure they already have the read lock taken
- * @param toAdd the cell to add
- * @param mslabUsed whether using MSLAB
+ * @param toAdd the cell to add
+ * @param mslabUsed whether using MSLAB
* @param memstoreSizing object to accumulate changed size
*/
- private void internalAdd(MutableSegment currentActive, final Cell toAdd, final boolean mslabUsed,
- MemStoreSizing memstoreSizing) {
+ private void internalAdd(MutableSegment currentActive, final ExtendedCell toAdd,
+ final boolean mslabUsed, MemStoreSizing memstoreSizing) {
boolean sizeAddedPreOperation = sizeAddedPreOperation();
currentActive.add(toAdd, mslabUsed, memstoreSizing, sizeAddedPreOperation);
setOldestEditTimeToNow();
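
Reviewer note: deepCopyIfNeeded could drop its instanceof check above because every ExtendedCell supports deepClone(). The copy itself is what matters: a cell arriving from RPC is a thin view over the whole request buffer, and storing that view would pin the buffer in memory (the HBASE-15180 comment in this file). Sketch of the hazard; receiveRequest and parseCell are hypothetical:

    byte[] rpcBuffer = receiveRequest();      // possibly a multi-megabyte request
    ExtendedCell view = parseCell(rpcBuffer); // zero-copy: backed by rpcBuffer
    ExtendedCell copy = view.deepClone();     // small, privately owned byte[]
    // retaining `view` would keep all of rpcBuffer reachable; `copy` does not
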
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java
index f62b0d615149..618ae07a9a8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.yetus.audience.InterfaceAudience;
@@ -86,15 +87,14 @@ protected boolean canBeFlattened() {
// Create CellSet based on CellArrayMap from compacting iterator
private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator,
MemStoreCompactionStrategy.Action action) {
-
boolean merge = (action == MemStoreCompactionStrategy.Action.MERGE
|| action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS);
- Cell[] cells = new Cell[numOfCells]; // build the Cell Array
+ ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array
int i = 0;
int numUniqueKeys = 0;
Cell prev = null;
while (iterator.hasNext()) {
- Cell c = iterator.next();
+ ExtendedCell c = iterator.next();
// The scanner behind the iterator is doing all the elimination logic
if (merge) {
// if this is merge we just move the Cell object without copying MSLAB
@@ -126,8 +126,8 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
- CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false);
- this.setCellSet(null, new CellSet(cam, numUniqueKeys)); // update the CellSet of this Segment
+ CellArrayMap<ExtendedCell> cam = new CellArrayMap<>(getComparator(), cells, 0, i, false);
+ this.setCellSet(null, new CellSet<>(cam, numUniqueKeys)); // update the CellSet of this Segment
}
/*------------------------------------------------------------------------*/
@@ -135,12 +135,12 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator
// (without compacting iterator)
// We do not consider cells bigger than chunks!
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
- CellSet oldCellSet, MemStoreCompactionStrategy.Action action) {
- Cell[] cells = new Cell[numOfCells]; // build the Cell Array
- Cell curCell;
+ CellSet<ExtendedCell> oldCellSet, MemStoreCompactionStrategy.Action action) {
+ ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array
+ ExtendedCell curCell;
int idx = 0;
int numUniqueKeys = 0;
- Cell prev = null;
+ ExtendedCell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
cells[idx++] = curCell;
@@ -165,9 +165,9 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
- CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false);
+ CellArrayMap<ExtendedCell> cam = new CellArrayMap<>(getComparator(), cells, 0, idx, false);
// update the CellSet of this Segment
- this.setCellSet(oldCellSet, new CellSet(cam, numUniqueKeys));
+ this.setCellSet(oldCellSet, new CellSet<>(cam, numUniqueKeys));
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
index af60c8e93cf6..1ebf693bda6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
@@ -26,15 +26,15 @@
* CellArrayMap's array of references pointing to Cell objects.
*/
@InterfaceAudience.Private
-public class CellArrayMap extends CellFlatMap {
+public class CellArrayMap<T extends Cell> extends CellFlatMap<T> {
- private final Cell[] block;
+ private final T[] block;
/*
* The Cells Array is created only when CellArrayMap is created, all sub-CellBlocks use boundary
* indexes. The given Cell array must be ordered.
*/
- public CellArrayMap(Comparator<? super Cell> comparator, Cell[] b, int min, int max,
+ public CellArrayMap(Comparator<? super T> comparator, T[] b, int min, int max,
boolean descending) {
super(comparator, min, max, descending);
this.block = b;
@@ -42,12 +42,12 @@ public CellArrayMap(Comparator<? super Cell> comparator, Cell[] b, int min, int
/* To be used by base class only to create a sub-CellFlatMap */
@Override
- protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) {
- return new CellArrayMap(comparator(), this.block, min, max, descending);
+ protected CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending) {
+ return new CellArrayMap<>(comparator(), this.block, min, max, descending);
}
@Override
- protected Cell getCell(int i) {
+ protected T getCell(int i) {
if ((i < minCellIdx) || (i >= maxCellIdx)) return null;
return block[i];
}
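
Reviewer note: with the type parameter introduced above, each segment picks its cell type once and the flat-map API stays cast-free at the boundaries. A construction sketch; a sorted `ExtendedCell[] cells` with `count` valid entries and a CellComparator `cmp` are assumed:

    CellArrayMap<ExtendedCell> map =
      new CellArrayMap<>(cmp, cells, 0, count, false /* descending */);
    ExtendedCell first = map.firstKey();
    NavigableMap<ExtendedCell, ExtendedCell> rest = map.tailMap(first, true /* inclusive */);
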
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
index de6377668f93..a623c823cb33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
@@ -135,19 +134,17 @@ protected boolean canBeFlattened() {
// Create CellSet based on CellChunkMap from compacting iterator
private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator,
MemStoreCompactionStrategy.Action action) {
-
int numOfCellsAfterCompaction = 0;
int currentChunkIdx = 0;
int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
- Cell prev = null;
+ ExtendedCell prev = null;
Chunk[] chunks = allocIndexChunks(numOfCells);
while (iterator.hasNext()) { // the iterator hides the elimination logic for compaction
boolean alreadyCopied = false;
- Cell c = iterator.next();
+ ExtendedCell c = iterator.next();
numOfCellsAfterCompaction++;
- assert (c instanceof ExtendedCell);
- if (((ExtendedCell) c).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
+ if (c.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
@@ -190,9 +187,9 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
- CellChunkMap ccm =
- new CellChunkMap(getComparator(), chunks, 0, numOfCellsAfterCompaction, false);
- this.setCellSet(null, new CellSet(ccm, numUniqueKeys)); // update the CellSet of this Segment
+ CellChunkMap<ExtendedCell> ccm =
+ new CellChunkMap<>(getComparator(), chunks, 0, numOfCellsAfterCompaction, false);
+ this.setCellSet(null, new CellSet<>(ccm, numUniqueKeys)); // update the CellSet of this Segment
}
/*------------------------------------------------------------------------*/
@@ -200,19 +197,19 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator
// (without compacting iterator)
// This is a service for not-flat immutable segments
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
- CellSet oldCellSet, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) {
- Cell curCell;
+ CellSet<ExtendedCell> oldCellSet, MemStoreSizing memstoreSizing,
+ MemStoreCompactionStrategy.Action action) {
+ ExtendedCell curCell;
Chunk[] chunks = allocIndexChunks(numOfCells);
int currentChunkIdx = 0;
int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
- Cell prev = null;
+ ExtendedCell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
- assert (curCell instanceof ExtendedCell);
- if (((ExtendedCell) curCell).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
+ if (curCell.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
@@ -246,9 +243,10 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
segmentScanner.close();
}
- CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCells, false);
+ CellChunkMap<ExtendedCell> ccm =
+ new CellChunkMap<>(getComparator(), chunks, 0, numOfCells, false);
// update the CellSet of this Segment
- this.setCellSet(oldCellSet, new CellSet(ccm, numUniqueKeys));
+ this.setCellSet(oldCellSet, new CellSet<>(ccm, numUniqueKeys));
}
/*------------------------------------------------------------------------*/
@@ -317,7 +315,7 @@ private Chunk[] allocIndexChunks(int numOfCells) {
return chunks;
}
- private Cell copyCellIntoMSLAB(Cell cell, MemStoreSizing memstoreSizing) {
+ private ExtendedCell copyCellIntoMSLAB(ExtendedCell cell, MemStoreSizing memstoreSizing) {
// Take care for a special case when a cell is copied from on-heap to (probably off-heap) MSLAB.
// The cell allocated as an on-heap JVM object (byte array) occupies slightly different
// amount of memory, than when the cell serialized and allocated on the MSLAB.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java
index e4bfcf05ab2d..f6dad226cce0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java
@@ -51,7 +51,7 @@
*
*/
@InterfaceAudience.Private
-public class CellChunkMap extends CellFlatMap {
+public class CellChunkMap<T extends Cell> extends CellFlatMap<T> {
private final Chunk[] chunks; // the array of chunks, on which the index is based
@@ -69,7 +69,7 @@ public class CellChunkMap extends CellFlatMap {
* @param max number of Cells or the index of the cell after the maximal cell
* @param descending the order of the given array
*/
- public CellChunkMap(Comparator<? super Cell> comparator, Chunk[] chunks, int min, int max,
+ public CellChunkMap(Comparator<? super T> comparator, Chunk[] chunks, int min, int max,
boolean descending) {
super(comparator, min, max, descending);
this.chunks = chunks;
@@ -86,12 +86,12 @@ public CellChunkMap(Comparator<? super Cell> comparator, Chunk[] chunks, int min
* create only CellChunkMap from CellChunkMap
*/
@Override
- protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) {
- return new CellChunkMap(this.comparator(), this.chunks, min, max, descending);
+ protected CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending) {
+ return new CellChunkMap<>(this.comparator(), this.chunks, min, max, descending);
}
@Override
- protected Cell getCell(int i) {
+ protected T getCell(int i) {
// get the index of the relevant chunk inside chunk array
int chunkIndex = (i / numOfCellRepsInChunk);
ByteBuffer block = chunks[chunkIndex].getData();// get the ByteBuffer of the relevant chunk
@@ -127,6 +127,9 @@ protected Cell getCell(int i) {
+ ". We were looking for a cell at index " + i);
}
- return new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID);
+ @SuppressWarnings("unchecked")
+ T cell = (T) new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID);
+
+ return cell;
}
}
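
Reviewer note: the @SuppressWarnings("unchecked") cast added above is the usual escape hatch for a generic container that always materializes one concrete type; it is sound only by convention at the instantiation sites (here, every stored cell comes back as a ByteBufferChunkKeyValue, which satisfies any T the map is created with). The pattern in miniature, on an unrelated hypothetical type:

    // T is declared <T extends Number>, but every instance stores a Long.
    final class LongHolder<T extends Number> {
      private final long raw;
      LongHolder(long raw) { this.raw = raw; }

      @SuppressWarnings("unchecked")
      T get() {
        return (T) Long.valueOf(raw); // sound only while the T = Long convention holds
      }
    }
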
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
index 8a64d80c15ed..0c95f7ddb4ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
@@ -26,8 +26,6 @@
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* CellFlatMap stores a constant number of elements and is immutable after creation stage. Being
@@ -38,15 +36,15 @@
* sequential array and thus requires less memory than ConcurrentSkipListMap.
*/
@InterfaceAudience.Private
-public abstract class CellFlatMap implements NavigableMap<Cell, Cell> {
- private static final Logger LOG = LoggerFactory.getLogger(CellFlatMap.class);
- private final Comparator<? super Cell> comparator;
+public abstract class CellFlatMap<T extends Cell> implements NavigableMap<T, T> {
+
+ private final Comparator<? super T> comparator;
protected int minCellIdx = 0; // the index of the minimal cell (for sub-sets)
protected int maxCellIdx = 0; // the index of the cell after the maximal cell (for sub-sets)
private boolean descending = false;
/* C-tor */
- public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolean d) {
+ public CellFlatMap(Comparator<? super T> comparator, int min, int max, boolean d) {
this.comparator = comparator;
this.minCellIdx = min;
this.maxCellIdx = max;
@@ -54,10 +52,10 @@ public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolea
}
/* Used for abstract CellFlatMap creation, implemented by derived class */
- protected abstract CellFlatMap createSubCellFlatMap(int min, int max, boolean descending);
+ protected abstract CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending);
/* Returns the i-th cell in the cell block */
- protected abstract Cell getCell(int i);
+ protected abstract T getCell(int i);
/**
* Binary search for a given key in between given boundaries of the array. Positive returned
@@ -67,13 +65,13 @@ public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolea
* @param needle The key to look for in all of the entries
* @return Same return value as Arrays.binarySearch.
*/
- private int find(Cell needle) {
+ private int find(T needle) {
int begin = minCellIdx;
int end = maxCellIdx - 1;
while (begin <= end) {
int mid = begin + ((end - begin) >> 1);
- Cell midCell = getCell(mid);
+ T midCell = getCell(mid);
int compareRes = comparator.compare(midCell, needle);
if (compareRes == 0) {
@@ -98,7 +96,7 @@ private int find(Cell needle) {
* the given key exists in the set or not. taking into consideration whether the key should be
* inclusive or exclusive.
*/
- private int getValidIndex(Cell key, boolean inclusive, boolean tail) {
+ private int getValidIndex(T key, boolean inclusive, boolean tail) {
final int index = find(key);
// get the valid (positive) insertion point from the output of the find() method
int insertionPoint = index < 0 ? ~index : index;
@@ -125,7 +123,7 @@ private int getValidIndex(Cell key, boolean inclusive, boolean tail) {
}
@Override
- public Comparator<? super Cell> comparator() {
+ public Comparator<? super T> comparator() {
return comparator;
}
@@ -141,8 +139,7 @@ public boolean isEmpty() {
// ---------------- Sub-Maps ----------------
@Override
- public NavigableMap<Cell, Cell> subMap(Cell fromKey, boolean fromInclusive, Cell toKey,
- boolean toInclusive) {
+ public NavigableMap<T, T> subMap(T fromKey, boolean fromInclusive, T toKey, boolean toInclusive) {
final int lessCellIndex = getValidIndex(fromKey, fromInclusive, true);
final int greaterCellIndex = getValidIndex(toKey, toInclusive, false);
if (descending) {
@@ -153,7 +150,7 @@ public NavigableMap<Cell, Cell> subMap(Cell fromKey, boolean fromInclusive, Cell
}
@Override
- public NavigableMap<Cell, Cell> headMap(Cell toKey, boolean inclusive) {
+ public NavigableMap<T, T> headMap(T toKey, boolean inclusive) {
if (descending) {
return createSubCellFlatMap(getValidIndex(toKey, inclusive, false), maxCellIdx, descending);
} else {
@@ -162,7 +159,7 @@ public NavigableMap<Cell, Cell> headMap(Cell toKey, boolean inclusive) {
}
@Override
- public NavigableMap<Cell, Cell> tailMap(Cell fromKey, boolean inclusive) {
+ public NavigableMap<T, T> tailMap(T fromKey, boolean inclusive) {
if (descending) {
return createSubCellFlatMap(minCellIdx, getValidIndex(fromKey, inclusive, true), descending);
} else {
@@ -171,28 +168,28 @@ public NavigableMap<Cell, Cell> tailMap(Cell fromKey, boolean inclusive) {
}
@Override
- public NavigableMap<Cell, Cell> descendingMap() {
+ public NavigableMap<T, T> descendingMap() {
return createSubCellFlatMap(minCellIdx, maxCellIdx, true);
}
@Override
- public NavigableMap<Cell, Cell> subMap(Cell k1, Cell k2) {
+ public NavigableMap<T, T> subMap(T k1, T k2) {
return this.subMap(k1, true, k2, true);
}
@Override
- public NavigableMap<Cell, Cell> headMap(Cell k) {
+ public NavigableMap<T, T> headMap(T k) {
return this.headMap(k, true);
}
@Override
- public NavigableMap<Cell, Cell> tailMap(Cell k) {
+ public NavigableMap<T, T> tailMap(T k) {
return this.tailMap(k, true);
}
// -------------------------------- Key's getters --------------------------------
@Override
- public Cell firstKey() {
+ public T firstKey() {
if (isEmpty()) {
return null;
}
@@ -200,7 +197,7 @@ public Cell firstKey() {
}
@Override
- public Cell lastKey() {
+ public T lastKey() {
if (isEmpty()) {
return null;
}
@@ -208,7 +205,7 @@ public Cell lastKey() {
}
@Override
- public Cell lowerKey(Cell k) {
+ public T lowerKey(T k) {
if (isEmpty()) {
return null;
}
@@ -219,7 +216,7 @@ public Cell lowerKey(Cell k) {
}
@Override
- public Cell floorKey(Cell k) {
+ public T floorKey(T k) {
if (isEmpty()) {
return null;
}
@@ -229,7 +226,7 @@ public Cell floorKey(Cell k) {
}
@Override
- public Cell ceilingKey(Cell k) {
+ public T ceilingKey(T k) {
if (isEmpty()) {
return null;
}
@@ -239,7 +236,7 @@ public Cell ceilingKey(Cell k) {
}
@Override
- public Cell higherKey(Cell k) {
+ public T higherKey(T k) {
if (isEmpty()) {
return null;
}
@@ -250,7 +247,7 @@ public Cell higherKey(Cell k) {
@Override
public boolean containsKey(Object o) {
- int index = find((Cell) o);
+ int index = find((T) o);
return (index >= 0);
}
@@ -260,99 +257,99 @@ public boolean containsValue(Object o) { // use containsKey(Object o) instead
}
@Override
- public Cell get(Object o) {
- int index = find((Cell) o);
+ public T get(Object o) {
+ int index = find((T) o);
return (index >= 0) ? getCell(index) : null;
}
// -------------------------------- Entry's getters --------------------------------
- private static class CellFlatMapEntry implements Entry<Cell, Cell> {
- private final Cell cell;
+ private static class CellFlatMapEntry<T> implements Entry<T, T> {
+ private final T cell;
- public CellFlatMapEntry(Cell cell) {
+ public CellFlatMapEntry(T cell) {
this.cell = cell;
}
@Override
- public Cell getKey() {
+ public T getKey() {
return cell;
}
@Override
- public Cell getValue() {
+ public T getValue() {
return cell;
}
@Override
- public Cell setValue(Cell value) {
+ public T setValue(T value) {
throw new UnsupportedOperationException();
}
}
@Override
- public Entry<Cell, Cell> lowerEntry(Cell k) {
- Cell cell = lowerKey(k);
+ public Entry<T, T> lowerEntry(T k) {
+ T cell = lowerKey(k);
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
@Override
- public Entry<Cell, Cell> higherEntry(Cell k) {
- Cell cell = higherKey(k);
+ public Entry<T, T> higherEntry(T k) {
+ T cell = higherKey(k);
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
@Override
- public Entry<Cell, Cell> ceilingEntry(Cell k) {
- Cell cell = ceilingKey(k);
+ public Entry<T, T> ceilingEntry(T k) {
+ T cell = ceilingKey(k);
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
@Override
- public Entry<Cell, Cell> floorEntry(Cell k) {
- Cell cell = floorKey(k);
+ public Entry<T, T> floorEntry(T k) {
+ T cell = floorKey(k);
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
@Override
- public Entry<Cell, Cell> firstEntry() {
- Cell cell = firstKey();
+ public Entry<T, T> firstEntry() {
+ T cell = firstKey();
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
@Override
- public Entry<Cell, Cell> lastEntry() {
- Cell cell = lastKey();
+ public Entry<T, T> lastEntry() {
+ T cell = lastKey();
if (cell == null) {
return null;
}
- return new CellFlatMapEntry(cell);
+ return new CellFlatMapEntry<>(cell);
}
// The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are
// updating methods.
@Override
- public Entry<Cell, Cell> pollFirstEntry() {
+ public Entry<T, T> pollFirstEntry() {
throw new UnsupportedOperationException();
}
@Override
- public Entry<Cell, Cell> pollLastEntry() {
+ public Entry<T, T> pollLastEntry() {
throw new UnsupportedOperationException();
}
@@ -362,7 +359,7 @@ public Entry<Cell, Cell> pollLastEntry() {
// fill up with Cells and provided in construction time.
// Later the structure is immutable.
@Override
- public Cell put(Cell k, Cell v) {
+ public T put(T k, T v) {
throw new UnsupportedOperationException();
}
@@ -372,43 +369,43 @@ public void clear() {
}
@Override
- public Cell remove(Object o) {
+ public T remove(Object o) {
throw new UnsupportedOperationException();
}
@Override
- public void putAll(Map<? extends Cell, ? extends Cell> map) {
+ public void putAll(Map<? extends T, ? extends T> map) {
throw new UnsupportedOperationException();
}
// -------------------------------- Sub-Sets --------------------------------
@Override
- public NavigableSet<Cell> navigableKeySet() {
+ public NavigableSet<T> navigableKeySet() {
throw new UnsupportedOperationException();
}
@Override
- public NavigableSet<Cell> descendingKeySet() {
+ public NavigableSet<T> descendingKeySet() {
throw new UnsupportedOperationException();
}
@Override
- public NavigableSet<Cell> keySet() {
+ public NavigableSet<T> keySet() {
throw new UnsupportedOperationException();
}
@Override
- public Collection<Cell> values() {
+ public Collection<T> values() {
return new CellFlatMapCollection();
}
@Override
- public Set<Entry<Cell, Cell>> entrySet() {
+ public Set<Entry<T, T>> entrySet() {
throw new UnsupportedOperationException();
}
// -------------------------------- Iterator K --------------------------------
- private final class CellFlatMapIterator implements Iterator<Cell> {
+ private final class CellFlatMapIterator implements Iterator<T> {
int index;
private CellFlatMapIterator() {
@@ -421,8 +418,8 @@ public boolean hasNext() {
}
@Override
- public Cell next() {
- Cell result = getCell(index);
+ public T next() {
+ T result = getCell(index);
if (descending) {
index--;
} else {
@@ -438,7 +435,7 @@ public void remove() {
}
// -------------------------------- Collection --------------------------------
- private final class CellFlatMapCollection implements Collection<Cell> {
+ private final class CellFlatMapCollection implements Collection<T> {
@Override
public int size() {
@@ -461,7 +458,7 @@ public boolean contains(Object o) {
}
@Override
- public Iterator<Cell> iterator() {
+ public Iterator<T> iterator() {
return new CellFlatMapIterator();
}
@@ -476,7 +473,7 @@ public <T> T[] toArray(T[] ts) {
}
@Override
- public boolean add(Cell k) {
+ public boolean add(T k) {
throw new UnsupportedOperationException();
}
@@ -491,7 +488,7 @@ public boolean containsAll(Collection<?> collection) {
}
@Override
- public boolean addAll(Collection<? extends Cell> collection) {
+ public boolean addAll(Collection<? extends T> collection) {
throw new UnsupportedOperationException();
}
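
Reviewer note: find() mirrors the Arrays.binarySearch contract, and getValidIndex() recovers the insertion point with the `~index` trick. A worked example of that contract:

    import java.util.Arrays;

    static void demo() {
      int[] a = { 10, 20, 30 };
      int hit = Arrays.binarySearch(a, 20);  //  1: found at index 1
      int miss = Arrays.binarySearch(a, 25); // -3, i.e. ~2: not found, insertion point 2
      int insertionPoint = miss < 0 ? ~miss : miss; // same idiom as getValidIndex() above
      assert hit == 1 && insertionPoint == 2;
    }
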
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
index 4890c8a84494..c8d9b5b2ea67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
@@ -36,7 +36,7 @@
* and set and won't throw ConcurrentModificationException when iterating.
*/
@InterfaceAudience.Private
-public class CellSet implements NavigableSet<Cell> {
+public class CellSet<T extends Cell> implements NavigableSet<T> {
public static final int UNKNOWN_NUM_UNIQUES = -1;
// Implemented on top of a {@link java.util.concurrent.ConcurrentSkipListMap}
@@ -44,127 +44,127 @@ public class CellSet implements NavigableSet<Cell> {
// is not already present.", this implementation "Adds the specified element to this set EVEN
// if it is already present overwriting what was there previous".
// Otherwise, has same attributes as ConcurrentSkipListSet
- private final NavigableMap<Cell, Cell> delegatee; ///
+ private final NavigableMap<T, T> delegatee; ///
private final int numUniqueKeys;
- public CellSet(final CellComparator c) {
+ public CellSet(CellComparator c) {
this.delegatee = new ConcurrentSkipListMap<>(c.getSimpleComparator());
this.numUniqueKeys = UNKNOWN_NUM_UNIQUES;
}
- CellSet(final NavigableMap<Cell, Cell> m, int numUniqueKeys) {
+ CellSet(final NavigableMap<T, T> m, int numUniqueKeys) {
this.delegatee = m;
this.numUniqueKeys = numUniqueKeys;
}
- CellSet(final NavigableMap<Cell, Cell> m) {
+ CellSet(final NavigableMap<T, T> m) {
this.delegatee = m;
this.numUniqueKeys = UNKNOWN_NUM_UNIQUES;
}
- NavigableMap<Cell, Cell> getDelegatee() {
+ NavigableMap<T, T> getDelegatee() {
return delegatee;
}
@Override
- public Cell ceiling(Cell e) {
+ public T ceiling(T e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Iterator<Cell> descendingIterator() {
+ public Iterator<T> descendingIterator() {
return this.delegatee.descendingMap().values().iterator();
}
@Override
- public NavigableSet<Cell> descendingSet() {
+ public NavigableSet<T> descendingSet() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Cell floor(Cell e) {
+ public T floor(T e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public SortedSet<Cell> headSet(final Cell toElement) {
+ public SortedSet<T> headSet(final T toElement) {
return headSet(toElement, false);
}
@Override
- public NavigableSet<Cell> headSet(final Cell toElement, boolean inclusive) {
- return new CellSet(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES);
+ public NavigableSet<T> headSet(final T toElement, boolean inclusive) {
+ return new CellSet<>(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES);
}
@Override
- public Cell higher(Cell e) {
+ public T higher(T e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Iterator<Cell> iterator() {
+ public Iterator<T> iterator() {
return this.delegatee.values().iterator();
}
@Override
- public Cell lower(Cell e) {
+ public T lower(T e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Cell pollFirst() {
+ public T pollFirst() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Cell pollLast() {
+ public T pollLast() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public SortedSet<Cell> subSet(Cell fromElement, Cell toElement) {
+ public SortedSet<T> subSet(T fromElement, T toElement) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public NavigableSet<Cell> subSet(Cell fromElement, boolean fromInclusive, Cell toElement,
+ public NavigableSet<T> subSet(T fromElement, boolean fromInclusive, T toElement,
boolean toInclusive) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public SortedSet<Cell> tailSet(Cell fromElement) {
+ public SortedSet<T> tailSet(T fromElement) {
return tailSet(fromElement, true);
}
@Override
- public NavigableSet<Cell> tailSet(Cell fromElement, boolean inclusive) {
- return new CellSet(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES);
+ public NavigableSet<T> tailSet(T fromElement, boolean inclusive) {
+ return new CellSet<>(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES);
}
@Override
- public Comparator<? super Cell> comparator() {
+ public Comparator<? super T> comparator() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@Override
- public Cell first() {
+ public T first() {
return this.delegatee.firstEntry().getValue();
}
@Override
- public Cell last() {
+ public T last() {
return this.delegatee.lastEntry().getValue();
}
@Override
- public boolean add(Cell e) {
+ public boolean add(T e) {
return this.delegatee.put(e, e) == null;
}
@Override
- public boolean addAll(Collection<? extends Cell> c) {
+ public boolean addAll(Collection<? extends T> c) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
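The class comment in this hunk is the behavioural caveat worth remembering: unlike ConcurrentSkipListSet, this set's add overwrites an element that is already present, because add(e) delegates to delegatee.put(e, e) and reports true only when the key was absent. A self-contained demonstration of that delegatee behaviour, using plain strings in place of cells:

  import java.util.Comparator;
  import java.util.NavigableMap;
  import java.util.concurrent.ConcurrentSkipListMap;

  public class OverwritingSetDemo {
    public static void main(String[] args) {
      // Same construction shape as CellSet: a skip-list map keyed and valued
      // by the element itself, ordered by a custom comparator.
      NavigableMap<String, String> delegatee =
        new ConcurrentSkipListMap<>(Comparator.comparing(String::toLowerCase));

      String first = "ROW-1";
      String second = "row-1"; // compares equal to first, but a different object

      // add(e) is implemented as put(e, e) == null: true only if the key was absent.
      System.out.println(delegatee.put(first, first) == null);   // true: newly added
      System.out.println(delegatee.put(second, second) == null); // false: already present...
      System.out.println(delegatee.firstEntry().getValue());     // "row-1": ...but overwritten
    }
  }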
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
index 1d838d86abcf..f3f260f8cf7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
@@ -19,7 +19,7 @@
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.yetus.audience.InterfaceAudience;
@@ -34,14 +34,14 @@ public interface CellSink {
* Append the given cell
* @param cell the cell to be added
*/
- void append(Cell cell) throws IOException;
+ void append(ExtendedCell cell) throws IOException;
/**
* Append the given (possibly partial) list of cells of a row
* @param cellList the cell list to be added
*/
- default void appendAll(List<Cell> cellList) throws IOException {
- for (Cell cell : cellList) {
+ default void appendAll(List<ExtendedCell> cellList) throws IOException {
+ for (ExtendedCell cell : cellList) {
append(cell);
}
}
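With append narrowed to ExtendedCell, an implementor only has to supply the single-cell method; appendAll falls out of the default shown above. A sketch, assuming CellSink declares exactly the two methods in this hunk (CountingSink is a made-up class for illustration; real sinks are store-file writers):

  import java.io.IOException;

  import org.apache.hadoop.hbase.ExtendedCell;
  import org.apache.hadoop.hbase.regionserver.CellSink;

  // Hypothetical sink that only counts cells; appendAll(List) is inherited
  // from the default method and loops over append.
  public class CountingSink implements CellSink {
    private long count;

    @Override
    public void append(ExtendedCell cell) throws IOException {
      count++; // a real sink would serialize the cell here
    }

    public long appendedCellCount() {
      return count;
    }
  }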
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 1a2cbc6bdabf..568a7b061021 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
@@ -306,7 +307,7 @@ public void stopReplayingFromWAL() {
* @return true iff can proceed with applying the update
*/
@Override
- protected boolean preUpdate(MutableSegment currentActive, Cell cell,
+ protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell,
MemStoreSizing memstoreSizing) {
if (currentActive.sharedLock()) {
if (checkAndAddToActiveSize(currentActive, cell, memstoreSizing)) {
@@ -621,8 +622,8 @@ boolean isMemStoreFlushingInMemory() {
* @param cell Find the row that comes after this one. If null, we return the first.
* @return Next row or null if none found.
*/
- Cell getNextRow(final Cell cell) {
- Cell lowest = null;
+ ExtendedCell getNextRow(final ExtendedCell cell) {
+ ExtendedCell lowest = null;
List<? extends Segment> segments = getSegments();
for (Segment segment : segments) {
if (lowest == null) {
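The hunk is truncated here, but the loop it edits is a minimum-over-segments reduction: each segment proposes its first cell after the given one, and the lowest proposal wins. A generic sketch of that reduction, modelling segments as NavigableSets (the real Segment API differs, and the real code compares by row rather than by whole element):

  import java.util.Comparator;
  import java.util.List;
  import java.util.NavigableSet;

  public final class NextRowSketch {
    // Each segment proposes the first element strictly after 'cell';
    // the overall answer is the lowest proposal, or null when none exists.
    static <C> C lowestNext(List<NavigableSet<C>> segments, C cell, Comparator<C> cmp) {
      C lowest = null;
      for (NavigableSet<C> segment : segments) {
        C candidate = segment.higher(cell); // first element after 'cell' here
        if (candidate != null && (lowest == null || cmp.compare(candidate, lowest) < 0)) {
          lowest = candidate;
        }
      }
      return lowest;
    }
  }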
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index f955eb5d5825..af09e462140c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -23,6 +23,7 @@
import java.util.SortedSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -93,7 +94,7 @@ public void close() {
* @return either the given cell or its clone
*/
@Override
- public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {
+ public ExtendedCell maybeCloneWithAllocator(ExtendedCell cell, boolean forceCloneOfBigCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -192,17 +193,17 @@ public TimeRangeTracker getTimeRangeTracker() {
// *** Methods for SegmentsScanner
@Override
- public Cell last() {
+ public ExtendedCell last() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@Override
- public Iterator<Cell> iterator() {
+ public Iterator<ExtendedCell> iterator() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@Override
- public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
+ public SortedSet<ExtendedCell> headSet(ExtendedCell firstKeyOnRow) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -218,18 +219,18 @@ public int compareRows(Cell left, Cell right) {
/** Returns a set of all cells in the segment */
@Override
- protected CellSet getCellSet() {
+ protected CellSet<ExtendedCell> getCellSet() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@Override
- protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing,
+ protected void internalAdd(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memstoreSizing,
boolean sizeAddedPreOperation) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@Override
- protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
+ protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, boolean mslabUsed,
MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -240,7 +241,7 @@ protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
* @return a subset of the segment cell set, which starts with the given cell
*/
@Override
- protected SortedSet<Cell> tailSet(Cell firstCell) {
+ protected SortedSet<ExtendedCell> tailSet(ExtendedCell firstCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
index e5ee8041c350..b800178e8a28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
@@ -23,7 +23,7 @@
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -54,7 +54,7 @@ public DateTieredMultiFileWriter(List<Long> lowerBoundaries,
}
@Override
- public void append(Cell cell) throws IOException {
+ public void append(ExtendedCell cell) throws IOException {
Map.Entry<Long, StoreFileWriter> entry = lowerBoundary2Writer.floorEntry(cell.getTimestamp());
StoreFileWriter writer = entry.getValue();
if (writer == null) {
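The routing rule in append deserves a gloss: lowerBoundary2Writer is keyed by each tier's lower timestamp boundary, so floorEntry(cell.getTimestamp()) picks the writer for the greatest boundary not exceeding the cell's timestamp, i.e. the tier whose window contains the cell. A self-contained illustration with strings standing in for StoreFileWriters:

  import java.util.List;
  import java.util.NavigableMap;
  import java.util.TreeMap;

  public class TierRoutingDemo {
    public static void main(String[] args) {
      // Lower boundaries of three tiers, as passed to the constructor above.
      List<Long> lowerBoundaries = List.of(0L, 1000L, 2000L);
      NavigableMap<Long, String> lowerBoundary2Writer = new TreeMap<>();
      for (Long boundary : lowerBoundaries) {
        lowerBoundary2Writer.put(boundary, "writer-starting-at-" + boundary);
      }

      // floorEntry picks the greatest boundary <= the cell timestamp,
      // i.e. the tier whose window contains the cell.
      long timestamp = 1500L;
      System.out.println(lowerBoundary2Writer.floorEntry(timestamp).getValue());
      // -> writer-starting-at-1000
    }
  }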
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 420dad51e377..433105e998f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.InnerStoreCellComparator;
@@ -155,7 +156,7 @@ protected List<Segment> getSegments() throws IOException {
* @param cell Find the row that comes after this one. If null, we return the first.
* @return Next row or null if none found.
*/
- Cell getNextRow(final Cell cell) {
+ ExtendedCell getNextRow(final ExtendedCell cell) {
return getLowest(getNextRow(cell, this.getActive().getCellSet()),
getNextRow(cell, this.snapshot.getCellSet()));
}
@@ -165,7 +166,7 @@ public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {
}
@Override
- protected boolean preUpdate(MutableSegment currentActive, Cell cell,
+ protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell,
MemStoreSizing memstoreSizing) {
return true;
}
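DefaultMemStore's getNextRow is the two-segment special case of the same reduction: probe the active cell set and the snapshot, then keep the lower candidate. A sketch of the null-tolerant minimum that getLowest presumably computes (the helper itself is outside this excerpt):

  import java.util.Comparator;

  public final class GetLowestSketch {
    // Null-tolerant minimum of the two candidates: a null candidate
    // simply loses to a non-null one.
    static <C> C getLowest(C fromActive, C fromSnapshot, Comparator<C> cmp) {
      if (fromActive == null) {
        return fromSnapshot;
      }
      if (fromSnapshot == null) {
        return fromActive;
      }
      return cmp.compare(fromActive, fromSnapshot) <= 0 ? fromActive : fromSnapshot;
    }
  }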
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index d4b24de33cc3..c71498cfab09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -33,10 +33,10 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@@ -300,7 +300,7 @@ private void validateMobFile(Path path) throws IOException {
* @param cacheBlocks Whether the scanner should cache blocks.
* @return The cell found in the mob file.
*/
- public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException {
+ public MobCell resolve(ExtendedCell reference, boolean cacheBlocks) throws IOException {
return resolve(reference, cacheBlocks, -1, true);
}
@@ -313,8 +313,8 @@ public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException {
* resolved.
* @return The cell found in the mob file.
*/
- public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyValueOnMobCellMiss)
- throws IOException {
+ public MobCell resolve(ExtendedCell reference, boolean cacheBlocks,
+ boolean readEmptyValueOnMobCellMiss) throws IOException {
return resolve(reference, cacheBlocks, -1, readEmptyValueOnMobCellMiss);
}
@@ -328,7 +328,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyVal
* corrupt.
* @return The cell found in the mob file.
*/
- public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt,
+ public MobCell resolve(ExtendedCell reference, boolean cacheBlocks, long readPt,
boolean readEmptyValueOnMobCellMiss) throws IOException {
MobCell mobCell = null;
if (MobUtils.hasValidMobRefCellValue(reference)) {
@@ -343,7 +343,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt,
if (mobCell == null) {
LOG.warn("The Cell result is null, assemble a new Cell with the same row,family,"
+ "qualifier,timestamp,type and tags but with an empty value to return.");
- Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+ ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
.setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength())
.setFamily(reference.getFamilyArray(), reference.getFamilyOffset(),
reference.getFamilyLength())
@@ -397,7 +397,7 @@ public List<Path> getLocations(TableName tableName) throws IOException {
* corrupt.
* @return The found cell. Null if there's no such a cell.
*/
- private MobCell readCell(List<Path> locations, String fileName, Cell search,
+ private MobCell readCell(List<Path> locations, String fileName, ExtendedCell search,
boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException {
FileSystem fs = getFileSystem();
IOException ioe = null;
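The fallback in resolve rebuilds the reference cell with an empty value instead of failing the read, and under the new types the builder chain yields an ExtendedCell directly. A sketch of that chain; this excerpt only shows it through setFamily, so treat the trailing setters as an assumption about the ExtendedCellBuilder API:

  import org.apache.hadoop.hbase.CellBuilderType;
  import org.apache.hadoop.hbase.ExtendedCell;
  import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
  import org.apache.hadoop.hbase.HConstants;

  public final class EmptyMobCellSketch {
    // Copy the reference cell's coordinates but return an empty value,
    // as the resolve() fallback above does when the mob cell is missing.
    static ExtendedCell emptyValueCopy(ExtendedCell reference) {
      return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength())
        .setFamily(reference.getFamilyArray(), reference.getFamilyOffset(),
          reference.getFamilyLength())
        .setQualifier(reference.getQualifierArray(), reference.getQualifierOffset(),
          reference.getQualifierLength())
        .setTimestamp(reference.getTimestamp())
        .setType(reference.getTypeByte())
        .setValue(HConstants.EMPTY_BYTE_ARRAY)
        .build();
    }
  }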
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 2381458a48bb..51927799f289 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -91,6 +91,7 @@
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
@@ -3145,18 +3146,18 @@ public void delete(Delete delete) throws IOException {
*
* Caller should have the row and region locks.
*/
- private void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
+ private void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<ExtendedCell>> familyMap,
byte[] byteNow) throws IOException {
- for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
+ for (Map.Entry<byte[], List<ExtendedCell>> e : familyMap.entrySet()) {
byte[] family = e.getKey();
- List<Cell> cells = e.getValue();
+ List<ExtendedCell> cells = e.getValue();
assert cells instanceof RandomAccess;
Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
int listSize = cells.size();
for (int i = 0; i < listSize; i++) {
- Cell cell = cells.get(i);
+ ExtendedCell cell = cells.get(i);
// Check if time is LATEST, change to time of most recent addition if so
// This is expensive.
if (
@@ -3242,7 +3243,7 @@ private abstract static class BatchOperation<T> {
protected final OperationStatus[] retCodeDetails;
protected final WALEdit[] walEditsFromCoprocessors;
// reference family cell maps directly so coprocessors can mutate them if desired
- protected final Map<byte[], List<Cell>>[] familyCellMaps;
+ protected final Map<byte[], List<ExtendedCell>>[] familyCellMaps;
// For Increment/Append operations
protected final Result[] results;
@@ -3411,7 +3412,9 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep
if (mutation instanceof Put || mutation instanceof Delete) {
// store the family map reference to allow for mutations
- familyCellMaps[index] = mutation.getFamilyCellMap();
+ // we know that in mutation, only ExtendedCells are allowed so here we do a fake cast, to
+ // simplify later logic
+ familyCellMaps[index] = (Map) mutation.getFamilyCellMap();
}
// store durability for the batch (highest durability of all operations in the batch)
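The "fake cast" commented above is the usual trick for narrowing a generic type through a raw intermediate: Map<byte[], List<Cell>> cannot be cast directly to Map<byte[], List<ExtendedCell>>, but passing through the raw Map type compiles (with an unchecked warning) and is sound as long as only ExtendedCells are ever stored in the map. A self-contained illustration:

  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class FakeCastDemo {
    interface Cell {}
    interface ExtendedCell extends Cell {}

    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static void main(String[] args) {
      // The API exposes the wider element type...
      Map<byte[], List<Cell>> familyCellMap = new HashMap<>();
      // ...but every value actually stored is an ExtendedCell, so the
      // raw-typed cast never lies about the runtime contents.
      Map<byte[], List<ExtendedCell>> narrowed = (Map) familyCellMap;
      System.out.println(narrowed.isEmpty()); // same map object, narrower static type
    }
  }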
@@ -3602,7 +3605,7 @@ public boolean visit(int index) throws IOException {
walEdit.add(cell);
}
}
- walEdit.add(familyCellMaps[index]);
+ walEdit.add((Map) familyCellMaps[index]);
return true;
}
@@ -3663,11 +3666,11 @@ public void doPostOpCleanupForMiniBatch(
* also does not check the families for validity.
* @param familyMap Map of Cells by family
*/
- protected void applyFamilyMapToMemStore(Map<byte[], List<Cell>> familyMap,
+ protected void applyFamilyMapToMemStore(Map<byte[], List<ExtendedCell>> familyMap,
MemStoreSizing memstoreAccounting) {
- for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
+ for (Map.Entry<byte[], List<ExtendedCell>> e : familyMap.entrySet()) {
byte[] family = e.getKey();
- List<Cell> cells = e.getValue();
+ List<ExtendedCell> cells = e.getValue();
assert cells instanceof RandomAccess;
region.applyToMemStore(region.getStore(family), cells, false, memstoreAccounting);
}
@@ -3844,7 +3847,7 @@ public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> mi
return true;
}
- List<Cell> results = returnResults ? new ArrayList<>(mutation.size()) : null;
+ List<ExtendedCell> results = returnResults ? new ArrayList<>(mutation.size()) : null;
familyCellMaps[index] = reckonDeltas(mutation, results, timestamp);
this.results[index] = results != null ? Result.create(results) : Result.EMPTY_RESULT;
@@ -3934,19 +3937,19 @@ private static Get toGet(final Mutation mutation) throws IOException {
return get;
}
- private Map<byte[], List<Cell>> reckonDeltas(Mutation mutation, List<Cell> results, long now)
- throws IOException {
+ private Map<byte[], List<ExtendedCell>> reckonDeltas(Mutation mutation,
+ List<ExtendedCell> results, long now) throws IOException {
assert mutation instanceof Increment || mutation instanceof Append;
- Map<byte[], List<Cell>> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+ Map<byte[], List<ExtendedCell>> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR);
// Process a Store/family at a time.
for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
final byte[] columnFamilyName = entry.getKey();
- List<Cell> deltas = entry.getValue();
+ List<ExtendedCell> deltas = (List) entry.getValue();
// Reckon for the Store what to apply to WAL and MemStore.
- List<Cell> toApply =
+ List<ExtendedCell> toApply =
reckonDeltasByStore(region.stores.get(columnFamilyName), mutation, now, deltas, results);
if (!toApply.isEmpty()) {
- for (Cell cell : toApply) {
+ for (ExtendedCell cell : toApply) {
HStore store = region.getStore(cell);
if (store == null) {
region.checkFamily(CellUtil.cloneFamily(cell));
@@ -3971,11 +3974,11 @@ private Map<byte[], List<Cell>> reckonDeltas(Mutation mutation, List<Cell> resul
* @return Resulting Cells after deltas have been applied to current values. Side
* effect is our filling out of the results List.
*/
- private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now,
- List<Cell> deltas, List<Cell> results) throws IOException {
+ private List<ExtendedCell> reckonDeltasByStore(HStore store, Mutation mutation, long now,
+ List<ExtendedCell> deltas, List<ExtendedCell> results) throws IOException {
assert mutation instanceof Increment || mutation instanceof Append;
byte[] columnFamily = store.getColumnFamilyDescriptor().getName();
- List<Pair<Cell, Cell>> cellPairs = new ArrayList<>(deltas.size());
+ List<Pair<ExtendedCell, ExtendedCell>> cellPairs = new ArrayList<>(deltas.size());
// Sort the cells so that they match the order that they appear in the Get results.
// Otherwise, we won't be able to find the existing values if the cells are not specified
@@ -3984,7 +3987,7 @@ private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now
// Get previous values for all columns in this family.
Get get = new Get(mutation.getRow());
- for (Cell cell : deltas) {
+ for (ExtendedCell cell : deltas) {
get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
}
TimeRange tr;
@@ -4001,14 +4004,14 @@ private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now
try (RegionScanner scanner = region.getScanner(new Scan(get))) {
// NOTE: Please don't use HRegion.get() instead,
// because it will copy cells to heap. See HBASE-26036
- List<Cell> currentValues = new ArrayList<>();
- scanner.next(currentValues);
+ List<ExtendedCell> currentValues = new ArrayList<>();
+ scanner.next((List) currentValues);
// Iterate the input columns and update existing values if they were found, otherwise
// add new column initialized to the delta amount
int currentValuesIndex = 0;
for (int i = 0; i < deltas.size(); i++) {
- Cell delta = deltas.get(i);
- Cell currentValue = null;
+ ExtendedCell delta = deltas.get(i);
+ ExtendedCell currentValue = null;
if (
currentValuesIndex < currentValues.size()
&& CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta)
@@ -4019,7 +4022,7 @@ private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now
}
}
// Switch on whether this an increment or an append building the new Cell to apply.
- Cell newCell;
+ ExtendedCell newCell;
if (mutation instanceof Increment) {
long deltaAmount = getLongValue(delta);
final long newValue =
@@ -4053,14 +4056,14 @@ private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now
if (region.coprocessorHost != null) {
// Here the operation must be increment or append.
cellPairs = mutation instanceof Increment
- ? region.coprocessorHost.postIncrementBeforeWAL(mutation, cellPairs)
- : region.coprocessorHost.postAppendBeforeWAL(mutation, cellPairs);
+ ? region.coprocessorHost.postIncrementBeforeWAL(mutation, (List) cellPairs)
+ : region.coprocessorHost.postAppendBeforeWAL(mutation, (List) cellPairs);
}
}
return cellPairs.stream().map(Pair::getSecond).collect(Collectors.toList());
}
- private static Cell reckonDelta(final Cell delta, final Cell currentCell,
+ private static ExtendedCell reckonDelta(final Cell delta, final Cell currentCell,
final byte[] columnFamily, final long now, Mutation mutation, Function<Cell, byte[]> supplier)
throws IOException {
// Forward any tags found on the delta.
@@ -4080,7 +4083,11 @@ private static Cell reckonDelta(final Cell delta, final Cell currentCell,
} else {
tags = TagUtil.carryForwardTTLTag(tags, mutation.getTTL());
PrivateCellUtil.updateLatestStamp(delta, now);
- return CollectionUtils.isEmpty(tags) ? delta : PrivateCellUtil.createCell(delta, tags);
+ assert delta instanceof ExtendedCell;
+ ExtendedCell deltaCell = (ExtendedCell) delta;
+ return CollectionUtils.isEmpty(tags)
+ ? deltaCell
+ : PrivateCellUtil.createCell(deltaCell, tags);
}
}
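The tail of reckonDelta makes a small allocation argument: when no tags survive the carry-forward, the delta cell is returned as-is, and a new cell is only materialised when TTL or other tags must be attached. A generic sketch of that reuse-or-wrap decision, with stand-ins for the real TagUtil/PrivateCellUtil helpers:

  import java.util.List;

  public final class ReuseOrWrapSketch {
    // Stand-ins for the real HBase types used in reckonDelta.
    interface Cell {}
    record TaggedCell(Cell inner, List<String> tags) implements Cell {}

    // Return the original cell when there is nothing to attach; otherwise
    // materialise a new cell wrapping the old one plus the carried tags.
    static Cell reuseOrWrap(Cell delta, List<String> carriedTags) {
      return (carriedTags == null || carriedTags.isEmpty())
        ? delta                               // no tags: reuse, no allocation
        : new TaggedCell(delta, carriedTags); // tags survive: wrap in a new cell
    }
  }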
@@ -4295,7 +4302,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress<Mutatio
- Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
+ Map<byte[], List<ExtendedCell>> cpFamilyMap = (Map) cpMutation.getFamilyCellMap();