diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 6915adec0181..9c1817559c86 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -125,19 +125,19 @@ private Result(boolean readonly) { * Note: You must ensure that the keyvalues are already sorted. * @param cells List of cells */ - public static Result create(List<Cell> cells) { + public static Result create(List<? extends Cell> cells) { return create(cells, null); } - public static Result create(List<Cell> cells, Boolean exists) { + public static Result create(List<? extends Cell> cells, Boolean exists) { return create(cells, exists, false); } - public static Result create(List<Cell> cells, Boolean exists, boolean stale) { + public static Result create(List<? extends Cell> cells, Boolean exists, boolean stale) { return create(cells, exists, stale, false); } - public static Result create(List<Cell> cells, Boolean exists, boolean stale, + public static Result create(List<? extends Cell> cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) { if (exists != null) { return new Result(null, exists, stale, mayHaveMoreCellsInRow); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 29bfce4b07d0..145d532fa7f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -129,15 +129,18 @@ public ReturnCode filterCell(final Cell c) throws IOException { } /** - * Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new Cell - * object must be returned. + * Give the filter a chance to transform the passed Cell. If the Cell is changed a new Cell object + * must be returned. + * <p/> + * NOTICE: Filter will be evaluated at server side so the returned {@link Cell} + * must be an {@link org.apache.hadoop.hbase.ExtendedCell}, although it is marked as IA.Private. * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() The transformed KeyValue is what is * eventually returned to the client. Most filters will return the passed KeyValue unchanged. * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transformCell(Cell) for an example of a * transformation. Concrete implementers can signal a failure condition in their code by * throwing an {@link IOException}. - * @param v the KeyValue in question - * @return the changed KeyValue + * @param v the Cell in question + * @return the changed Cell * @throws IOException in case an I/O or a filter-specific failure needs to be signaled. */ abstract public Cell transformCell(final Cell v) throws IOException; @@ -213,6 +216,8 @@ public enum ReturnCode { * the next key it must seek to. After receiving the match code SEEK_NEXT_USING_HINT, the * QueryMatcher would call this function to find out which key it must next seek to. Concrete * implementers can signal a failure condition in their code by throwing an {@link IOException}. + * NOTICE: Filter will be evaluated at server side so the returned {@link Cell} + * must be an {@link org.apache.hadoop.hbase.ExtendedCell}, although it is marked as IA.Private. * @return KeyValue which must be next seeked. return null if the filter is not sure which key to * seek to next. * @throws IOException in case an I/O or a filter-specific failure needs to be signaled. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index ff17b0c681da..e5663bb8d7a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -26,9 +26,11 @@ import java.util.Optional; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; @@ -150,7 +152,7 @@ public int hashCode() { return Objects.hash(this.lenAsVal); } - static class KeyOnlyCell implements Cell { + static class KeyOnlyCell implements ExtendedCell { private Cell cell; private int keyLen; private boolean lenAsVal; @@ -273,6 +275,21 @@ public int getTagsLength() { public long heapSize() { return cell.heapSize(); } + + @Override + public void setSequenceId(long seqId) throws IOException { + PrivateCellUtil.setSequenceId(cell, seqId); + } + + @Override + public void setTimestamp(long ts) throws IOException { + PrivateCellUtil.setTimestamp(cell, ts); + } + + @Override + public void setTimestamp(byte[] ts) throws IOException { + PrivateCellUtil.setTimestamp(cell, ts); + } } static class KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell {
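To make the new NOTICE concrete, here is a minimal, hypothetical filter sketch (the class name and behavior are illustrative, not part of this patch): because filters are evaluated on the server side, transformCell must hand back a cell that is also an ExtendedCell, and returning a KeyValue copy is one way to satisfy that.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.filter.FilterBase;

// Hypothetical example, not part of this patch.
public class PassThroughCopyFilter extends FilterBase {
  @Override
  public Cell transformCell(Cell v) throws IOException {
    // Cells handed to a filter on the server side are ExtendedCells, so the
    // cast below is expected to hold; the returned KeyValue is itself an
    // ExtendedCell, which is what the NOTICE requires of the return value.
    return KeyValueUtil.ensureKeyValue((ExtendedCell) v);
  }
}
```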
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 56af3c58e899..2822446e84e0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -560,7 +560,7 @@ public static Cell createCell(final byte[] row, final byte[] family, final byte[ */ @Deprecated public static Cell createCell(Cell cell, List<Tag> tags) { - return PrivateCellUtil.createCell(cell, tags); + return PrivateCellUtil.createCell((ExtendedCell) cell, tags); } /** @@ -571,7 +571,7 @@ public static Cell createCell(Cell cell, List<Tag> tags) { */ @Deprecated public static Cell createCell(Cell cell, byte[] tags) { - return PrivateCellUtil.createCell(cell, tags); + return PrivateCellUtil.createCell((ExtendedCell) cell, tags); } /** @@ -581,7 +581,7 @@ public static Cell createCell(Cell cell, byte[] tags) { */ @Deprecated public static Cell createCell(Cell cell, byte[] value, byte[] tags) { - return PrivateCellUtil.createCell(cell, value, tags); + return PrivateCellUtil.createCell((ExtendedCell) cell, value, tags); } /** Returns CellScanner interface over cellIterables */ @@ -1690,6 +1690,8 @@ public static void cloneIfNecessary(ArrayList<Cell> cells) { } public static Cell cloneIfNecessary(Cell cell) { - return (cell instanceof ByteBufferExtendedCell ? KeyValueUtil.copyToNewKeyValue(cell) : cell); + return (cell instanceof ByteBufferExtendedCell + ? KeyValueUtil.copyToNewKeyValue((ExtendedCell) cell) + : cell); } }
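One practical consequence of the casts introduced above, shown as a hedged sketch (class name is illustrative; KeyValue, ArrayBackedTag, and the deprecated CellUtil.createCell overload are existing HBase API): the deprecated methods keep compiling against plain Cell, but the argument must actually be an ExtendedCell at runtime.

```java
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateCellWithTagsExample {
  public static void main(String[] args) {
    Cell base = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("value"));
    List<Tag> tags =
      Collections.singletonList(new ArrayBackedTag((byte) 1, Bytes.toBytes("t")));
    // Succeeds because KeyValue implements ExtendedCell; a hand-rolled class
    // implementing only the public Cell interface would now fail here with a
    // ClassCastException instead of being accepted.
    Cell tagged = CellUtil.createCell(base, tags);
    System.out.println(CellUtil.toString(tagged, false));
  }
}
```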
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index b1b59af7bf34..6cc17fee09c3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -99,7 +99,7 @@ public static KeyValue copyToNewKeyValue(final Cell cell) { * The position will be set to the beginning of the new ByteBuffer * @return the Bytebuffer containing the key part of the cell */ - public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) { + public static ByteBuffer copyKeyToNewByteBuffer(final ExtendedCell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); ByteBuffer buffer = ByteBuffer.wrap(bytes); @@ -110,7 +110,7 @@ public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) { * Copies the key to a new KeyValue * @return the KeyValue that consists only the key part of the incoming cell */ - public static KeyValue toNewKeyCell(final Cell cell) { + public static KeyValue toNewKeyCell(final ExtendedCell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); KeyValue kv = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); @@ -163,7 +163,7 @@ public static int appendToByteArray(Cell cell, byte[] output, int offset, boolea /** * Copy the Cell content into the passed buf in KeyValue serialization format. */ - public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTags) { + public static int appendTo(ExtendedCell cell, ByteBuffer buf, int offset, boolean withTags) { offset = ByteBufferUtils.putInt(buf, offset, keyLength(cell));// Key length offset = ByteBufferUtils.putInt(buf, offset, cell.getValueLength());// Value length offset = appendKeyTo(cell, buf, offset); @@ -176,7 +176,7 @@ public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTa return offset; } - public static int appendKeyTo(Cell cell, ByteBuffer buf, int offset) { + public static int appendKeyTo(ExtendedCell cell, ByteBuffer buf, int offset) { offset = ByteBufferUtils.putShort(buf, offset, cell.getRowLength());// RK length offset = CellUtil.copyRowTo(cell, buf, offset);// Row bytes offset = ByteBufferUtils.putByte(buf, offset, cell.getFamilyLength());// CF length @@ -433,10 +433,10 @@ public static KeyValue ensureKeyValue(final Cell cell) { } @Deprecated - public static List<KeyValue> ensureKeyValues(List<Cell> cells) { - List<KeyValue> lazyList = Lists.transform(cells, new Function<Cell, KeyValue>() { + public static List<KeyValue> ensureKeyValues(List<ExtendedCell> cells) { + List<KeyValue> lazyList = Lists.transform(cells, new Function<ExtendedCell, KeyValue>() { @Override - public KeyValue apply(Cell arg0) { + public KeyValue apply(ExtendedCell arg0) { return KeyValueUtil.ensureKeyValue(arg0); } }); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index 83de4312cc81..742c091e61c5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -101,19 +101,19 @@ public static ByteBuffer getValueBufferShallowCopy(Cell cell) { } /** Returns A new cell which is having the extra tags also added to it. */ - public static Cell createCell(Cell cell, List<Tag> tags) { + public static ExtendedCell createCell(ExtendedCell cell, List<Tag> tags) { return createCell(cell, TagUtil.fromList(tags)); } /** Returns A new cell which is having the extra tags also added to it. */ - public static Cell createCell(Cell cell, byte[] tags) { + public static ExtendedCell createCell(ExtendedCell cell, byte[] tags) { if (cell instanceof ByteBufferExtendedCell) { return new TagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) cell, tags); } return new TagRewriteCell(cell, tags); } - public static Cell createCell(Cell cell, byte[] value, byte[] tags) { + public static ExtendedCell createCell(ExtendedCell cell, byte[] value, byte[] tags) { if (cell instanceof ByteBufferExtendedCell) { return new ValueAndTagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) cell, value, tags); @@ -127,7 +127,7 @@ public static Cell createCell(Cell cell, byte[] value, byte[] tags) { * other parts, refer to the original Cell. */ static class TagRewriteCell implements ExtendedCell { - protected Cell cell; + protected ExtendedCell cell; protected byte[] tags; private static final int HEAP_SIZE_OVERHEAD = ClassSize.OBJECT + 2 * ClassSize.REFERENCE; @@ -136,8 +136,7 @@ static class TagRewriteCell implements ExtendedCell { * @param cell The original Cell which it rewrites * @param tags the tags bytes. The array is supposed to contain the tags bytes alone.
*/ - public TagRewriteCell(Cell cell, byte[] tags) { - assert cell instanceof ExtendedCell; + public TagRewriteCell(ExtendedCell cell, byte[] tags) { assert tags != null; this.cell = cell; this.tags = tags; @@ -303,7 +302,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); return new TagRewriteCell(clonedBaseCell, this.tags); } } @@ -482,7 +481,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); if (clonedBaseCell instanceof ByteBufferExtendedCell) { return new TagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) clonedBaseCell, this.tags); @@ -545,7 +544,7 @@ static class ValueAndTagRewriteCell extends TagRewriteCell { protected byte[] value; - public ValueAndTagRewriteCell(Cell cell, byte[] value, byte[] tags) { + public ValueAndTagRewriteCell(ExtendedCell cell, byte[] value, byte[] tags) { super(cell, tags); this.value = value; } @@ -618,7 +617,7 @@ public void write(ByteBuffer buf, int offset) { * Made into a static method so as to reuse the logic within * ValueAndTagRewriteByteBufferExtendedCell */ - static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] tags) { + static void write(ByteBuffer buf, int offset, ExtendedCell cell, byte[] value, byte[] tags) { offset = ByteBufferUtils.putInt(buf, offset, KeyValueUtil.keyLength(cell));// Key length offset = ByteBufferUtils.putInt(buf, offset, value.length);// Value length offset = KeyValueUtil.appendKeyTo(cell, buf, offset); @@ -633,7 +632,7 @@ static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] ta @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); return new ValueAndTagRewriteCell(clonedBaseCell, this.value, this.tags); } } @@ -699,7 +698,7 @@ public void write(ByteBuffer buf, int offset) { @Override public ExtendedCell deepClone() { - Cell clonedBaseCell = this.cell.deepClone(); + ExtendedCell clonedBaseCell = this.cell.deepClone(); if (clonedBaseCell instanceof ByteBufferExtendedCell) { return new ValueAndTagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) clonedBaseCell, this.value, this.tags); @@ -837,7 +836,7 @@ public static boolean isDelete(final byte type) { } /** Returns True if this cell is a {@link KeyValue.Type#Delete} type. 
*/ - public static boolean isDeleteType(Cell cell) { + public static boolean isDeleteType(ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.Delete.getCode(); } @@ -845,20 +844,20 @@ public static boolean isDeleteFamily(final Cell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteFamily.getCode(); } - public static boolean isDeleteFamilyVersion(final Cell cell) { + public static boolean isDeleteFamilyVersion(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode(); } - public static boolean isDeleteColumns(final Cell cell) { + public static boolean isDeleteColumns(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.DeleteColumn.getCode(); } - public static boolean isDeleteColumnVersion(final Cell cell) { + public static boolean isDeleteColumnVersion(final ExtendedCell cell) { return cell.getTypeByte() == KeyValue.Type.Delete.getCode(); } /** Returns True if this cell is a delete family or column type. */ - public static boolean isDeleteColumnOrFamily(Cell cell) { + public static boolean isDeleteColumnOrFamily(ExtendedCell cell) { int t = cell.getTypeByte(); return t == KeyValue.Type.DeleteColumn.getCode() || t == KeyValue.Type.DeleteFamily.getCode(); } @@ -1158,8 +1157,9 @@ public static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFami * special API used in scan optimization. */ // compare a key against row/fam/qual/ts/type - public static final int compareKeyBasedOnColHint(CellComparator comparator, Cell nextIndexedCell, - Cell currentCell, int foff, int flen, byte[] colHint, int coff, int clen, long ts, byte type) { + public static final int compareKeyBasedOnColHint(CellComparator comparator, + ExtendedCell nextIndexedCell, ExtendedCell currentCell, int foff, int flen, byte[] colHint, + int coff, int clen, long ts, byte type) { int compare = comparator.compareRows(nextIndexedCell, currentCell); if (compare != 0) { return compare; @@ -2537,7 +2537,7 @@ public static BigDecimal getValueAsBigDecimal(Cell cell) { * @param tagCompressionContext the TagCompressionContext * @throws IOException can throw IOException if the compression encounters issue */ - public static void compressTags(OutputStream out, Cell cell, + public static void compressTags(OutputStream out, ExtendedCell cell, TagCompressionContext tagCompressionContext) throws IOException { if (cell instanceof ByteBufferExtendedCell) { tagCompressionContext.compressTags(out, ((ByteBufferExtendedCell) cell).getTagsByteBuffer(), @@ -2689,7 +2689,7 @@ static final int compareWithoutRow(CellComparator comparator, Cell left, byte[] * Return a new cell is located following input cell. If both of type and timestamp are minimum, * the input cell will be returned directly. 
*/ - public static Cell createNextOnRowCol(Cell cell) { + public static ExtendedCell createNextOnRowCol(ExtendedCell cell) { long ts = cell.getTimestamp(); byte type = cell.getTypeByte(); if (type != KeyValue.Type.Minimum.getCode()) { @@ -2703,7 +2703,7 @@ public static Cell createNextOnRowCol(Cell cell) { return createNextOnRowCol(cell, ts, type); } - static Cell createNextOnRowCol(Cell cell, long ts, byte type) { + static ExtendedCell createNextOnRowCol(ExtendedCell cell, long ts, byte type) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2767,7 +2767,7 @@ public static int estimatedSerializedSizeOfKey(final Cell cell) { * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed * a null cell */ - public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) { + public static byte[] getCellKeySerializedAsKeyValueKey(final ExtendedCell cell) { if (cell == null) return null; byte[] b = new byte[KeyValueUtil.keyLength(cell)]; KeyValueUtil.appendKeyTo(cell, b, 0); @@ -2778,7 +2778,7 @@ public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) { * Create a Cell that is smaller than all other possible Cells for the given Cell's row. * @return First possible Cell on passed Cell's row. */ - public static Cell createFirstOnRow(final Cell cell) { + public static ExtendedCell createFirstOnRow(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2787,26 +2787,27 @@ public static Cell createFirstOnRow(final Cell cell) { return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); } - public static Cell createFirstOnRow(final byte[] row, int roffset, short rlength) { + public static ExtendedCell createFirstOnRow(final byte[] row, int roffset, short rlength) { return new FirstOnRowCell(row, roffset, rlength); } - public static Cell createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) { + public static ExtendedCell createFirstOnRow(final byte[] row, final byte[] family, + final byte[] col) { return createFirstOnRow(row, 0, (short) row.length, family, 0, (byte) family.length, col, 0, col.length); } - public static Cell createFirstOnRow(final byte[] row, int roffset, short rlength, + public static ExtendedCell createFirstOnRow(final byte[] row, int roffset, short rlength, final byte[] family, int foffset, byte flength, final byte[] col, int coffset, int clength) { return new FirstOnRowColCell(row, roffset, rlength, family, foffset, flength, col, coffset, clength); } - public static Cell createFirstOnRow(final byte[] row) { + public static ExtendedCell createFirstOnRow(final byte[] row) { return createFirstOnRow(row, 0, (short) row.length); } - public static Cell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) { + public static ExtendedCell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2817,7 +2818,7 @@ public static Cell createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, in fArray, foff, (byte) flen, HConstants.EMPTY_BYTE_ARRAY, 0, 0); } - public static Cell createFirstOnRowCol(final Cell cell) { + public static ExtendedCell createFirstOnRowCol(final Cell cell) { if (cell instanceof 
ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2831,7 +2832,7 @@ public static Cell createFirstOnRowCol(final Cell cell) { cell.getQualifierLength()); } - public static Cell createFirstOnNextRow(final Cell cell) { + public static ExtendedCell createFirstOnNextRow(final Cell cell) { byte[] nextRow = new byte[cell.getRowLength() + 1]; CellUtil.copyRowTo(cell, nextRow, 0); nextRow[nextRow.length - 1] = 0;// maybe not necessary @@ -2843,7 +2844,8 @@ public static Cell createFirstOnNextRow(final Cell cell) { * passed qualifier. * @return Last possible Cell on passed Cell's rk:cf and passed qualifier. */ - public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) { + public static ExtendedCell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, + int qlength) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2863,7 +2865,7 @@ public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffe * combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored. * @param cell - cell */ - public static Cell createFirstOnRowColTS(Cell cell, long ts) { + public static ExtendedCell createFirstOnRowColTS(Cell cell, long ts) { if (cell instanceof ByteBufferExtendedCell) { return new FirstOnRowColTSByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2882,7 +2884,7 @@ public static Cell createFirstOnRowColTS(Cell cell, long ts) { * Create a Cell that is larger than all other possible Cells for the given Cell's row. * @return Last possible Cell on passed Cell's row. */ - public static Cell createLastOnRow(final Cell cell) { + public static ExtendedCell createLastOnRow(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowByteBufferExtendedCell(((ByteBufferExtendedCell) cell).getRowByteBuffer(), ((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength()); @@ -2890,7 +2892,7 @@ public static Cell createLastOnRow(final Cell cell) { return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); } - public static Cell createLastOnRow(final byte[] row) { + public static ExtendedCell createLastOnRow(final byte[] row) { return new LastOnRowCell(row, 0, (short) row.length); } @@ -2900,7 +2902,7 @@ public static Cell createLastOnRow(final byte[] row) { * we already know is not in the file. * @return Last possible Cell on passed Cell's rk:cf:q. */ - public static Cell createLastOnRowCol(final Cell cell) { + public static ExtendedCell createLastOnRowCol(final Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return new LastOnRowColByteBufferExtendedCell( ((ByteBufferExtendedCell) cell).getRowByteBuffer(), @@ -2922,7 +2924,7 @@ public static Cell createLastOnRowCol(final Cell cell) { * @param fam - family name * @return First Delete Family possible key on passed row. 
*/ - public static Cell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) { + public static ExtendedCell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) { return new FirstOnRowDeleteFamilyCell(row, fam); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index b4d26dbfee93..5ba344770a3d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -38,7 +38,7 @@ public interface RawCell extends Cell { * @return the byte[] having the tags */ default byte[] cloneTags() { - return PrivateCellUtil.cloneTags(this); + return PrivateCellUtil.cloneTags((ExtendedCell) this); } /** @@ -46,7 +46,7 @@ default byte[] cloneTags() { * @return a list of tags */ default Iterator<Tag> getTags() { - return PrivateCellUtil.tagsIterator(this); + return PrivateCellUtil.tagsIterator((ExtendedCell) this); } /** @@ -55,7 +55,7 @@ default Iterator<Tag> getTags() { * @return the specific tag if available or null */ default Optional<Tag> getTag(byte type) { - return PrivateCellUtil.getTag(this, type); + return PrivateCellUtil.getTag((ExtendedCell) this, type); } /** @@ -71,6 +71,6 @@ public static void checkForTagsLength(int tagsLength) { /** Returns A new cell which is having the extra tags also added to it. */ public static Cell createCell(Cell cell, List<Tag> tags) { - return PrivateCellUtil.createCell(cell, tags); + return PrivateCellUtil.createCell((ExtendedCell) cell, tags); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java index c551d2aabd54..b7b70e25ce40 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java @@ -21,7 +21,7 @@ import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -50,7 +50,7 @@ protected void postEncoding(HFileBlockEncodingContext encodingCtx) throws IOExce } } - protected Cell createFirstKeyCell(ByteBuffer key, int keyLength) { + protected ExtendedCell createFirstKeyCell(ByteBuffer key, int keyLength) { if (key.hasArray()) { return new KeyValue.KeyOnlyKeyValue(key.array(), key.arrayOffset() + key.position(), keyLength); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index 0f15151fe88b..a44d09ae4c0c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -281,7 +281,7 @@ protected void copyFromNext(SeekerState nextState) { } } - public Cell toCell() { + public ExtendedCell toCell() { // Buffer backing the value and tags part from the HFileBlock's buffer // When tag compression in use, this will be only the value bytes area.
ByteBuffer valAndTagsBuffer; @@ -304,7 +304,7 @@ public Cell toCell() { } } - private Cell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, + private ExtendedCell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, int tagsLenSerializationSize) { byte[] tagsArray = HConstants.EMPTY_BYTE_ARRAY; int tOffset = 0; @@ -326,7 +326,7 @@ private Cell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset, this.tagsLength); } - private Cell toOffheapCell(ByteBuffer valAndTagsBuffer, int vOffset, + private ExtendedCell toOffheapCell(ByteBuffer valAndTagsBuffer, int vOffset, int tagsLenSerializationSize) { ByteBuffer tagsBuf = HConstants.EMPTY_BYTE_BUFFER; int tOffset = 0; @@ -825,7 +825,7 @@ public BufferedEncodedSeeker(HFileBlockDecodingContext decodingCtx) { } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { keyOnlyKV.setKey(current.keyBuffer, 0, current.keyLength); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, keyOnlyKV); } @@ -853,7 +853,7 @@ public void setCurrentBuffer(ByteBuff buffer) { } @Override - public Cell getKey() { + public ExtendedCell getKey() { byte[] key = new byte[current.keyLength]; System.arraycopy(current.keyBuffer, 0, key, 0, current.keyLength); return new KeyValue.KeyOnlyKeyValue(key); @@ -869,7 +869,7 @@ public ByteBuffer getValueShallowCopy() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { return current.toCell(); } @@ -927,7 +927,7 @@ protected void decodeTags() { } @Override - public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { + public int seekToKeyInBlock(ExtendedCell seekCell, boolean seekBefore) { int rowCommonPrefix = 0; int familyCommonPrefix = 0; int qualCommonPrefix = 0; @@ -1020,7 +1020,7 @@ public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { return 1; } - private int compareTypeBytes(Cell key, Cell right) { + private int compareTypeBytes(ExtendedCell key, ExtendedCell right) { if ( key.getFamilyLength() + key.getQualifierLength() == 0 && key.getTypeByte() == KeyValue.Type.Minimum.getCode() @@ -1129,7 +1129,7 @@ protected STATE createSeekerState() { } /** Returns unencoded size added */ - protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out, + protected final int afterEncodingKeyValue(ExtendedCell cell, DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) throws IOException { int size = 0; if (encodingCtx.getHFileContext().isIncludesTags()) { @@ -1245,7 +1245,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { EncodingState state = encodingCtx.getEncodingState(); int posBeforeEncode = out.size(); @@ -1253,8 +1253,8 @@ public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputS state.postCellEncode(encodedKvSize, out.size() - posBeforeEncode); } - public abstract int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCtx, - DataOutputStream out) throws IOException; + public abstract int internalEncode(ExtendedCell cell, + HFileBlockDefaultEncodingContext encodingCtx, DataOutputStream out) throws IOException; @Override public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java index 5abe65dc4f97..1ec21cea6664 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java @@ -21,7 +21,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; @@ -56,7 +56,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { CopyKeyEncodingState state = (CopyKeyEncodingState) encodingContext.getEncodingState(); NoneEncoder encoder = state.encoder; @@ -64,7 +64,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { int keyLength = block.getIntAfterPosition(Bytes.SIZEOF_INT); int pos = 3 * Bytes.SIZEOF_INT; ByteBuffer key = block.asSubByteBuffer(pos + keyLength).duplicate(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index 52825b6c683d..78ebbce38306 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -22,8 +22,8 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; @@ -55,7 +55,7 @@ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream * Encodes a KeyValue. After the encode, {@link EncodingState#postCellEncode(int, int)} needs to * be called to keep track of the encoded and unencoded data size */ - void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; /** @@ -81,7 +81,7 @@ ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext dec * @param block encoded block we want index, the position will not change * @return First key in block as a cell. */ - Cell getFirstKeyCellInBlock(ByteBuff block); + ExtendedCell getFirstKeyCellInBlock(ByteBuff block); /** * Create a HFileBlock seeker which find KeyValues within a block. @@ -119,7 +119,7 @@ interface EncodedSeeker { * From the current position creates a cell using the key part of the current buffer * @return key at current position */ - Cell getKey(); + ExtendedCell getKey(); /** * Does a shallow copy of the value at the current position. 
A shallow copy is possible because @@ -129,7 +129,7 @@ interface EncodedSeeker { ByteBuffer getValueShallowCopy(); /** Returns the Cell at the current position. Includes memstore timestamp. */ - Cell getCell(); + ExtendedCell getCell(); /** Set position to beginning of given block */ void rewind(); @@ -154,12 +154,12 @@ interface EncodedSeeker { * Does not matter in case of an inexact match. * @return 0 on exact match, 1 on inexact match. */ - int seekToKeyInBlock(Cell key, boolean seekBefore); + int seekToKeyInBlock(ExtendedCell key, boolean seekBefore); /** * Compare the given key against the current key * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater */ - public int compareKey(CellComparator comparator, Cell key); + public int compareKey(CellComparator comparator, ExtendedCell key); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java index e865d0b12523..d58f5e2c923e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java @@ -21,7 +21,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -181,7 +181,7 @@ private void uncompressSingleKeyValue(DataInputStream source, ByteBuffer buffer, } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { EncodingState state = encodingContext.getEncodingState(); int size = compressSingleKeyValue(out, cell, state.prevCell); @@ -190,7 +190,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell) + private int compressSingleKeyValue(DataOutputStream out, ExtendedCell cell, ExtendedCell prevCell) throws IOException { int flag = 0; // Do not use more bits that can fit into a byte int kLength = KeyValueUtil.keyLength(cell); @@ -291,7 +291,7 @@ private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCel } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT); byte familyLength = block.get(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java index 8aeb1824eb30..8ab4e320552e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodingState.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.encoding; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +30,7 @@ public class EncodingState { /** * The previous Cell the encoder encoded. 
*/ - protected Cell prevCell = null; + protected ExtendedCell prevCell = null; // Size of actual data being written. Not considering the block encoding/compression. This // includes the header size also. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java index df3d6c34216b..26b695abfca9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java @@ -22,7 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -215,7 +215,7 @@ private void uncompressSingleKeyValue(DataInputStream source, ByteBuffer out, } @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { EncodingState state = encodingContext.getEncodingState(); int size = compressSingleKeyValue(out, cell, state.prevCell); @@ -224,7 +224,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell) + private int compressSingleKeyValue(DataOutputStream out, ExtendedCell cell, ExtendedCell prevCell) throws IOException { int flag = 0; // Do not use more bits than will fit into a byte int kLength = KeyValueUtil.keyLength(cell); @@ -330,7 +330,7 @@ protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocat } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE); int keyLength = ByteBuff.readCompressedInt(block); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 820dd6179542..e9858b5ffba1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -22,7 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -42,7 +42,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder { @Override - public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, + public int internalEncode(ExtendedCell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { int klength = KeyValueUtil.keyLength(cell); int vlength = cell.getValueLength(); @@ -69,7 +69,7 @@ public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingCo return size; } - private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream 
out) + private void writeKeyExcludingCommon(ExtendedCell cell, int commonPrefix, DataOutputStream out) throws IOException { short rLen = cell.getRowLength(); if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) { @@ -162,7 +162,7 @@ private int decodeKeyValue(DataInputStream source, ByteBuffer buffer, int prevKe } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT); int keyLength = ByteBuff.readCompressedInt(block); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java index 1507582f2220..135a1337cf27 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java @@ -23,7 +23,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -72,7 +72,7 @@ public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOut } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { RowIndexEncodingState state = (RowIndexEncodingState) encodingCtx.getEncodingState(); RowIndexEncoderV1 encoder = state.encoder; @@ -104,7 +104,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont } else { RowIndexSeekerV1 seeker = new RowIndexSeekerV1(decodingCtx); seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer)); - List<Cell> kvs = new ArrayList<>(); + List<ExtendedCell> kvs = new ArrayList<>(); kvs.add(seeker.getCell()); while (seeker.next()) { kvs.add(seeker.getCell()); } @@ -112,7 +112,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos); - for (Cell cell : kvs) { + for (ExtendedCell cell : kvs) { KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(cell); out.write(currentCell.getBuffer(), currentCell.getOffset(), currentCell.getLength()); if (includesMvcc) { @@ -125,7 +125,7 @@ public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingCont } @Override - public Cell getFirstKeyCellInBlock(ByteBuff block) { + public ExtendedCell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); int keyLength = block.getInt(); block.getInt(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java index 028473e0897e..7ec4f767ccb8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java @@ -19,7 +19,7 @@ import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.util.Bytes; @@ -32,7 +32,7 @@ public class RowIndexEncoderV1 { private static final Logger LOG = LoggerFactory.getLogger(RowIndexEncoderV1.class); /** The Cell previously appended. */ - private Cell lastCell = null; + private ExtendedCell lastCell = null; private DataOutputStream out; private NoneEncoder encoder; @@ -46,7 +46,7 @@ public RowIndexEncoderV1(DataOutputStream out, HFileBlockDefaultEncodingContext this.context = encodingCtx; } - public void write(Cell cell) throws IOException { + public void write(ExtendedCell cell) throws IOException { // checkRow uses comparator to check we are writing in order. int extraBytesForRowIndex = 0; @@ -63,7 +63,7 @@ public void write(Cell cell) throws IOException { context.getEncodingState().postCellEncode(size, size + extraBytesForRowIndex); } - protected boolean checkRow(final Cell cell) throws IOException { + protected boolean checkRow(final ExtendedCell cell) throws IOException { boolean isDuplicateRow = false; if (cell == null) { throw new IOException("Key cannot be null or empty"); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index e283803a143b..89bac4a609e9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -81,7 +82,7 @@ public void setCurrentBuffer(ByteBuff buffer) { @Override @SuppressWarnings("ByteBufferBackingArray") - public Cell getKey() { + public ExtendedCell getKey() { if (current.keyBuffer.hasArray()) { return new KeyValue.KeyOnlyKeyValue(current.keyBuffer.array(), current.keyBuffer.arrayOffset() + current.keyBuffer.position(), current.keyLength); @@ -103,7 +104,7 @@ public ByteBuffer getValueShallowCopy() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { return current.toCell(); } @@ -164,7 +165,7 @@ private ByteBuffer getRow(int index) { } @Override - public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { + public int seekToKeyInBlock(ExtendedCell seekCell, boolean seekBefore) { previous.invalidate(); int index = binarySearch(seekCell, seekBefore); if (index < 0) { @@ -230,7 +231,7 @@ private void moveToPrevious() { } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, current.currentKey); } @@ -343,8 +344,8 @@ protected int getCellBufSize() { return kvBufSize; } - public Cell toCell() { - Cell ret; + public ExtendedCell toCell() { + ExtendedCell ret; int cellBufSize = getCellBufSize(); long seqId = 0L; if (includesMvcc()) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java index 0f9203facff9..b39e9cf20ac0 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java +++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java @@ -26,8 +26,8 @@ import java.util.Random; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.io.WritableUtils; @@ -270,8 +270,8 @@ public List<KeyValue> generateTestKeyValues(int howMany, boolean useTags) { * @param howMany How many Key values should be generated. * @return sorted list of key values */ - public List<Cell> generateTestExtendedOffheapKeyValues(int howMany, boolean useTags) { - List<Cell> result = new ArrayList<>(); + public List<ExtendedCell> generateTestExtendedOffheapKeyValues(int howMany, boolean useTags) { + List<ExtendedCell> result = new ArrayList<>(); List<byte[]> rows = generateRows(); Map<Integer, List<byte[]>> rowsToQualifier = new HashMap<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 6ab3bdd25048..3f9da73aad7c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; @@ -353,8 +354,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { } // we now have the proper WAL writer. full steam ahead - PrivateCellUtil.updateLatestStamp(cell, this.now); - wl.writer.append(kv); + PrivateCellUtil.updateLatestStamp(kv, this.now); + wl.writer.append((ExtendedCell) kv); wl.written += length; // Copy the row so we know when a row transition. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 5ab4e5a292e9..90dc5c1d555f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; @@ -62,7 +63,7 @@ protected void reduce(K row, Iterable<Put> vals, Context context) List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? (List) cells : null; for (Cell cell : entry.getValue()) { - KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) cell); curSize += kv.heapSize(); if (kvs != null) { kvs.add(kv);
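The HFileOutputFormat2 hunk above is a behavioral fix, not just a type change: the timestamp is now stamped on the kv that is actually appended to the writer, rather than on the original cell. A standalone sketch of the stamping semantics (class and variable names are illustrative; updateLatestStamp rewrites only cells still carrying LATEST_TIMESTAMP):

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class UpdateLatestStampSketch {
  public static void main(String[] args) throws Exception {
    // A cell written without an explicit timestamp carries LATEST_TIMESTAMP.
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("v"));
    long now = System.currentTimeMillis();
    // After this patch the writer stamps the KeyValue it appends, so the
    // HFile never persists LATEST_TIMESTAMP for such cells.
    PrivateCellUtil.updateLatestStamp(kv, now);
    System.out.println(kv.getTimestamp() == now); // true
  }
}
```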
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index 90905090f89d..b4061d6be6a9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -100,7 +101,7 @@ protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<Put> puts, throw new IOException("Invalid visibility expression found in mutation " + p, e); } for (List<Cell> cells : p.getFamilyCellMap().values()) { - for (Cell cell : cells) { + for (ExtendedCell cell : (List<ExtendedCell>) (List) cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 2fba01978581..b374aa86c018 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -26,8 +26,8 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; @@ -168,10 +168,10 @@ protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable<Text> li } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. - Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + ExtendedCell cell = (ExtendedCell) this.kvCreator.create(lineBytes, + parsed.getRowKeyOffset(), parsed.getRowKeyLength(), parser.getFamily(i), 0, + parser.getFamily(i).length, parser.getQualifier(i), 0, parser.getQualifier(i).length, + ts, lineBytes, parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize();
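The double cast in the PutSortReducer hunk above (the generic parameters shown there are an editorial reconstruction of stripped angle brackets, so treat them as an assumption) is the usual Java idiom for re-typing a List<Cell> whose elements are known to actually be ExtendedCells. A self-contained sketch:

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ListCastSketch {
  @SuppressWarnings({ "unchecked", "rawtypes" })
  public static void main(String[] args) {
    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v")));
    // Every element here is in fact an ExtendedCell (KeyValue implements it),
    // so re-typing through a raw List is safe, though unchecked at compile time.
    List<ExtendedCell> extended = (List<ExtendedCell>) (List) cells;
    for (ExtendedCell cell : extended) {
      System.out.println(cell.getTypeByte());
    }
  }
}
```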
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 842bb3e6edb5..437689844f8c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -887,7 +888,7 @@ public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, Cell cell = cellScanner.current(); List<Tag> tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + Cell updatedCell = PrivateCellUtil.createCell((ExtendedCell) cell, tags); updatedCells.add(updatedCell); } m.getFamilyCellMap().clear();
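The TestImportExport change above shows the pattern server-side hooks now follow when rewriting cells with extra tags. A hedged, standalone helper sketch (the class and method are hypothetical; PrivateCellUtil.getTags and createCell are used exactly as in the test):

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;

// Hypothetical helper, not part of this patch.
public final class TagAppender {
  public static List<Cell> withExtraTag(List<Cell> cells, Tag extra) {
    List<Cell> out = new ArrayList<>();
    for (Cell cell : cells) {
      List<Tag> tags = PrivateCellUtil.getTags(cell);
      tags.add(extra);
      // Cells observed server side are ExtendedCells, matching the cast
      // added in the test coprocessor above.
      out.add(PrivateCellUtil.createCell((ExtendedCell) cell, tags));
    }
    return out;
  }
}
```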
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 2119a3e7cbef..b3de99ecdb62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.IntConsumer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -58,9 +58,9 @@ public class HalfStoreFileReader extends StoreFileReader { // i.e. empty column and a timestamp of LATEST_TIMESTAMP. protected final byte[] splitkey; - private final Cell splitCell; + private final ExtendedCell splitCell; - private Optional<Cell> firstKey = Optional.empty(); + private Optional<ExtendedCell> firstKey = Optional.empty(); private boolean firstKeySeeked = false; @@ -100,8 +100,10 @@ public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, public boolean atEnd = false; @Override - public Cell getKey() { - if (atEnd) return null; + public ExtendedCell getKey() { + if (atEnd) { + return null; + } return delegate.getKey(); } @@ -114,7 +116,9 @@ public String getKeyString() { @Override public ByteBuffer getValue() { - if (atEnd) return null; + if (atEnd) { + return null; + } return delegate.getValue(); } @@ -127,8 +131,10 @@ public String getValueString() { } @Override - public Cell getCell() { - if (atEnd) return null; + public ExtendedCell getCell() { + if (atEnd) { + return null; + } return delegate.getCell(); } @@ -187,7 +193,7 @@ public boolean isSeeked() { } @Override - public int seekTo(Cell key) throws IOException { + public int seekTo(ExtendedCell key) throws IOException { if (top) { if (PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) { return -1; } @@ -209,10 +215,9 @@ public int seekTo(Cell key) throws IOException { } @Override - public int reseekTo(Cell key) throws IOException { + public int reseekTo(ExtendedCell key) throws IOException { // This function is identical to the corresponding seekTo function - // except - // that we call reseekTo (and not seekTo) on the delegate. + // except that we call reseekTo (and not seekTo) on the delegate. if (top) { if (PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) { return -1; } @@ -237,9 +242,9 @@ public int reseekTo(Cell key) throws IOException { } @Override - public boolean seekBefore(Cell key) throws IOException { + public boolean seekBefore(ExtendedCell key) throws IOException { if (top) { - Optional<Cell> fk = getFirstKey(); + Optional<ExtendedCell> fk = getFirstKey(); if ( fk.isPresent() && PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0 @@ -265,7 +270,7 @@ public boolean seekBefore(Cell key) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } @@ -292,7 +297,7 @@ public boolean passesKeyRangeFilter(Scan scan) { } @Override - public Optional<Cell> getLastKey() { + public Optional<ExtendedCell> getLastKey() { if (top) { return super.getLastKey(); } @@ -313,13 +318,13 @@ public Optional<Cell> getLastKey() { } @Override - public Optional<Cell> midKey() throws IOException { + public Optional<ExtendedCell> midKey() throws IOException { // Returns null to indicate file is not splittable. return Optional.empty(); } @Override - public Optional<Cell> getFirstKey() { + public Optional<ExtendedCell> getFirstKey() { if (!firstKeySeeked) { HFileScanner scanner = getScanner(true, true, false); try {
return Optional.empty(); } @Override - public Optional getFirstKey() { + public Optional getFirstKey() { if (!firstKeySeeked) { HFileScanner scanner = getScanner(true, true, false); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index c340254e07c9..37001d93b12d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,9 +31,9 @@ public class BlockWithScanInfo { * The first key in the next block following this one in the HFile. If this key is unknown, this * is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ - private final Cell nextIndexedKey; + private final ExtendedCell nextIndexedKey; - public BlockWithScanInfo(HFileBlock hFileBlock, Cell nextIndexedKey) { + public BlockWithScanInfo(HFileBlock hFileBlock, ExtendedCell nextIndexedKey) { this.hFileBlock = hFileBlock; this.nextIndexedKey = nextIndexedKey; } @@ -42,7 +42,7 @@ public HFileBlock getHFileBlock() { return hFileBlock; } - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index 97f418fd3f93..1d68f41930bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -21,10 +21,12 @@ import java.io.DataOutput; import java.io.IOException; import java.util.LinkedList; +import java.util.Objects; import java.util.Queue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -59,7 +61,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** The size of individual Bloom filter chunks to create */ private int chunkByteSize; /** The prev Cell that was processed */ - private Cell prevCell; + private ExtendedCell prevCell; /** A Bloom filter chunk enqueued for writing */ private static class ReadyChunk { @@ -145,8 +147,8 @@ private void enqueueReadyChunk(boolean closing) { } @Override - public void append(Cell cell) throws IOException { - if (cell == null) throw new NullPointerException(); + public void append(ExtendedCell cell) throws IOException { + Objects.requireNonNull(cell); enqueueReadyChunk(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index ae79ad857244..135c6cfecbcc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import 
org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.MetricsIO; @@ -395,15 +395,15 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException; - Optional getLastKey(); + Optional getLastKey(); - Optional midKey() throws IOException; + Optional midKey() throws IOException; long length(); long getEntries(); - Optional getFirstKey(); + Optional getFirstKey(); long indexSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 16bec1e95888..2cc68f10e862 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -894,7 +894,7 @@ DataOutputStream startWriting(BlockType newBlockType) throws IOException { /** * Writes the Cell to this block */ - void write(Cell cell) throws IOException { + void write(ExtendedCell cell) throws IOException { expectState(State.WRITING); this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 592c19c866cf..816beea8a45f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -145,7 +146,7 @@ public byte[] getRootBlockKey(int i) { } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -221,9 +222,9 @@ public String toString() { */ static class CellBasedKeyBlockIndexReader extends BlockIndexReader { - private Cell[] blockKeys; + private ExtendedCell[] blockKeys; /** Pre-computed mid-key */ - private AtomicReference midKey = new AtomicReference<>(); + private AtomicReference midKey = new AtomicReference<>(); /** Needed doing lookup on blocks. 
*/ protected CellComparator comparator; @@ -258,12 +259,12 @@ public boolean isEmpty() { /** * from 0 to {@link #getRootBlockCount() - 1} */ - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return blockKeys[i]; } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -273,7 +274,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } // the next indexed key - Cell nextIndexedKey = null; + ExtendedCell nextIndexedKey = null; // Read the next-level (intermediate or leaf) index block. long currentOffset = blockOffsets[rootLevelIndex]; @@ -381,10 +382,12 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } @Override - public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) throw new IOException("HFile empty"); + public ExtendedCell midkey(CachingBlockReader cachingBlockReader) throws IOException { + if (rootCount == 0) { + throw new IOException("HFile empty"); + } - Cell targetMidKey = this.midKey.get(); + ExtendedCell targetMidKey = this.midKey.get(); if (targetMidKey != null) { return targetMidKey; } @@ -416,7 +419,7 @@ public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { @Override protected void initialize(int numEntries) { - blockKeys = new Cell[numEntries]; + blockKeys = new ExtendedCell[numEntries]; } /** @@ -501,7 +504,7 @@ public boolean isEmpty() { } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException { @@ -510,14 +513,14 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } @Override - public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { + public ExtendedCell midkey(CachingBlockReader cachingBlockReader) throws IOException { return seeker.midkey(cachingBlockReader); } /** * from 0 to {@link #getRootBlockCount() - 1} */ - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return seeker.getRootBlockKey(i); } @@ -601,9 +604,10 @@ public void ensureNonEmpty() { * the block irrespective of the encoding * @return reader a basic way to load blocks */ - public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, - boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + public HFileBlock seekToDataBlock(final ExtendedCell key, HFileBlock currentBlock, + boolean cacheBlocks, boolean pread, boolean isCompaction, + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader); if (blockWithScanInfo == null) { @@ -625,8 +629,8 @@ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boole * 
@return the BlockWithScanInfo which contains the DataBlock with other scan info such as * nextIndexedKey. */ - public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, - boolean cacheBlocks, boolean pread, boolean isCompaction, + public abstract BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, + HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 1629536c1488..f05b5415f02e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; @@ -47,7 +47,7 @@ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream /** * Encodes a KeyValue. */ - void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index 6505e3d33fe8..fd1c1adb0d55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -90,7 +90,7 @@ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { this.encoding.getEncoder().encode(cell, encodingCtx, out); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java index a84204cadf1b..99ee8f3554c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileIndexBlockEncoder.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.HeapSize; import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; @@ -58,15 +59,15 @@ void initRootIndex(HFileBlock blk, int numEntries, CellComparator comparator, in boolean isEmpty(); - Cell getRootBlockKey(int i); + ExtendedCell getRootBlockKey(int i); int getRootBlockCount(); - Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException; + ExtendedCell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException; int rootBlockContainingKey(Cell key); - BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, HFile.CachingBlockReader cachingBlockReader) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 1f2e5ec6d965..aa8f42c8948d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -91,7 +92,7 @@ public class HFileInfo implements SortedMap { static final int MAX_MINOR_VERSION = 3; /** Last key in the file. Filled in when we read in the file info */ - private Cell lastKeyCell = null; + private ExtendedCell lastKeyCell = null; /** Average key length read from file info */ private int avgKeyLen = -1; /** Average value length read from file info */ @@ -510,7 +511,7 @@ public List getLoadOnOpenBlocks() { return loadOnOpenBlocks; } - public Cell getLastKeyCell() { + public ExtendedCell getLastKeyCell() { return lastKeyCell; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index ace662414f40..851dd3d6f27e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -188,7 +189,7 @@ public long length() { * the first row key, but rather the byte form of the first KeyValue. */ @Override - public Optional getFirstKey() { + public Optional getFirstKey() { if (dataBlockIndexReader == null) { throw new BlockIndexNotLoadedException(path); } @@ -330,7 +331,7 @@ protected static class HFileScannerImpl implements HFileScanner { * last data block. If the nextIndexedKey is null, it means the nextIndexedKey has not been * loaded yet. */ - protected Cell nextIndexedKey; + protected ExtendedCell nextIndexedKey; // Current block being used. NOTICE: DON't release curBlock separately except in shipped() or // close() methods. 
Because the shipped() or close() will do the release finally, even if any // exception occur the curBlock will be released by the close() method (see @@ -621,17 +622,17 @@ protected int blockSeek(Cell key, boolean seekBefore) { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return nextIndexedKey; } @Override - public int seekTo(Cell key) throws IOException { + public int seekTo(ExtendedCell key) throws IOException { return seekTo(key, true); } @Override - public int reseekTo(Cell key) throws IOException { + public int reseekTo(ExtendedCell key) throws IOException { int compared; if (isSeeked()) { compared = compareKey(reader.getComparator(), key); @@ -672,7 +673,7 @@ public int reseekTo(Cell key) throws IOException { * key, 1 if we are past the given key -2 if the key is earlier than the first key of * the file while using a faked index key */ - public int seekTo(Cell key, boolean rewind) throws IOException { + public int seekTo(ExtendedCell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, curBlock, cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); @@ -685,13 +686,13 @@ public int seekTo(Cell key, boolean rewind) throws IOException { } @Override - public boolean seekBefore(Cell key) throws IOException { + public boolean seekBefore(ExtendedCell key) throws IOException { HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, curBlock, cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), reader); if (seekToBlock == null) { return false; } - Cell firstKey = getFirstKeyCellInBlock(seekToBlock); + ExtendedCell firstKey = getFirstKeyCellInBlock(seekToBlock); if (PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), firstKey, key) >= 0) { long previousBlockOffset = seekToBlock.getPrevBlockOffset(); // The key we are interested in @@ -771,12 +772,12 @@ public DataBlockEncoding getEffectiveDataBlockEncoding() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { if (!isSeeked()) { return null; } - Cell ret; + ExtendedCell ret; int cellBufSize = getKVBufSize(); long seqId = 0L; if (this.reader.getHFileInfo().shouldIncludeMemStoreTS()) { @@ -816,7 +817,7 @@ public Cell getCell() { } @Override - public Cell getKey() { + public ExtendedCell getKey() { assertSeeked(); // Create a new object so that this getKey is cached as firstKey, lastKey ObjectIntPair keyPair = new ObjectIntPair<>(); @@ -966,8 +967,8 @@ protected void readAndUpdateNewBlock(long firstDataBlockOffset) throws IOExcepti updateCurrentBlock(newBlock); } - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, ExtendedCell nextIndexedKey, + boolean rewind, ExtendedCell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1025,7 +1026,7 @@ protected void updateCurrentBlock(HFileBlock newBlock) throws IOException { this.nextIndexedKey = null; } - protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { + protected ExtendedCell getFirstKeyCellInBlock(HFileBlock curBlock) { ByteBuff buffer = curBlock.getBufferWithoutHeader(); // It is safe to 
manipulate this buffer because we own the buffer object. buffer.rewind(); @@ -1050,7 +1051,7 @@ public String getValueString() { return ByteBufferUtils.toStringBinary(getValue()); } - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv); @@ -1432,7 +1433,7 @@ private void validateBlockType(HFileBlock block, BlockType expectedBlockType) th * the last row key, but it is the Cell representation of the last key */ @Override - public Optional getLastKey() { + public Optional getLastKey() { return dataBlockIndexReader.isEmpty() ? Optional.empty() : Optional.of(fileInfo.getLastKeyCell()); @@ -1443,7 +1444,7 @@ public Optional getLastKey() { * approximation only. */ @Override - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { return Optional.ofNullable(dataBlockIndexReader.midkey(this)); } @@ -1552,7 +1553,7 @@ public boolean next() throws IOException { } @Override - public Cell getKey() { + public ExtendedCell getKey() { assertValidSeek(); return seeker.getKey(); } @@ -1564,7 +1565,7 @@ public ByteBuffer getValue() { } @Override - public Cell getCell() { + public ExtendedCell getCell() { if (this.curBlock == null) { return null; } @@ -1589,13 +1590,13 @@ private void assertValidSeek() { } @Override - protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { + protected ExtendedCell getFirstKeyCellInBlock(HFileBlock curBlock) { return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock)); } @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, - Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, ExtendedCell nextIndexedKey, + boolean rewind, ExtendedCell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1606,7 +1607,7 @@ protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, } @Override - public int compareKey(CellComparator comparator, Cell key) { + public int compareKey(CellComparator comparator, ExtendedCell key) { return seeker.compareKey(comparator, key); } } @@ -1669,9 +1670,9 @@ public boolean prefetchStarted() { /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up - * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this - * overload of getScanner for compactions. See + * {@link HFileScanner#seekTo(ExtendedCell)} to position and start the read. There is nothing to + * clean up in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not + * use this overload of getScanner for compactions. See * {@link #getScanner(Configuration, boolean, boolean, boolean)} * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. @@ -1686,8 +1687,8 @@ public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final bo /** * Create a Scanner on this file. 
No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up - * in a Scanner. Letting go of your references to the scanner is sufficient. + * {@link HFileScanner#seekTo(ExtendedCell)} to position and start the read. There is nothing to + * clean up in a Scanner. Letting go of your references to the scanner is sufficient. * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. * @param pread Use positional read rather than seek+read if true (pread is better for diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 0393d3b788a7..79ed7a22016f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.function.IntConsumer; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.Shipper; import org.apache.yetus.audience.InterfaceAudience; @@ -30,7 +30,7 @@ * reposition yourself as well. *

* A scanner doesn't always have a key/value that it is pointing to when it is first created and - * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case, + * before {@link #seekTo()}/{@link #seekTo(ExtendedCell)} are called. In this case, * {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be * available. The general pattern is that you position the Scanner using the seekTo variants and * then getKey and getValue. @@ -48,7 +48,7 @@ public interface HFileScanner extends Shipper, Closeable { * will position itself at the end of the file and next() will return false when it is * called. */ - int seekTo(Cell cell) throws IOException; + int seekTo(ExtendedCell cell) throws IOException; /** * Reseek to or just before the passed cell. Similar to seekTo except that this can @@ -63,7 +63,7 @@ public interface HFileScanner extends Shipper, Closeable { * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in * position i; and 1, such that c[i] < cell, and scanner is left in position i. */ - int reseekTo(Cell cell) throws IOException; + int reseekTo(ExtendedCell cell) throws IOException; /** * Consider the cell stream of all the cells in the file, c[0] .. c[n], where there @@ -73,7 +73,7 @@ public interface HFileScanner extends Shipper, Closeable { * cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but * there may also NOT be a c[i+1], and next() will return false (EOF). */ - boolean seekBefore(Cell cell) throws IOException; + boolean seekBefore(ExtendedCell cell) throws IOException; /** * Positions this scanner at the start of the file. @@ -89,26 +89,26 @@ public interface HFileScanner extends Shipper, Closeable { boolean next() throws IOException; /** - * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this - * method. + * Gets the current key in the form of a cell. You must call {@link #seekTo(ExtendedCell)} before + * this method. * @return gets the current key as a Cell. */ - Cell getKey(); + ExtendedCell getKey(); /** - * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this - * method. + * Gets a buffer view to the current value. You must call {@link #seekTo(ExtendedCell)} before + * this method. * @return byte buffer for the value. The limit is set to the value size, and the position is 0, * the start of the buffer view. */ ByteBuffer getValue(); - /** Returns Instance of {@link org.apache.hadoop.hbase.Cell}. */ - Cell getCell(); + /** Returns Instance of {@link ExtendedCell}. */ + ExtendedCell getCell(); /** * Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You - * must call {@link #seekTo(Cell)} before this method. + * must call {@link #seekTo(ExtendedCell)} before this method. * @return key as a string * @deprecated Since hbase-2.0.0 */ @@ -117,7 +117,7 @@ public interface HFileScanner extends Shipper, Closeable { /** * Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You - * must call {@link #seekTo(Cell)} before this method. + * must call {@link #seekTo(ExtendedCell)} before this method. * @return value as a string * @deprecated Since hbase-2.0.0 */ @@ -128,13 +128,14 @@ public interface HFileScanner extends Shipper, Closeable { HFile.Reader getReader(); /** - * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)} - * or {@link #seekTo()} or {@link #seekTo(Cell)}. 
Otherwise returns false. + * @return True if scanner has had one of the seek calls invoked; i.e. + * {@link #seekBefore(ExtendedCell)} or {@link #seekTo()} or + * {@link #seekTo(ExtendedCell)}. Otherwise returns false. */ boolean isSeeked(); /** Returns the next key in the index (the key to seek to the next block) */ - Cell getNextIndexedKey(); + ExtendedCell getNextIndexedKey(); /** * Close this HFile scanner and do necessary cleanup. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index d2dfaf62106a..0f54fafba954 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -75,7 +76,7 @@ public class HFileWriterImpl implements HFile.Writer { private final int encodedBlockSizeLimit; /** The Cell previously appended. Becomes the last cell in the file. */ - protected Cell lastCell = null; + protected ExtendedCell lastCell = null; /** FileSystem stream to write into. */ protected FSDataOutputStream outputStream; @@ -112,7 +113,7 @@ public class HFileWriterImpl implements HFile.Writer { /** * First cell in a block. This reference should be short-lived since we write hfiles in a burst. */ - protected Cell firstCellInBlock = null; + protected ExtendedCell firstCellInBlock = null; /** May be null if we were passed a stream. */ protected final Path path; @@ -163,7 +164,7 @@ public class HFileWriterImpl implements HFile.Writer { * The last(stop) Cell of the previous data block. This reference should be short-lived since we * write hfiles in a burst. */ - private Cell lastCellOfPreviousBlock = null; + private ExtendedCell lastCellOfPreviousBlock = null; /** Additional data items to be written to the "load-on-open" section. */ private List additionalLoadOnOpenData = new ArrayList<>(); @@ -360,7 +361,7 @@ private void finishBlock() throws IOException { lastDataBlockOffset = outputStream.getPos(); blockWriter.writeHeaderAndData(outputStream); int onDiskSize = blockWriter.getOnDiskSizeWithHeader(); - Cell indexEntry = + ExtendedCell indexEntry = getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock); dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); @@ -377,8 +378,8 @@ private void finishBlock() throws IOException { * cell. * @return A cell that sorts between left and right. */ - public static Cell getMidpoint(final CellComparator comparator, final Cell left, - final Cell right) { + public static ExtendedCell getMidpoint(final CellComparator comparator, final ExtendedCell left, + final ExtendedCell right) { if (right == null) { throw new IllegalArgumentException("right cell can not be null"); } @@ -733,7 +734,7 @@ public HFileContext getFileContext() { * construction. Cell to add. Cannot be empty nor null. */ @Override - public void append(final Cell cell) throws IOException { + public void append(final ExtendedCell cell) throws IOException { // checkKey uses comparator to check we are writing in order. 
boolean dupKey = checkKey(cell); if (!dupKey) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index d64f0e4ce53d..002b26295f33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -20,7 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodingState; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -47,7 +47,7 @@ private NoOpDataBlockEncoder() { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + public void encode(ExtendedCell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException { NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState(); NoneEncoder encoder = state.encoder; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java index 4162fca6afe5..0d9767f62210 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding; @@ -138,12 +139,12 @@ protected static class NoOpEncodedSeeker implements EncodedSeeker { protected int midLeafBlockOnDiskSize = -1; protected int midKeyEntry = -1; - private Cell[] blockKeys; + private ExtendedCell[] blockKeys; private CellComparator comparator; protected int searchTreeLevel; /** Pre-computed mid-key */ - private AtomicReference midKey = new AtomicReference<>(); + private AtomicReference midKey = new AtomicReference<>(); @Override public long heapSize() { @@ -184,7 +185,7 @@ public boolean isEmpty() { } @Override - public Cell getRootBlockKey(int i) { + public ExtendedCell getRootBlockKey(int i) { return blockKeys[i]; } @@ -238,7 +239,7 @@ private void readRootIndex(DataInput in, final int numEntries) throws IOExceptio } private void initialize(int numEntries) { - blockKeys = new Cell[numEntries]; + blockKeys = new ExtendedCell[numEntries]; } private void add(final byte[] key, final long offset, final int dataSize) { @@ -250,10 +251,12 @@ private void add(final byte[] key, final long offset, final int dataSize) { } @Override - public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) throw new IOException("HFile empty"); + public ExtendedCell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOException { + if (rootCount == 0) { + throw new IOException("HFile empty"); + } - Cell targetMidKey = this.midKey.get(); + ExtendedCell targetMidKey = this.midKey.get(); if (targetMidKey != null) { return targetMidKey; } @@ -285,7 +288,7 @@ 
public Cell midkey(HFile.CachingBlockReader cachingBlockReader) throws IOExcepti } @Override - public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, + public BlockWithScanInfo loadDataBlockWithScanInfo(ExtendedCell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding, HFile.CachingBlockReader cachingBlockReader) throws IOException { @@ -295,7 +298,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB } // the next indexed key - Cell nextIndexedKey = null; + ExtendedCell nextIndexedKey = null; // Read the next-level (intermediate or leaf) index block. long currentOffset = blockOffsets[rootLevelIndex]; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index 82224851375c..bd64ca1ec51c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -325,7 +325,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo()); // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME long currentTime = EnvironmentEdgeManager.currentTime(); long lastMillis = 0; @@ -355,7 +355,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long shippedCallSizeLimit = (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); - Cell mobCell = null; + ExtendedCell mobCell = null; List committedMobWriterFileNames = new ArrayList<>(); try { @@ -363,9 +363,9 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); do { - hasMore = scanner.next(cells, scannerContext); + hasMore = scanner.next((List) cells, scannerContext); now = EnvironmentEdgeManager.currentTime(); - for (Cell c : cells) { + for (ExtendedCell c : cells) { if (compactMOBs) { if (MobUtils.isMobReferenceCell(c)) { String fName = MobUtils.getMobFileName(c); @@ -516,7 +516,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel mobCells++; // append the original keyValue in the mob file. mobFileWriter.append(c); - Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); + ExtendedCell reference = + MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); // write the cell whose value is the path of a mob file to the store file. 
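// Reviewer aside, not part of the patch: the reference cell appended below keeps the
// original key but carries a value of Bytes.add(Bytes.toBytes(cell.getValueLength()), fileName),
// per MobUtils.createMobRefCell later in this diff: a 4-byte length of the real MOB value
// followed by the MOB file name, which is enough for a later read to resolve the reference
// back to the cell stored in the mob file.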
writer.append(reference); cellsCountCompactedToMob++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java index e7b0f8260822..f8a55abde115 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java @@ -27,7 +27,7 @@ import java.util.function.Consumer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -192,7 +192,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List cells = new ArrayList<>(); + List cells = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); boolean control = @@ -205,9 +205,9 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, mobRefSet.get().clear(); try { do { - hasMore = scanner.next(cells, scannerContext); + hasMore = scanner.next((List) cells, scannerContext); if (!cells.isEmpty()) { - for (Cell c : cells) { + for (ExtendedCell c : cells) { // If we know that this KV is going to be included always, then let us // set its memstoreTS to 0. This will help us save space when writing to // disk. @@ -223,7 +223,7 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, mobCount++; // append the tags to the KeyValue. 
// The key is same, the value is the filename of the mob file - Cell reference = + ExtendedCell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); writer.append(reference); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java index f55088ea6be5..fe66535ee55e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCell.java @@ -20,6 +20,7 @@ import java.io.Closeable; import java.io.IOException; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.yetus.audience.InterfaceAudience; @@ -45,20 +46,20 @@ @InterfaceAudience.Private public class MobCell implements Closeable { - private final Cell cell; + private final ExtendedCell cell; private final StoreFileScanner sfScanner; - public MobCell(Cell cell) { + public MobCell(ExtendedCell cell) { this.cell = cell; this.sfScanner = null; } - public MobCell(Cell cell, StoreFileScanner sfScanner) { + public MobCell(ExtendedCell cell, StoreFileScanner sfScanner) { this.cell = cell; this.sfScanner = sfScanner; } - public Cell getCell() { + public ExtendedCell getCell() { return cell; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java index 3293208771ac..102617ae74df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; @@ -66,7 +66,7 @@ public StoreFileScanner getScanner() throws IOException { * @param cacheMobBlocks Should this scanner cache blocks. * @return The cell in the mob file. */ - public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException { + public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks) throws IOException { return readCell(search, cacheMobBlocks, sf.getMaxMemStoreTS()); } @@ -77,7 +77,8 @@ public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException * @param readPt the read point. * @return The cell in the mob file. 
*/ - public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException { + public MobCell readCell(ExtendedCell search, boolean cacheMobBlocks, long readPt) + throws IOException { StoreFileScanner scanner = null; boolean succ = false; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index b6b8be9d1791..2a1428196e6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -497,7 +498,8 @@ public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableNam * snapshot. * @return The mob reference KeyValue. */ - public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) { + public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName, + Tag tableNameTag) { // Append the tags to the KeyValue. // The key is same, the value is the filename of the mob file List tags = new ArrayList<>(); @@ -512,7 +514,8 @@ public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag return createMobRefCell(cell, fileName, TagUtil.fromList(tags)); } - public static Cell createMobRefCell(Cell cell, byte[] fileName, byte[] refCellTags) { + public static ExtendedCell createMobRefCell(ExtendedCell cell, byte[] fileName, + byte[] refCellTags) { byte[] refValue = Bytes.add(Bytes.toBytes(cell.getValueLength()), fileName); return PrivateCellUtil.createCell(cell, refValue, TagUtil.concatTags(refCellTags, cell)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java index b76680d0fdbe..24598c12bd1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; @@ -102,7 +103,7 @@ protected int doWork() throws Exception { out.println( String.format(KEY_TMPL, sequenceId, FORMATTER.format(Instant.ofEpochMilli(writeTime)))); for (Cell cell : edit.getCells()) { - Map op = WALPrettyPrinter.toStringMap(cell); + Map op = WALPrettyPrinter.toStringMap((ExtendedCell) cell); if ( !Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 9a88cab450af..9e15358c3673 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -22,7 +22,6 @@ import java.util.NavigableSet; import java.util.SortedSet; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; @@ -111,14 +110,14 @@ protected void resetTimeOfOldestEdit() { public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent); @Override - public void add(Iterable cells, MemStoreSizing memstoreSizing) { - for (Cell cell : cells) { + public void add(Iterable cells, MemStoreSizing memstoreSizing) { + for (ExtendedCell cell : cells) { add(cell, memstoreSizing); } } @Override - public void add(Cell cell, MemStoreSizing memstoreSizing) { + public void add(ExtendedCell cell, MemStoreSizing memstoreSizing) { doAddOrUpsert(cell, 0, memstoreSizing, true); } @@ -131,11 +130,11 @@ public void add(Cell cell, MemStoreSizing memstoreSizing) { * @param readpoint readpoint below which we can safely remove duplicate KVs * @param memstoreSizing object to accumulate changed size */ - private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) { + private void upsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing) { doAddOrUpsert(cell, readpoint, memstoreSizing, false); } - private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing, + private void doAddOrUpsert(ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing, boolean doAdd) { MutableSegment currentActive; boolean succ = false; @@ -153,8 +152,9 @@ private void doAddOrUpsert(Cell cell, long readpoint, MemStoreSizing memstoreSiz } } - protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) { - Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false); + protected void doAdd(MutableSegment currentActive, ExtendedCell cell, + MemStoreSizing memstoreSizing) { + ExtendedCell toAdd = maybeCloneWithAllocator(currentActive, cell, false); boolean mslabUsed = (toAdd != cell); // This cell data is backed by the same byte[] where we read request in RPC(See // HBASE-15180). By default, MSLAB is ON and we might have copied cell to MSLAB area. If @@ -171,14 +171,14 @@ protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing mem internalAdd(currentActive, toAdd, mslabUsed, memstoreSizing); } - private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint, + private void doUpsert(MutableSegment currentActive, ExtendedCell cell, long readpoint, MemStoreSizing memstoreSizing) { // Add the Cell to the MemStore - // Use the internalAdd method here since we (a) already have a lock - // and (b) cannot safely use the MSLAB here without potentially - // hitting OOME - see TestMemStore.testUpsertMSLAB for a - // test that triggers the pathological case if we don't avoid MSLAB - // here. + // Use the internalAdd method here since we + // (a) already have a lock and + // (b) cannot safely use the MSLAB here without potentially hitting OOME + // - see TestMemStore.testUpsertMSLAB for a test that triggers the pathological case if we don't + // avoid MSLAB here. // This cell data is backed by the same byte[] where we read request in RPC(See // HBASE-15180). We must do below deep copy. Or else we will keep referring to the bigger // chunk of memory and prevent it from getting GCed. 
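The comment above carries the invariant behind both doAdd() and doUpsert(): an incoming cell may still be backed by the byte[] of the RPC request, so it must either be cloned into the MSLAB or deep-copied before it is linked into the segment. A minimal sketch of that decision, outside the patch: Mslab and prepareForAdd are hypothetical stand-ins, while ExtendedCell#deepClone() is the real method whose unconditional availability lets this patch shrink deepCopyIfNeeded() to a single call.

import org.apache.hadoop.hbase.ExtendedCell;

final class MemStoreCopySketch {
  /** Hypothetical stand-in for the MSLAB; assumed shape, not the real interface. */
  interface Mslab {
    /** Returns a chunk-backed copy, or null if the cell cannot be copied in. */
    ExtendedCell copyCellInto(ExtendedCell cell);
  }

  static ExtendedCell prepareForAdd(ExtendedCell cell, Mslab mslab) {
    ExtendedCell copied = (mslab == null) ? null : mslab.copyCellInto(cell);
    if (copied != null) {
      return copied; // chunk-backed: no longer pins the RPC request buffer
    }
    // Every ExtendedCell can clone itself, which is why deepCopyIfNeeded() in
    // this patch no longer needs an instanceof check before calling deepClone().
    return cell.deepClone();
  }
}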
@@ -195,7 +195,7 @@ private void doUpsert(MutableSegment currentActive, Cell cell, long readpoint, * @param memstoreSizing object to accumulate region size changes * @return true iff can proceed with applying the update */ - protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell, + protected abstract boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing); /** @@ -204,16 +204,13 @@ protected abstract boolean preUpdate(MutableSegment currentActive, Cell cell, */ protected abstract void postUpdate(MutableSegment currentActive); - private static Cell deepCopyIfNeeded(Cell cell) { - if (cell instanceof ExtendedCell) { - return ((ExtendedCell) cell).deepClone(); - } - return cell; + private static ExtendedCell deepCopyIfNeeded(ExtendedCell cell) { + return cell.deepClone(); } @Override - public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { - for (Cell cell : cells) { + public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { + for (ExtendedCell cell : cells) { upsert(cell, readpoint, memstoreSizing); } } @@ -281,10 +278,8 @@ protected void dump(Logger log) { snapshot.dump(log); } - /* - * @return Return lowest of a or b or null if both a and b are null - */ - protected Cell getLowest(final Cell a, final Cell b) { + /** Returns the lowest of a or b, or null if both a and b are null */ + protected ExtendedCell getLowest(final ExtendedCell a, final ExtendedCell b) { if (a == null) { return b; } @@ -294,17 +289,17 @@ protected Cell getLowest(final Cell a, final Cell b) { return comparator.compareRows(a, b) <= 0 ? a : b; } - /* + /** * @param key Find row that follows this one. If null, return first. * @param set Set to look in for a row beyond row. * @return Next row or null if none found. If one found, will be a new KeyValue -- can be - * destroyed by subsequent calls to this method. + * destroyed by subsequent calls to this method. */ - protected Cell getNextRow(final Cell key, final NavigableSet set) { - Cell result = null; - SortedSet tail = key == null ? set : set.tailSet(key); + protected ExtendedCell getNextRow(final ExtendedCell key, final NavigableSet set) { + ExtendedCell result = null; + SortedSet tail = key == null ? set : set.tailSet(key); // Iterate until we fall into the next row; i.e. move off current row - for (Cell cell : tail) { + for (ExtendedCell cell : tail) { if (comparator.compareRows(cell, key) <= 0) { continue; } @@ -326,20 +321,20 @@ protected Cell getNextRow(final Cell key, final NavigableSet set) { * @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap. * @return either the given cell or its clone */ - private Cell maybeCloneWithAllocator(MutableSegment currentActive, Cell cell, + private ExtendedCell maybeCloneWithAllocator(MutableSegment currentActive, ExtendedCell cell, boolean forceCloneOfBigCell) { return currentActive.maybeCloneWithAllocator(cell, forceCloneOfBigCell); } - /* + /** * Internal version of add() that doesn't clone Cells with the allocator, and doesn't take the * lock. 
Callers should ensure they already have the read lock taken - * @param toAdd the cell to add - * @param mslabUsed whether using MSLAB + * @param toAdd the cell to add + * @param mslabUsed whether using MSLAB * @param memstoreSizing object to accumulate changed size */ - private void internalAdd(MutableSegment currentActive, final Cell toAdd, final boolean mslabUsed, - MemStoreSizing memstoreSizing) { + private void internalAdd(MutableSegment currentActive, final ExtendedCell toAdd, + final boolean mslabUsed, MemStoreSizing memstoreSizing) { boolean sizeAddedPreOperation = sizeAddedPreOperation(); currentActive.add(toAdd, mslabUsed, memstoreSizing, sizeAddedPreOperation); setOldestEditTimeToNow(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java index f62b0d615149..618ae07a9a8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; @@ -86,15 +87,14 @@ protected boolean canBeFlattened() { // Create CellSet based on CellArrayMap from compacting iterator private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) { - boolean merge = (action == MemStoreCompactionStrategy.Action.MERGE || action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS); - Cell[] cells = new Cell[numOfCells]; // build the Cell Array + ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array int i = 0; int numUniqueKeys = 0; Cell prev = null; while (iterator.hasNext()) { - Cell c = iterator.next(); + ExtendedCell c = iterator.next(); // The scanner behind the iterator is doing all the elimination logic if (merge) { // if this is merge we just move the Cell object without copying MSLAB @@ -126,8 +126,8 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false); - this.setCellSet(null, new CellSet(cam, numUniqueKeys)); // update the CellSet of this Segment + CellArrayMap cam = new CellArrayMap<>(getComparator(), cells, 0, i, false); + this.setCellSet(null, new CellSet<>(cam, numUniqueKeys)); // update the CellSet of this Segment } /*------------------------------------------------------------------------*/ @@ -135,12 +135,12 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator // (without compacting iterator) // We do not consider cells bigger than chunks! 
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, - CellSet oldCellSet, MemStoreCompactionStrategy.Action action) { - Cell[] cells = new Cell[numOfCells]; // build the Cell Array - Cell curCell; + CellSet<ExtendedCell> oldCellSet, MemStoreCompactionStrategy.Action action) { + ExtendedCell[] cells = new ExtendedCell[numOfCells]; // build the Cell Array + ExtendedCell curCell; int idx = 0; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; try { while ((curCell = segmentScanner.next()) != null) { cells[idx++] = curCell; @@ -165,9 +165,9 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false); + CellArrayMap<ExtendedCell> cam = new CellArrayMap<>(getComparator(), cells, 0, idx, false); // update the CellSet of this Segment - this.setCellSet(oldCellSet, new CellSet(cam, numUniqueKeys)); + this.setCellSet(oldCellSet, new CellSet<>(cam, numUniqueKeys)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java index af60c8e93cf6..1ebf693bda6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java @@ -26,15 +26,15 @@ * CellArrayMap's array of references pointing to Cell objects. */ @InterfaceAudience.Private -public class CellArrayMap extends CellFlatMap { +public class CellArrayMap<T extends Cell> extends CellFlatMap<T> { - private final Cell[] block; + private final T[] block; /* * The Cells Array is created only when CellArrayMap is created, all sub-CellBlocks use boundary * indexes. The given Cell array must be ordered.
*/ - public CellArrayMap(Comparator<? super Cell> comparator, Cell[] b, int min, int max, + public CellArrayMap(Comparator<? super T> comparator, T[] b, int min, int max, boolean descending) { super(comparator, min, max, descending); this.block = b; @@ -42,12 +42,12 @@ public CellArrayMap(Comparator<? super Cell> comparator, Cell[] b, int min, int /* To be used by base class only to create a sub-CellFlatMap */ @Override - protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { - return new CellArrayMap(comparator(), this.block, min, max, descending); + protected CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending) { + return new CellArrayMap<>(comparator(), this.block, min, max, descending); } @Override - protected Cell getCell(int i) { + protected T getCell(int i) { if ((i < minCellIdx) || (i >= maxCellIdx)) return null; return block[i]; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java index de6377668f93..a623c823cb33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; @@ -135,19 +134,17 @@ protected boolean canBeFlattened() { // Create CellSet based on CellChunkMap from compacting iterator private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) { - int numOfCellsAfterCompaction = 0; int currentChunkIdx = 0; int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; Chunk[] chunks = allocIndexChunks(numOfCells); while (iterator.hasNext()) { // the iterator hides the elimination logic for compaction boolean alreadyCopied = false; - Cell c = iterator.next(); + ExtendedCell c = iterator.next(); numOfCellsAfterCompaction++; - assert (c instanceof ExtendedCell); - if (((ExtendedCell) c).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { + if (c.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { // CellChunkMap assumes all cells are allocated on MSLAB. // Therefore, cells which are not allocated on MSLAB initially, // are copied into MSLAB here.
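In generic form, this guard no longer needs an instanceof check; a minimal standalone sketch of the copy-on-miss pattern (illustrative only, reusing the getChunkId()/copyCellIntoMSLAB names from this file):

// Sketch: keep a cell chunk-resident before indexing it in a CellChunkMap.
// getChunkId() reports CELL_NOT_BASED_ON_CHUNK for cells that still live on the JVM heap.
private ExtendedCell ensureChunkResident(ExtendedCell c, MemStoreSizing sizing) {
  return c.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK
    ? copyCellIntoMSLAB(c, sizing) // private helper, retyped later in this diff
    : c;
}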
@@ -190,9 +187,9 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet - CellChunkMap ccm = - new CellChunkMap(getComparator(), chunks, 0, numOfCellsAfterCompaction, false); - this.setCellSet(null, new CellSet(ccm, numUniqueKeys)); // update the CellSet of this Segment + CellChunkMap<ExtendedCell> ccm = + new CellChunkMap<>(getComparator(), chunks, 0, numOfCellsAfterCompaction, false); + this.setCellSet(null, new CellSet<>(ccm, numUniqueKeys)); // update the CellSet of this Segment } /*------------------------------------------------------------------------*/ @@ -200,19 +197,19 @@ private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator // (without compacting iterator) // This is a service for not-flat immutable segments private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, - CellSet oldCellSet, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) { - Cell curCell; + CellSet<ExtendedCell> oldCellSet, MemStoreSizing memstoreSizing, + MemStoreCompactionStrategy.Action action) { + ExtendedCell curCell; Chunk[] chunks = allocIndexChunks(numOfCells); int currentChunkIdx = 0; int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER; int numUniqueKeys = 0; - Cell prev = null; + ExtendedCell prev = null; try { while ((curCell = segmentScanner.next()) != null) { - assert (curCell instanceof ExtendedCell); - if (((ExtendedCell) curCell).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { + if (curCell.getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) { // CellChunkMap assumes all cells are allocated on MSLAB. // Therefore, cells which are not allocated on MSLAB initially, // are copied into MSLAB here. @@ -246,9 +243,10 @@ private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, segmentScanner.close(); } - CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCells, false); + CellChunkMap<ExtendedCell> ccm = + new CellChunkMap<>(getComparator(), chunks, 0, numOfCells, false); // update the CellSet of this Segment - this.setCellSet(oldCellSet, new CellSet(ccm, numUniqueKeys)); + this.setCellSet(oldCellSet, new CellSet<>(ccm, numUniqueKeys)); } /*------------------------------------------------------------------------*/ @@ -317,7 +315,7 @@ private Chunk[] allocIndexChunks(int numOfCells) { return chunks; } - private Cell copyCellIntoMSLAB(Cell cell, MemStoreSizing memstoreSizing) { + private ExtendedCell copyCellIntoMSLAB(ExtendedCell cell, MemStoreSizing memstoreSizing) { // Take care for a special case when a cell is copied from on-heap to (probably off-heap) MSLAB. // The cell allocated as an on-heap JVM object (byte array) occupies slightly different // amount of memory, than when the cell serialized and allocated on the MSLAB.
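The comment closing this hunk is the subtle part of copyCellIntoMSLAB: the heap footprint of a JVM-object cell and the on-chunk footprint of its MSLAB copy differ, so sizing must track the delta. A hedged sketch of that idea (forceCopyOfBigCellInto and the four-argument incMemStoreSize appear elsewhere in this patch; the wrapper method itself is hypothetical and simplifies the real accounting):

// Sketch: re-home an on-heap cell onto an MSLAB chunk and account for the
// difference between its JVM-object heap size and the size of the copy.
ExtendedCell copyAndAccount(MemStoreLAB lab, ExtendedCell cell, MemStoreSizing sizing) {
  ExtendedCell copied = lab.forceCopyOfBigCellInto(cell);
  sizing.incMemStoreSize(0, copied.heapSize() - cell.heapSize(), 0, 0);
  return copied;
}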
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java index e4bfcf05ab2d..f6dad226cce0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java @@ -51,7 +51,7 @@ * */ @InterfaceAudience.Private -public class CellChunkMap extends CellFlatMap { +public class CellChunkMap<T extends Cell> extends CellFlatMap<T> { private final Chunk[] chunks; // the array of chunks, on which the index is based @@ -69,7 +69,7 @@ public class CellChunkMap extends CellFlatMap { * @param max number of Cells or the index of the cell after the maximal cell * @param descending the order of the given array */ - public CellChunkMap(Comparator<? super Cell> comparator, Chunk[] chunks, int min, int max, + public CellChunkMap(Comparator<? super T> comparator, Chunk[] chunks, int min, int max, boolean descending) { super(comparator, min, max, descending); this.chunks = chunks; @@ -86,12 +86,12 @@ public CellChunkMap(Comparator<? super Cell> comparator, Chunk[] chunks, int min * create only CellChunkMap from CellChunkMap */ @Override - protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { - return new CellChunkMap(this.comparator(), this.chunks, min, max, descending); + protected CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending) { + return new CellChunkMap<>(this.comparator(), this.chunks, min, max, descending); } @Override - protected Cell getCell(int i) { + protected T getCell(int i) { // get the index of the relevant chunk inside chunk array int chunkIndex = (i / numOfCellRepsInChunk); ByteBuffer block = chunks[chunkIndex].getData();// get the ByteBuffer of the relevant chunk @@ -127,6 +127,9 @@ protected Cell getCell(int i) { + ". We were looking for a cell at index " + i); } - return new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID); + @SuppressWarnings("unchecked") + T cell = (T) new ByteBufferChunkKeyValue(buf, offsetOfCell, lengthOfCell, cellSeqID); + + return cell; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java index 8a64d80c15ed..0c95f7ddb4ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java @@ -26,8 +26,6 @@ import java.util.Set; import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * CellFlatMap stores a constant number of elements and is immutable after creation stage. Being * sequential array and thus requires less memory than ConcurrentSkipListMap.
*/ @InterfaceAudience.Private -public abstract class CellFlatMap implements NavigableMap<Cell, Cell> { - private static final Logger LOG = LoggerFactory.getLogger(CellFlatMap.class); - private final Comparator<? super Cell> comparator; +public abstract class CellFlatMap<T extends Cell> implements NavigableMap<T, T> { + + private final Comparator<? super T> comparator; protected int minCellIdx = 0; // the index of the minimal cell (for sub-sets) protected int maxCellIdx = 0; // the index of the cell after the maximal cell (for sub-sets) private boolean descending = false; /* C-tor */ - public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolean d) { + public CellFlatMap(Comparator<? super T> comparator, int min, int max, boolean d) { this.comparator = comparator; this.minCellIdx = min; this.maxCellIdx = max; @@ -54,10 +52,10 @@ public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolea } /* Used for abstract CellFlatMap creation, implemented by derived class */ - protected abstract CellFlatMap createSubCellFlatMap(int min, int max, boolean descending); + protected abstract CellFlatMap<T> createSubCellFlatMap(int min, int max, boolean descending); /* Returns the i-th cell in the cell block */ - protected abstract Cell getCell(int i); + protected abstract T getCell(int i); /** * Binary search for a given key in between given boundaries of the array. Positive returned * numbers mean the index. Negative returned numbers means the key not found. * @param needle The key to look for in all of the entries * @return Same return value as Arrays.binarySearch. */ - private int find(Cell needle) { + private int find(T needle) { int begin = minCellIdx; int end = maxCellIdx - 1; while (begin <= end) { int mid = begin + ((end - begin) >> 1); - Cell midCell = getCell(mid); + T midCell = getCell(mid); int compareRes = comparator.compare(midCell, needle); if (compareRes == 0) { @@ -98,7 +96,7 @@ private int find(Cell needle) { * the given key exists in the set or not. taking into consideration whether the key should be * inclusive or exclusive. */ - private int getValidIndex(Cell key, boolean inclusive, boolean tail) { + private int getValidIndex(T key, boolean inclusive, boolean tail) { final int index = find(key); // get the valid (positive) insertion point from the output of the find() method int insertionPoint = index < 0 ?
~index : index; @@ -125,7 +123,7 @@ private int getValidIndex(Cell key, boolean inclusive, boolean tail) { } @Override - public Comparator comparator() { + public Comparator comparator() { return comparator; } @@ -141,8 +139,7 @@ public boolean isEmpty() { // ---------------- Sub-Maps ---------------- @Override - public NavigableMap subMap(Cell fromKey, boolean fromInclusive, Cell toKey, - boolean toInclusive) { + public NavigableMap subMap(T fromKey, boolean fromInclusive, T toKey, boolean toInclusive) { final int lessCellIndex = getValidIndex(fromKey, fromInclusive, true); final int greaterCellIndex = getValidIndex(toKey, toInclusive, false); if (descending) { @@ -153,7 +150,7 @@ public NavigableMap subMap(Cell fromKey, boolean fromInclusive, Cell } @Override - public NavigableMap headMap(Cell toKey, boolean inclusive) { + public NavigableMap headMap(T toKey, boolean inclusive) { if (descending) { return createSubCellFlatMap(getValidIndex(toKey, inclusive, false), maxCellIdx, descending); } else { @@ -162,7 +159,7 @@ public NavigableMap headMap(Cell toKey, boolean inclusive) { } @Override - public NavigableMap tailMap(Cell fromKey, boolean inclusive) { + public NavigableMap tailMap(T fromKey, boolean inclusive) { if (descending) { return createSubCellFlatMap(minCellIdx, getValidIndex(fromKey, inclusive, true), descending); } else { @@ -171,28 +168,28 @@ public NavigableMap tailMap(Cell fromKey, boolean inclusive) { } @Override - public NavigableMap descendingMap() { + public NavigableMap descendingMap() { return createSubCellFlatMap(minCellIdx, maxCellIdx, true); } @Override - public NavigableMap subMap(Cell k1, Cell k2) { + public NavigableMap subMap(T k1, T k2) { return this.subMap(k1, true, k2, true); } @Override - public NavigableMap headMap(Cell k) { + public NavigableMap headMap(T k) { return this.headMap(k, true); } @Override - public NavigableMap tailMap(Cell k) { + public NavigableMap tailMap(T k) { return this.tailMap(k, true); } // -------------------------------- Key's getters -------------------------------- @Override - public Cell firstKey() { + public T firstKey() { if (isEmpty()) { return null; } @@ -200,7 +197,7 @@ public Cell firstKey() { } @Override - public Cell lastKey() { + public T lastKey() { if (isEmpty()) { return null; } @@ -208,7 +205,7 @@ public Cell lastKey() { } @Override - public Cell lowerKey(Cell k) { + public T lowerKey(T k) { if (isEmpty()) { return null; } @@ -219,7 +216,7 @@ public Cell lowerKey(Cell k) { } @Override - public Cell floorKey(Cell k) { + public T floorKey(T k) { if (isEmpty()) { return null; } @@ -229,7 +226,7 @@ public Cell floorKey(Cell k) { } @Override - public Cell ceilingKey(Cell k) { + public T ceilingKey(T k) { if (isEmpty()) { return null; } @@ -239,7 +236,7 @@ public Cell ceilingKey(Cell k) { } @Override - public Cell higherKey(Cell k) { + public T higherKey(T k) { if (isEmpty()) { return null; } @@ -250,7 +247,7 @@ public Cell higherKey(Cell k) { @Override public boolean containsKey(Object o) { - int index = find((Cell) o); + int index = find((T) o); return (index >= 0); } @@ -260,99 +257,99 @@ public boolean containsValue(Object o) { // use containsKey(Object o) instead } @Override - public Cell get(Object o) { - int index = find((Cell) o); + public T get(Object o) { + int index = find((T) o); return (index >= 0) ? 
getCell(index) : null; } // -------------------------------- Entry's getters -------------------------------- - private static class CellFlatMapEntry implements Entry { - private final Cell cell; + private static class CellFlatMapEntry implements Entry { + private final T cell; - public CellFlatMapEntry(Cell cell) { + public CellFlatMapEntry(T cell) { this.cell = cell; } @Override - public Cell getKey() { + public T getKey() { return cell; } @Override - public Cell getValue() { + public T getValue() { return cell; } @Override - public Cell setValue(Cell value) { + public T setValue(T value) { throw new UnsupportedOperationException(); } } @Override - public Entry lowerEntry(Cell k) { - Cell cell = lowerKey(k); + public Entry lowerEntry(T k) { + T cell = lowerKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry higherEntry(Cell k) { - Cell cell = higherKey(k); + public Entry higherEntry(T k) { + T cell = higherKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry ceilingEntry(Cell k) { - Cell cell = ceilingKey(k); + public Entry ceilingEntry(T k) { + T cell = ceilingKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry floorEntry(Cell k) { - Cell cell = floorKey(k); + public Entry floorEntry(T k) { + T cell = floorKey(k); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry firstEntry() { - Cell cell = firstKey(); + public Entry firstEntry() { + T cell = firstKey(); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } @Override - public Entry lastEntry() { - Cell cell = lastKey(); + public Entry lastEntry() { + T cell = lastKey(); if (cell == null) { return null; } - return new CellFlatMapEntry(cell); + return new CellFlatMapEntry<>(cell); } // The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are // updating methods. @Override - public Entry pollFirstEntry() { + public Entry pollFirstEntry() { throw new UnsupportedOperationException(); } @Override - public Entry pollLastEntry() { + public Entry pollLastEntry() { throw new UnsupportedOperationException(); } @@ -362,7 +359,7 @@ public Entry pollLastEntry() { // fill up with Cells and provided in construction time. // Later the structure is immutable. 
@Override - public Cell put(Cell k, Cell v) { + public T put(T k, T v) { throw new UnsupportedOperationException(); } @@ -372,43 +369,43 @@ public void clear() { } @Override - public Cell remove(Object o) { + public T remove(Object o) { throw new UnsupportedOperationException(); } @Override - public void putAll(Map map) { + public void putAll(Map map) { throw new UnsupportedOperationException(); } // -------------------------------- Sub-Sets -------------------------------- @Override - public NavigableSet navigableKeySet() { + public NavigableSet navigableKeySet() { throw new UnsupportedOperationException(); } @Override - public NavigableSet descendingKeySet() { + public NavigableSet descendingKeySet() { throw new UnsupportedOperationException(); } @Override - public NavigableSet keySet() { + public NavigableSet keySet() { throw new UnsupportedOperationException(); } @Override - public Collection values() { + public Collection values() { return new CellFlatMapCollection(); } @Override - public Set> entrySet() { + public Set> entrySet() { throw new UnsupportedOperationException(); } // -------------------------------- Iterator K -------------------------------- - private final class CellFlatMapIterator implements Iterator { + private final class CellFlatMapIterator implements Iterator { int index; private CellFlatMapIterator() { @@ -421,8 +418,8 @@ public boolean hasNext() { } @Override - public Cell next() { - Cell result = getCell(index); + public T next() { + T result = getCell(index); if (descending) { index--; } else { @@ -438,7 +435,7 @@ public void remove() { } // -------------------------------- Collection -------------------------------- - private final class CellFlatMapCollection implements Collection { + private final class CellFlatMapCollection implements Collection { @Override public int size() { @@ -461,7 +458,7 @@ public boolean contains(Object o) { } @Override - public Iterator iterator() { + public Iterator iterator() { return new CellFlatMapIterator(); } @@ -476,7 +473,7 @@ public T[] toArray(T[] ts) { } @Override - public boolean add(Cell k) { + public boolean add(T k) { throw new UnsupportedOperationException(); } @@ -491,7 +488,7 @@ public boolean containsAll(Collection collection) { } @Override - public boolean addAll(Collection collection) { + public boolean addAll(Collection collection) { throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java index 4890c8a84494..c8d9b5b2ea67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java @@ -36,7 +36,7 @@ * and set and won't throw ConcurrentModificationException when iterating. */ @InterfaceAudience.Private -public class CellSet implements NavigableSet { +public class CellSet implements NavigableSet { public static final int UNKNOWN_NUM_UNIQUES = -1; // Implemented on top of a {@link java.util.concurrent.ConcurrentSkipListMap} @@ -44,127 +44,127 @@ public class CellSet implements NavigableSet { // is not already present.", this implementation "Adds the specified element to this set EVEN // if it is already present overwriting what was there previous". 
// Otherwise, has same attributes as ConcurrentSkipListSet - private final NavigableMap delegatee; /// + private final NavigableMap delegatee; /// private final int numUniqueKeys; - public CellSet(final CellComparator c) { + public CellSet(CellComparator c) { this.delegatee = new ConcurrentSkipListMap<>(c.getSimpleComparator()); this.numUniqueKeys = UNKNOWN_NUM_UNIQUES; } - CellSet(final NavigableMap m, int numUniqueKeys) { + CellSet(final NavigableMap m, int numUniqueKeys) { this.delegatee = m; this.numUniqueKeys = numUniqueKeys; } - CellSet(final NavigableMap m) { + CellSet(final NavigableMap m) { this.delegatee = m; this.numUniqueKeys = UNKNOWN_NUM_UNIQUES; } - NavigableMap getDelegatee() { + NavigableMap getDelegatee() { return delegatee; } @Override - public Cell ceiling(Cell e) { + public T ceiling(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Iterator descendingIterator() { + public Iterator descendingIterator() { return this.delegatee.descendingMap().values().iterator(); } @Override - public NavigableSet descendingSet() { + public NavigableSet descendingSet() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell floor(Cell e) { + public T floor(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet headSet(final Cell toElement) { + public SortedSet headSet(final T toElement) { return headSet(toElement, false); } @Override - public NavigableSet headSet(final Cell toElement, boolean inclusive) { - return new CellSet(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES); + public NavigableSet headSet(final T toElement, boolean inclusive) { + return new CellSet<>(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES); } @Override - public Cell higher(Cell e) { + public T higher(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Iterator iterator() { + public Iterator iterator() { return this.delegatee.values().iterator(); } @Override - public Cell lower(Cell e) { + public T lower(T e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell pollFirst() { + public T pollFirst() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell pollLast() { + public T pollLast() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet subSet(Cell fromElement, Cell toElement) { + public SortedSet subSet(T fromElement, T toElement) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, + public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, boolean toInclusive) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public SortedSet tailSet(Cell fromElement) { + public SortedSet tailSet(T fromElement) { return tailSet(fromElement, true); } @Override - public NavigableSet tailSet(Cell fromElement, boolean inclusive) { - return new CellSet(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES); + public NavigableSet tailSet(T fromElement, boolean inclusive) { + return new CellSet<>(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES); } @Override - public Comparator comparator() { + public Comparator comparator() { throw new 
UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @Override - public Cell first() { + public T first() { return this.delegatee.firstEntry().getValue(); } @Override - public Cell last() { + public T last() { return this.delegatee.lastEntry().getValue(); } @Override - public boolean add(Cell e) { + public boolean add(T e) { return this.delegatee.put(e, e) == null; } @Override - public boolean addAll(Collection c) { + public boolean addAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java index 1d838d86abcf..f3f260f8cf7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.yetus.audience.InterfaceAudience; @@ -34,14 +34,14 @@ public interface CellSink { * Append the given cell * @param cell the cell to be added */ - void append(Cell cell) throws IOException; + void append(ExtendedCell cell) throws IOException; /** * Append the given (possibly partial) list of cells of a row * @param cellList the cell list to be added */ - default void appendAll(List cellList) throws IOException { - for (Cell cell : cellList) { + default void appendAll(List cellList) throws IOException { + for (ExtendedCell cell : cellList) { append(cell); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 1a2cbc6bdabf..568a7b061021 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; @@ -306,7 +307,7 @@ public void stopReplayingFromWAL() { * @return true iff can proceed with applying the update */ @Override - protected boolean preUpdate(MutableSegment currentActive, Cell cell, + protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing) { if (currentActive.sharedLock()) { if (checkAndAddToActiveSize(currentActive, cell, memstoreSizing)) { @@ -621,8 +622,8 @@ boolean isMemStoreFlushingInMemory() { * @param cell Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. 
*/ - Cell getNextRow(final Cell cell) { - Cell lowest = null; + ExtendedCell getNextRow(final ExtendedCell cell) { + ExtendedCell lowest = null; List segments = getSegments(); for (Segment segment : segments) { if (lowest == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index f955eb5d5825..af09e462140c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -23,6 +23,7 @@ import java.util.SortedSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -93,7 +94,7 @@ public void close() { * @return either the given cell or its clone */ @Override - public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) { + public ExtendedCell maybeCloneWithAllocator(ExtendedCell cell, boolean forceCloneOfBigCell) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -192,17 +193,17 @@ public TimeRangeTracker getTimeRangeTracker() { // *** Methods for SegmentsScanner @Override - public Cell last() { + public ExtendedCell last() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - public Iterator iterator() { + public Iterator iterator() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - public SortedSet headSet(Cell firstKeyOnRow) { + public SortedSet headSet(ExtendedCell firstKeyOnRow) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -218,18 +219,18 @@ public int compareRows(Cell left, Cell right) { /** Returns a set of all cells in the segment */ @Override - protected CellSet getCellSet() { + protected CellSet getCellSet() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, + protected void internalAdd(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @Override - protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } @@ -240,7 +241,7 @@ protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, * @return a subset of the segment cell set, which starts with the given cell */ @Override - protected SortedSet tailSet(Cell firstCell) { + protected SortedSet tailSet(ExtendedCell firstCell) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java index e5ee8041c350..b800178e8a28 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +54,7 @@ public DateTieredMultiFileWriter(List lowerBoundaries, } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { Map.Entry entry = lowerBoundary2Writer.floorEntry(cell.getTimestamp()); StoreFileWriter writer = entry.getValue(); if (writer == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 420dad51e377..433105e998f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.InnerStoreCellComparator; @@ -155,7 +156,7 @@ protected List getSegments() throws IOException { * @param cell Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. */ - Cell getNextRow(final Cell cell) { + ExtendedCell getNextRow(final ExtendedCell cell) { return getLowest(getNextRow(cell, this.getActive().getCellSet()), getNextRow(cell, this.snapshot.getCellSet())); } @@ -165,7 +166,7 @@ public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) { } @Override - protected boolean preUpdate(MutableSegment currentActive, Cell cell, + protected boolean preUpdate(MutableSegment currentActive, ExtendedCell cell, MemStoreSizing memstoreSizing) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index d4b24de33cc3..c71498cfab09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -33,10 +33,10 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -300,7 +300,7 @@ private void validateMobFile(Path path) throws IOException { * @param cacheBlocks Whether the scanner should cache blocks. * @return The cell found in the mob file. 
*/ - public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException { + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks) throws IOException { return resolve(reference, cacheBlocks, -1, true); } @@ -313,8 +313,8 @@ public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException { * resolved. * @return The cell found in the mob file. */ - public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyValueOnMobCellMiss) - throws IOException { + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks, + boolean readEmptyValueOnMobCellMiss) throws IOException { return resolve(reference, cacheBlocks, -1, readEmptyValueOnMobCellMiss); } @@ -328,7 +328,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyVal * corrupt. * @return The cell found in the mob file. */ - public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt, + public MobCell resolve(ExtendedCell reference, boolean cacheBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { MobCell mobCell = null; if (MobUtils.hasValidMobRefCellValue(reference)) { @@ -343,7 +343,7 @@ public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt, if (mobCell == null) { LOG.warn("The Cell result is null, assemble a new Cell with the same row,family," + "qualifier,timestamp,type and tags but with an empty value to return."); - Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength()) .setFamily(reference.getFamilyArray(), reference.getFamilyOffset(), reference.getFamilyLength()) @@ -397,7 +397,7 @@ public List getLocations(TableName tableName) throws IOException { * corrupt. * @return The found cell. Null if there's no such a cell. */ - private MobCell readCell(List locations, String fileName, Cell search, + private MobCell readCell(List locations, String fileName, ExtendedCell search, boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { FileSystem fs = getFileSystem(); IOException ioe = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2381458a48bb..51927799f289 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -3145,18 +3146,18 @@ public void delete(Delete delete) throws IOException { *

* Caller should have the row and region locks. */ - private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { - for (Map.Entry> e : familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { byte[] family = e.getKey(); - List cells = e.getValue(); + List cells = e.getValue(); assert cells instanceof RandomAccess; Map kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR); int listSize = cells.size(); for (int i = 0; i < listSize; i++) { - Cell cell = cells.get(i); + ExtendedCell cell = cells.get(i); // Check if time is LATEST, change to time of most recent addition if so // This is expensive. if ( @@ -3242,7 +3243,7 @@ private abstract static class BatchOperation { protected final OperationStatus[] retCodeDetails; protected final WALEdit[] walEditsFromCoprocessors; // reference family cell maps directly so coprocessors can mutate them if desired - protected final Map>[] familyCellMaps; + protected final Map>[] familyCellMaps; // For Increment/Append operations protected final Result[] results; @@ -3411,7 +3412,9 @@ protected void checkAndPrepareMutation(int index, long timestamp) throws IOExcep if (mutation instanceof Put || mutation instanceof Delete) { // store the family map reference to allow for mutations - familyCellMaps[index] = mutation.getFamilyCellMap(); + // we know that in mutation, only ExtendedCells are allow so here we do a fake cast, to + // simplify later logic + familyCellMaps[index] = (Map) mutation.getFamilyCellMap(); } // store durability for the batch (highest durability of all operations in the batch) @@ -3602,7 +3605,7 @@ public boolean visit(int index) throws IOException { walEdit.add(cell); } } - walEdit.add(familyCellMaps[index]); + walEdit.add((Map) familyCellMaps[index]); return true; } @@ -3663,11 +3666,11 @@ public void doPostOpCleanupForMiniBatch( * also does not check the families for validity. * @param familyMap Map of Cells by family */ - protected void applyFamilyMapToMemStore(Map> familyMap, + protected void applyFamilyMapToMemStore(Map> familyMap, MemStoreSizing memstoreAccounting) { - for (Map.Entry> e : familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { byte[] family = e.getKey(); - List cells = e.getValue(); + List cells = e.getValue(); assert cells instanceof RandomAccess; region.applyToMemStore(region.getStore(family), cells, false, memstoreAccounting); } @@ -3844,7 +3847,7 @@ public void prepareMiniBatchOperations(MiniBatchOperationInProgress mi return true; } - List results = returnResults ? new ArrayList<>(mutation.size()) : null; + List results = returnResults ? new ArrayList<>(mutation.size()) : null; familyCellMaps[index] = reckonDeltas(mutation, results, timestamp); this.results[index] = results != null ? Result.create(results) : Result.EMPTY_RESULT; @@ -3934,19 +3937,19 @@ private static Get toGet(final Mutation mutation) throws IOException { return get; } - private Map> reckonDeltas(Mutation mutation, List results, long now) - throws IOException { + private Map> reckonDeltas(Mutation mutation, + List results, long now) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; - Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); + Map> ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Process a Store/family at a time. 
for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { final byte[] columnFamilyName = entry.getKey(); - List deltas = entry.getValue(); + List deltas = (List) entry.getValue(); // Reckon for the Store what to apply to WAL and MemStore. - List toApply = + List toApply = reckonDeltasByStore(region.stores.get(columnFamilyName), mutation, now, deltas, results); if (!toApply.isEmpty()) { - for (Cell cell : toApply) { + for (ExtendedCell cell : toApply) { HStore store = region.getStore(cell); if (store == null) { region.checkFamily(CellUtil.cloneFamily(cell)); @@ -3971,11 +3974,11 @@ private Map> reckonDeltas(Mutation mutation, List resul * @return Resulting Cells after deltas have been applied to current values. Side * effect is our filling out of the results List. */ - private List reckonDeltasByStore(HStore store, Mutation mutation, long now, - List deltas, List results) throws IOException { + private List reckonDeltasByStore(HStore store, Mutation mutation, long now, + List deltas, List results) throws IOException { assert mutation instanceof Increment || mutation instanceof Append; byte[] columnFamily = store.getColumnFamilyDescriptor().getName(); - List> cellPairs = new ArrayList<>(deltas.size()); + List> cellPairs = new ArrayList<>(deltas.size()); // Sort the cells so that they match the order that they appear in the Get results. // Otherwise, we won't be able to find the existing values if the cells are not specified @@ -3984,7 +3987,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now // Get previous values for all columns in this family. Get get = new Get(mutation.getRow()); - for (Cell cell : deltas) { + for (ExtendedCell cell : deltas) { get.addColumn(columnFamily, CellUtil.cloneQualifier(cell)); } TimeRange tr; @@ -4001,14 +4004,14 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now try (RegionScanner scanner = region.getScanner(new Scan(get))) { // NOTE: Please don't use HRegion.get() instead, // because it will copy cells to heap. See HBASE-26036 - List currentValues = new ArrayList<>(); - scanner.next(currentValues); + List currentValues = new ArrayList<>(); + scanner.next((List) currentValues); // Iterate the input columns and update existing values if they were found, otherwise // add new column initialized to the delta amount int currentValuesIndex = 0; for (int i = 0; i < deltas.size(); i++) { - Cell delta = deltas.get(i); - Cell currentValue = null; + ExtendedCell delta = deltas.get(i); + ExtendedCell currentValue = null; if ( currentValuesIndex < currentValues.size() && CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta) @@ -4019,7 +4022,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now } } // Switch on whether this an increment or an append building the new Cell to apply. - Cell newCell; + ExtendedCell newCell; if (mutation instanceof Increment) { long deltaAmount = getLongValue(delta); final long newValue = @@ -4053,14 +4056,14 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now if (region.coprocessorHost != null) { // Here the operation must be increment or append. cellPairs = mutation instanceof Increment - ? region.coprocessorHost.postIncrementBeforeWAL(mutation, cellPairs) - : region.coprocessorHost.postAppendBeforeWAL(mutation, cellPairs); + ? 
region.coprocessorHost.postIncrementBeforeWAL(mutation, (List) cellPairs) + : region.coprocessorHost.postAppendBeforeWAL(mutation, (List) cellPairs); } } return cellPairs.stream().map(Pair::getSecond).collect(Collectors.toList()); } - private static Cell reckonDelta(final Cell delta, final Cell currentCell, + private static ExtendedCell reckonDelta(final Cell delta, final Cell currentCell, final byte[] columnFamily, final long now, Mutation mutation, Function supplier) throws IOException { // Forward any tags found on the delta. @@ -4080,7 +4083,11 @@ private static Cell reckonDelta(final Cell delta, final Cell currentCell, } else { tags = TagUtil.carryForwardTTLTag(tags, mutation.getTTL()); PrivateCellUtil.updateLatestStamp(delta, now); - return CollectionUtils.isEmpty(tags) ? delta : PrivateCellUtil.createCell(delta, tags); + assert delta instanceof ExtendedCell; + ExtendedCell deltaCell = (ExtendedCell) delta; + return CollectionUtils.isEmpty(tags) + ? deltaCell + : PrivateCellUtil.createCell(deltaCell, tags); } } @@ -4295,7 +4302,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> cpFamilyMap = cpMutation.getFamilyCellMap(); + Map> cpFamilyMap = (Map) cpMutation.getFamilyCellMap(); region.rewriteCellTags(cpFamilyMap, mutation); // will get added to the memStore later mergeFamilyMaps(familyCellMaps[i], cpFamilyMap); @@ -4304,7 +4311,7 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress cells : cpFamilyMap.values()) { + for (List cells : cpFamilyMap.values()) { miniBatchOp.addCellCount(cells.size()); } } @@ -4313,10 +4320,10 @@ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress> familyMap, - Map> toBeMerged) { - for (Map.Entry> entry : toBeMerged.entrySet()) { - List cells = familyMap.get(entry.getKey()); + private void mergeFamilyMaps(Map> familyMap, + Map> toBeMerged) { + for (Map.Entry> entry : toBeMerged.entrySet()) { + List cells = familyMap.get(entry.getKey()); if (cells == null) { familyMap.put(entry.getKey(), entry.getValue()); } else { @@ -4852,14 +4859,14 @@ private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutat byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { if (mutation instanceof Put) { - updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs); + updateCellTimestamps((Iterable) mutation.getFamilyCellMap().values(), byteTs); } // And else 'delete' is not needed since it already does a second get, and sets the // timestamp from get (see prepareDeleteTimestamps). 
} else { for (Mutation m : rowMutations.getMutations()) { if (m instanceof Put) { - updateCellTimestamps(m.getFamilyCellMap().values(), byteTs); + updateCellTimestamps((Iterable) m.getFamilyCellMap().values(), byteTs); } } // And else 'delete' is not needed since it already does a second get, and sets the @@ -4970,12 +4977,14 @@ public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare manifest.addRegion(this); } - private void updateSequenceId(final Iterable> cellItr, final long sequenceId) + private void updateSequenceId(final Iterable> cellItr, final long sequenceId) throws IOException { - for (List cells : cellItr) { - if (cells == null) return; - for (Cell cell : cells) { - PrivateCellUtil.setSequenceId(cell, sequenceId); + for (List cells : cellItr) { + if (cells == null) { + return; + } + for (ExtendedCell cell : cells) { + cell.setSequenceId(sequenceId); } } } @@ -4984,10 +4993,12 @@ private void updateSequenceId(final Iterable> cellItr, final long seq * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP} * provided current timestamp. */ - private static void updateCellTimestamps(final Iterable> cellItr, final byte[] now) - throws IOException { - for (List cells : cellItr) { - if (cells == null) continue; + private static void updateCellTimestamps(final Iterable> cellItr, + final byte[] now) throws IOException { + for (List cells : cellItr) { + if (cells == null) { + continue; + } // Optimization: 'foreach' loop is not used. See: // HBASE-12023 HRegion.applyFamilyMapToMemstore creates too many iterator objects assert cells instanceof RandomAccess; @@ -5001,7 +5012,7 @@ private static void updateCellTimestamps(final Iterable> cellItr, fin /** * Possibly rewrite incoming cell tags. */ - private void rewriteCellTags(Map> familyMap, final Mutation m) { + private void rewriteCellTags(Map> familyMap, final Mutation m) { // Check if we have any work to do and early out otherwise // Update these checks as more logic is added here if (m.getTTL() == Long.MAX_VALUE) { @@ -5009,12 +5020,12 @@ private void rewriteCellTags(Map> familyMap, final Mutation m } // From this point we know we have some work to do - for (Map.Entry> e : familyMap.entrySet()) { - List cells = e.getValue(); + for (Map.Entry> e : familyMap.entrySet()) { + List cells = e.getValue(); assert cells instanceof RandomAccess; int listSize = cells.size(); for (int i = 0; i < listSize; i++) { - Cell cell = cells.get(i); + ExtendedCell cell = cells.get(i); List newTags = TagUtil.carryForwardTags(null, cell); newTags = TagUtil.carryForwardTTLTag(newTags, m.getTTL()); // Rewrite the cell with the updated set of tags @@ -5085,7 +5096,7 @@ public void setReadsEnabled(boolean readsEnabled) { * scenario but that do not make sense otherwise. * @see #applyToMemStore(HStore, Cell, MemStoreSizing) */ - private void applyToMemStore(HStore store, List cells, boolean delta, + private void applyToMemStore(HStore store, List cells, boolean delta, MemStoreSizing memstoreAccounting) { // Any change in how we update Store/MemStore needs to also be done in other applyToMemStore!!!! 
boolean upsert = delta && store.getColumnFamilyDescriptor().getMaxVersions() == 1; @@ -5099,7 +5110,7 @@ private void applyToMemStore(HStore store, List cells, boolean delta, /** * @see #applyToMemStore(HStore, List, boolean, MemStoreSizing) */ - private void applyToMemStore(HStore store, Cell cell, MemStoreSizing memstoreAccounting) + private void applyToMemStore(HStore store, ExtendedCell cell, MemStoreSizing memstoreAccounting) throws IOException { // Any change in how we update Store/MemStore needs to also be done in other applyToMemStore!!!! if (store == null) { @@ -5437,7 +5448,9 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn boolean flush = false; MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); - for (Cell cell : val.getCells()) { + for (Cell c : val.getCells()) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // Check this edit is for me. Also, guard against writing the special // METACOLUMN info such as HBASE::CACHEFLUSH entries if (WALEdit.isMetaEditFamily(cell)) { @@ -6412,7 +6425,7 @@ private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Ob * @param s Store to add edit too. * @param cell Cell to add. */ - protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccounting) { + protected void restoreEdit(HStore s, ExtendedCell cell, MemStoreSizing memstoreAccounting) { s.add(cell, memstoreAccounting); } @@ -7690,7 +7703,7 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, lon // Handle any tag based cell features. // TODO: Do we need to call rewriteCellTags down in applyToMemStore()? Why not before // so tags go into WAL? - rewriteCellTags(m.getFamilyCellMap(), m); + rewriteCellTags((Map) m.getFamilyCellMap(), m); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); if (walEdit.isEmpty()) { @@ -7698,7 +7711,7 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, lon // If no WAL, need to stamp it here. PrivateCellUtil.setSequenceId(cell, sequenceId); } - applyToMemStore(getStore(cell), cell, memstoreAccounting); + applyToMemStore(getStore(cell), (ExtendedCell) cell, memstoreAccounting); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 6fccccfc8203..e4deae852e5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -693,8 +694,8 @@ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte f.initReader(); try { Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow); - Optional lastKey = f.getLastKey(); - Optional firstKey = f.getFirstKey(); + Optional lastKey = f.getLastKey(); + Optional firstKey = f.getFirstKey(); if (top) { // check if larger than last key. // If lastKey is null means storefile is empty. 
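The raw (Map) and (List) casts threaded through the HRegion hunks above are all instances of one idiom; a sketch of it as a helper (not part of the patch, named here only for illustration):

// Sketch: erasure lets a family map of Cell be re-viewed as one of ExtendedCell.
// This is sound only because a Mutation never stores anything but ExtendedCell.
@SuppressWarnings("unchecked")
private static Map<byte[], List<ExtendedCell>> asExtended(Map<byte[], List<Cell>> familyMap) {
  return (Map<byte[], List<ExtendedCell>>) (Map<byte[], ?>) familyMap;
}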
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 1df8d0b95807..85882b7ee5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.InnerStoreCellComparator; import org.apache.hadoop.hbase.MemoryCompactionPolicy; @@ -554,7 +555,7 @@ public void stopReplayingFromWAL() { /** * Adds a value to the memstore */ - public void add(final Cell cell, MemStoreSizing memstoreSizing) { + public void add(final ExtendedCell cell, MemStoreSizing memstoreSizing) { storeEngine.readLock(); try { if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) { @@ -571,7 +572,7 @@ public void add(final Cell cell, MemStoreSizing memstoreSizing) { /** * Adds the specified value to the memstore */ - public void add(final Iterable cells, MemStoreSizing memstoreSizing) { + public void add(final Iterable cells, MemStoreSizing memstoreSizing) { storeEngine.readLock(); try { if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) { @@ -615,7 +616,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); - Optional lk = reader.getLastKey(); + Optional lk = reader.getLastKey(); Preconditions.checkState(lk.isPresent(), "Last key can not be null"); byte[] lastKey = CellUtil.cloneRow(lk.get()); @@ -868,7 +869,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); - Optional lk = reader.getLastKey(); + Optional lk = reader.getLastKey(); Preconditions.checkState(lk.isPresent(), "Last key can not be null"); byte[] lastKey = CellUtil.cloneRow(lk.get()); if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) { @@ -1912,7 +1913,7 @@ public long getSmallestReadPoint() { * across all of them. 
* @param readpoint readpoint below which we can safely remove duplicate KVs */ - public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { + public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing) { this.storeEngine.readLock(); try { this.memstore.upsert(cells, readpoint, memstoreSizing); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index 14627ebc9389..3c486bb13668 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.io.TimeRange; @@ -158,9 +158,9 @@ public class HStoreFile implements StoreFile { private long maxMemstoreTS = -1; // firstKey, lastkey and cellComparator will be set when openReader. - private Optional firstKey; + private Optional firstKey; - private Optional lastKey; + private Optional lastKey; private CellComparator comparator; @@ -169,12 +169,12 @@ public CacheConfig getCacheConf() { } @Override - public Optional getFirstKey() { + public Optional getFirstKey() { return firstKey; } @Override - public Optional getLastKey() { + public Optional getLastKey() { return lastKey; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java index a16c2cca034f..da40c7c49b7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java @@ -20,14 +20,14 @@ import com.google.errorprone.annotations.RestrictedApi; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.yetus.audience.InterfaceAudience; /** * A MemStoreLAB implementation which wraps N MemStoreLABs. Its main duty is in proper managing the * close of the individual MemStoreLAB. This is treated as an immutable one and so do not allow to - * add any more Cells into it. {@link #copyCellInto(Cell)} throws Exception + * add any more Cells into it. {@link #copyCellInto(ExtendedCell)} throws Exception */ @InterfaceAudience.Private public class ImmutableMemStoreLAB implements MemStoreLAB { @@ -45,7 +45,7 @@ public ImmutableMemStoreLAB(List mslabs) { } @Override - public Cell copyCellInto(Cell cell) { + public ExtendedCell copyCellInto(ExtendedCell cell) { throw new IllegalStateException("This is an Immutable MemStoreLAB."); } @@ -58,7 +58,7 @@ public Cell copyCellInto(Cell cell) { * data, or null when this cell cannt be copied. 
*/ @Override - public Cell forceCopyOfBigCellInto(Cell cell) { + public ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell) { MemStoreLAB mslab = this.mslabs.get(0); return mslab.forceCopyOfBigCellInto(cell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 9f42e7ce2ad4..5fbb680edcd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -25,6 +25,7 @@ import java.util.function.IntConsumer; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -94,7 +95,7 @@ public KeyValueHeap(List scanners, CellComparator com } @Override - public Cell peek() { + public ExtendedCell peek() { if (this.current == null) { return null; } @@ -111,12 +112,12 @@ public void recordBlockSize(IntConsumer blockSizeConsumer) { } @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (this.current == null) { return null; } - Cell kvReturn = this.current.next(); - Cell kvNext = this.current.peek(); + ExtendedCell kvReturn = this.current.next(); + ExtendedCell kvNext = this.current.peek(); if (kvNext == null) { this.scannersForDelayedClose.add(this.current); this.current = null; @@ -235,24 +236,25 @@ public void close() { * As individual scanners may run past their ends, those scanners are automatically closed and * removed from the heap. *
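A hedged sketch of the ImmutableMemStoreLAB contract changed above, assuming the wrapped LABs of a compaction pipeline are at hand: copyCellInto always throws, so only the big-cell path is usable, and it delegates to the first wrapped LAB.

import java.util.List;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.ImmutableMemStoreLAB;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;

final class ImmutableLabSketch { // hypothetical helper, for illustration
  static ExtendedCell copyBigCell(List<MemStoreLAB> pipelineLabs, ExtendedCell bigCell) {
    MemStoreLAB merged = new ImmutableMemStoreLAB(pipelineLabs);
    // merged.copyCellInto(bigCell) would throw IllegalStateException: the merged
    // LAB is read-only. The big-cell path stays open and simply delegates to the
    // first wrapped LAB.
    return merged.forceCopyOfBigCellInto(bigCell);
  }
}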

- * This function (and {@link #reseek(Cell)}) does not do multi-column Bloom filter and lazy-seek - * optimizations. To enable those, call {@link #requestSeek(Cell, boolean, boolean)}. + * This function (and {@link #reseek(ExtendedCell)}) does not do multi-column Bloom filter and + * lazy-seek optimizations. To enable those, call + * {@link #requestSeek(ExtendedCell, boolean, boolean)}. * @param seekKey KeyValue to seek at or after * @return true if KeyValues exist at or after specified key, false if not */ @Override - public boolean seek(Cell seekKey) throws IOException { + public boolean seek(ExtendedCell seekKey) throws IOException { return generalizedSeek(false, // This is not a lazy seek seekKey, false, // forward (false: this is not a reseek) false); // Not using Bloom filters } /** - * This function is identical to the {@link #seek(Cell)} function except that + * This function is identical to the {@link #seek(ExtendedCell)} function except that * scanner.seek(seekKey) is changed to scanner.reseek(seekKey). */ @Override - public boolean reseek(Cell seekKey) throws IOException { + public boolean reseek(ExtendedCell seekKey) throws IOException { return generalizedSeek(false, // This is not a lazy seek seekKey, true, // forward (true because this is reseek) false); // Not using Bloom filters @@ -262,7 +264,8 @@ public boolean reseek(Cell seekKey) throws IOException { * {@inheritDoc} */ @Override - public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell key, boolean forward, boolean useBloom) + throws IOException { return generalizedSeek(true, key, forward, useBloom); } @@ -274,8 +277,8 @@ public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws I * @param forward whether to seek forward (also known as reseek) * @param useBloom whether to optimize seeks using Bloom filters */ - private boolean generalizedSeek(boolean isLazy, Cell seekKey, boolean forward, boolean useBloom) - throws IOException { + private boolean generalizedSeek(boolean isLazy, ExtendedCell seekKey, boolean forward, + boolean useBloom) throws IOException { if (!isLazy && useBloom) { throw new IllegalArgumentException( "Multi-column Bloom filter " + "optimization requires a lazy seek"); @@ -406,7 +409,7 @@ KeyValueScanner getCurrentForTesting() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { // here we return the next index key from the top scanner return current == null ? null : current.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index c3b60792fb65..bfe47772f1aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.function.IntConsumer; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -38,27 +38,27 @@ public interface KeyValueScanner extends Shipper, Closeable { * The byte array represents for NO_NEXT_INDEXED_KEY; The actual value is irrelevant because this * is always compared by reference. 
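A minimal sketch of the distinction this Javadoc draws, using the same PrivateCellUtil.createFirstOnRow conversion the patch applies elsewhere; the helper and its names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

final class SeekSketch { // hypothetical helper, for illustration
  static boolean seekToRow(KeyValueScanner scanner, byte[] row, boolean lazy)
      throws IOException {
    ExtendedCell key = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length);
    // requestSeek may defer the real seek and consult Bloom filters first;
    // plain seek always positions immediately and skips those optimizations.
    return lazy ? scanner.requestSeek(key, true, true) : scanner.seek(key);
  }
}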
*/ - public static final Cell NO_NEXT_INDEXED_KEY = new KeyValue(); + public static final ExtendedCell NO_NEXT_INDEXED_KEY = new KeyValue(); /** * Look at the next Cell in this scanner, but do not iterate scanner. NOTICE: The returned cell * has not been passed into ScanQueryMatcher. So it may not be what the user need. * @return the next Cell */ - Cell peek(); + ExtendedCell peek(); /** * Return the next Cell in this scanner, iterating the scanner * @return the next Cell */ - Cell next() throws IOException; + ExtendedCell next() throws IOException; /** * Seek the scanner at or after the specified KeyValue. * @param key seek value * @return true if scanner has values left, false if end of scanner */ - boolean seek(Cell key) throws IOException; + boolean seek(ExtendedCell key) throws IOException; /** * Reseek the scanner at or after the specified KeyValue. This method is guaranteed to seek at or @@ -67,7 +67,7 @@ public interface KeyValueScanner extends Shipper, Closeable { * @param key seek value (should be non-null) * @return true if scanner has values left, false if end of scanner */ - boolean reseek(Cell key) throws IOException; + boolean reseek(ExtendedCell key) throws IOException; /** * Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners. This is @@ -105,7 +105,7 @@ default long getScannerOrder() { * @param forward do a forward-only "reseek" instead of a random-access seek * @param useBloom whether to enable multi-column Bloom filter optimization */ - boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException; + boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) throws IOException; /** * We optimize our store scanners by checking the most recent store file first, so we sometimes @@ -148,7 +148,7 @@ default long getScannerOrder() { * @param key seek KeyValue * @return true if the scanner is at the valid KeyValue, false if such KeyValue does not exist */ - public boolean backwardSeek(Cell key) throws IOException; + public boolean backwardSeek(ExtendedCell key) throws IOException; /** * Seek the scanner at the first Cell of the row which is the previous row of specified key @@ -156,7 +156,7 @@ default long getScannerOrder() { * @return true if the scanner at the first valid Cell of previous row, false if not existing such * Cell */ - public boolean seekToPreviousRow(Cell key) throws IOException; + public boolean seekToPreviousRow(ExtendedCell key) throws IOException; /** * Seek the scanner at the first KeyValue of last row @@ -169,5 +169,5 @@ default long getScannerOrder() { * between last key of current block and first key of next block.. see * HFileWriterImpl#getMidpoint, or null if not known. 
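A small sketch of the sentinel contract described here; the helper is hypothetical, and the point is that NO_NEXT_INDEXED_KEY is meaningful only under reference comparison.

import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

final class IndexedKeyHintSketch { // hypothetical helper, for illustration
  static boolean hasUsableHint(KeyValueScanner scanner) {
    ExtendedCell hint = scanner.getNextIndexedKey();
    // Reference comparison is intentional: the sentinel's content is irrelevant.
    return hint != null && hint != KeyValueScanner.NO_NEXT_INDEXED_KEY;
  }
}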
*/ - public Cell getNextIndexedKey(); + public ExtendedCell getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index cd8eecd54301..ed8e6a2cd8ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.yetus.audience.InterfaceAudience; @@ -65,14 +66,14 @@ public interface MemStore extends Closeable { * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void add(final Cell cell, MemStoreSizing memstoreSizing); + void add(final ExtendedCell cell, MemStoreSizing memstoreSizing); /** * Write the updates * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void add(Iterable cells, MemStoreSizing memstoreSizing); + void add(Iterable cells, MemStoreSizing memstoreSizing); /** Returns Oldest timestamp of all the Cells in the MemStore */ long timeOfOldestEdit(); @@ -92,7 +93,7 @@ public interface MemStore extends Closeable { * @param memstoreSizing The delta in memstore size will be passed back via this. This will * include both data size and heap overhead delta. */ - void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing); + void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing); /** * @return scanner over the memstore. 
This might include scanner over the snapshot when one is diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java index 11955ac3c6f3..505c419005a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java @@ -22,8 +22,8 @@ import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +41,9 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator private static final Logger LOG = LoggerFactory.getLogger(MemStoreCompactorSegmentsIterator.class); - private final List kvs = new ArrayList<>(); + private final List kvs = new ArrayList<>(); private boolean hasMore = true; - private Iterator kvsIterator; + private Iterator kvsIterator; // scanner on top of pipeline scanner that uses ScanQueryMatcher private InternalScanner compactingScanner; @@ -71,7 +71,7 @@ public boolean hasNext() { } @Override - public Cell next() { + public ExtendedCell next() { if (!hasNext()) { throw new NoSuchElementException(); } @@ -132,7 +132,7 @@ private InternalScanner createScanner(HStore store, List scanne } } - /* + /** * Refill kev-value set (should be invoked only when KVS is empty) Returns true if KVS is * non-empty */ @@ -145,7 +145,10 @@ private boolean refillKVS() { kvs.clear(); for (;;) { try { - hasMore = compactingScanner.next(kvs, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = compactingScanner.next((List) kvs, scannerContext); } catch (IOException e) { // should not happen as all data are in memory throw new IllegalStateException(e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index 4edefaf7ca0d..22fd49a9918f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -38,8 +38,8 @@ * collection occurs. *
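The refillKVS hunk above narrows the internal list to ExtendedCell but still feeds the Cell-typed InternalScanner through an unchecked cast. A hypothetical helper isolating that pattern, assuming the stated invariant holds:

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;

final class CellListViewSketch { // hypothetical helper, for illustration
  // Safe only while server-side implementations add ExtendedCells exclusively,
  // which is exactly the invariant the patch comment in refillKVS spells out.
  @SuppressWarnings("unchecked")
  static List<Cell> asCellList(List<ExtendedCell> kvs) {
    return (List<Cell>) (List<? extends Cell>) kvs;
  }
}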

* This manages the large sized chunks. When Cells are to be added to Memstore, MemStoreLAB's - {@link #copyCellInto(Cell)} gets called. This allocates enough size in the chunk to hold this - cell's data and copies into this area and then recreate a Cell over this copied data. + {@link #copyCellInto(ExtendedCell)} gets called. This allocates enough size in the chunk to hold + this cell's data and copies into this area and then recreates a Cell over this copied data. *

* @see ChunkCreator */ @@ -68,7 +68,7 @@ public interface MemStoreLAB { * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance * over the copied the data. When this MemStoreLAB can not copy this Cell, it returns null. */ - Cell copyCellInto(Cell cell); + ExtendedCell copyCellInto(ExtendedCell cell); /** * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance @@ -78,7 +78,7 @@ public interface MemStoreLAB { * called while the process of flattening to CellChunkMap is running, for forcing the allocation * of big cells on this MSLAB. */ - Cell forceCopyOfBigCellInto(Cell cell); + ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell); /** * Close instance since it won't be used any more, try to put the chunks back to pool diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java index 41dd270cd22e..52829255df4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.nio.RefCnt; import org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType; import org.apache.yetus.audience.InterfaceAudience; @@ -112,7 +111,7 @@ public MemStoreLABImpl(Configuration conf) { } @Override - public Cell copyCellInto(Cell cell) { + public ExtendedCell copyCellInto(ExtendedCell cell) { // See head of copyBBECellInto for how it differs from copyCellInto return (cell instanceof ByteBufferExtendedCell) ? copyBBECellInto((ByteBufferExtendedCell) cell, maxAlloc) @@ -125,7 +124,7 @@ public Cell copyCellInto(Cell cell) { * MSLAB, during this process, the big cells are copied into MSLAB using this method. */ @Override - public Cell forceCopyOfBigCellInto(Cell cell) { + public ExtendedCell forceCopyOfBigCellInto(ExtendedCell cell) { int size = Segment.getCellLength(cell); Preconditions.checkArgument(size >= 0, "negative size"); if (size + ChunkCreator.SIZEOF_CHUNK_HEADER <= dataChunkSize) { @@ -145,7 +144,7 @@ public Cell forceCopyOfBigCellInto(Cell cell) { * it was too big. Uses less CPU. See HBASE-20875 for evidence. * @see #copyCellInto(Cell, int) */ - private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { + private ExtendedCell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { int size = cell.getSerializedSize(); Preconditions.checkArgument(size >= 0, "negative size"); // Callers should satisfy large allocations from JVM heap so limit fragmentation. 
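A hedged sketch of the null-return contract of copyCellInto as retyped above; the LAB and cell are assumed to be in scope, for example inside a segment that owns an MSLAB.

import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;

final class MslabCopySketch { // hypothetical helper, for illustration
  static ExtendedCell relocate(MemStoreLAB lab, ExtendedCell cell) {
    // A null return means the LAB declined the copy (for example, the cell
    // exceeded the max allocation), so the caller keeps the original on heap.
    ExtendedCell copy = lab.copyCellInto(cell);
    return copy != null ? copy : cell;
  }
}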
@@ -179,7 +178,7 @@ private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) { /** * @see #copyBBECellInto(ByteBufferExtendedCell, int) */ - private Cell copyCellInto(Cell cell, int maxAlloc) { + private ExtendedCell copyCellInto(ExtendedCell cell, int maxAlloc) { int size = Segment.getCellLength(cell); Preconditions.checkArgument(size >= 0, "negative size"); // Callers should satisfy large allocations directly from JVM since they @@ -216,16 +215,10 @@ private Cell copyCellInto(Cell cell, int maxAlloc) { * out of it * @see #copyBBECToChunkCell(ByteBufferExtendedCell, ByteBuffer, int, int) */ - private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int len) { + private static ExtendedCell copyToChunkCell(ExtendedCell cell, ByteBuffer buf, int offset, + int len) { int tagsLen = cell.getTagsLength(); - if (cell instanceof ExtendedCell) { - ((ExtendedCell) cell).write(buf, offset); - } else { - // Normally all Cell impls within Server will be of type ExtendedCell. Just considering the - // other case also. The data fragments within Cell is copied into buf as in KeyValue - // serialization format only. - KeyValueUtil.appendTo(cell, buf, offset, true); - } + cell.write(buf, offset); return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId()); } @@ -234,14 +227,14 @@ private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int l * out of it * @see #copyToChunkCell(Cell, ByteBuffer, int, int) */ - private static Cell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, int offset, - int len) { + private static ExtendedCell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, + int offset, int len) { int tagsLen = cell.getTagsLength(); cell.write(buf, offset); return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId()); } - private static Cell createChunkCell(ByteBuffer buf, int offset, int len, int tagsLen, + private static ExtendedCell createChunkCell(ByteBuffer buf, int offset, int len, int tagsLen, long sequenceId) { // TODO : write the seqid here. 
For writing seqId we should create a new cell type so // that seqId is not used as the state diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java index 0a91ada9e07e..1cb8c717c4c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -61,7 +61,7 @@ public boolean hasNext() { } @Override - public Cell next() { + public ExtendedCell next() { try { // try to get next if (!closed && heap != null) { return heap.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java index 4657b060a8d0..14738d7a10a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.Iterator; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -28,7 +28,7 @@ * not thread-safe and must have only one instance per MemStore in each period of time */ @InterfaceAudience.Private -public abstract class MemStoreSegmentsIterator implements Iterator { +public abstract class MemStoreSegmentsIterator implements Iterator { protected final ScannerContext scannerContext; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java index 945bcef05756..9de37c3f40cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; import org.apache.hadoop.hbase.mob.MobUtils; @@ -73,7 +74,8 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException long mobKVCount = 0; long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { - Cell cell = outResult.get(i); + // At server side, we should only get ExtendedCell + ExtendedCell cell = (ExtendedCell) outResult.get(i); if (MobUtils.isMobReferenceCell(cell)) { MobCell mobCell = mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index 53cb1bcdbc91..6ebd1cf9c9cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -20,9 +20,9 @@ import java.util.Iterator; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -41,8 +41,8 @@ public class MutableSegment extends Segment { ClassSize.align(Segment.DEEP_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.SYNC_TIMERANGE_TRACKER + ClassSize.REFERENCE + ClassSize.ATOMIC_BOOLEAN); - protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, - MemStoreSizing memstoreSizing) { + protected MutableSegment(CellSet cellSet, CellComparator comparator, + MemStoreLAB memStoreLAB, MemStoreSizing memstoreSizing) { super(cellSet, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.SYNC)); incMemStoreSize(0, DEEP_OVERHEAD, 0, 0); // update the mutable segment metadata if (memstoreSizing != null) { @@ -55,24 +55,25 @@ protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB * @param cell the cell to add * @param mslabUsed whether using MSLAB */ - public void add(Cell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, + public void add(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) { internalAdd(cell, mslabUsed, memStoreSizing, sizeAddedPreOperation); } - public void upsert(Cell cell, long readpoint, MemStoreSizing memStoreSizing, + public void upsert(ExtendedCell cell, long readpoint, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) { internalAdd(cell, false, memStoreSizing, sizeAddedPreOperation); // Get the Cells for the row/family/qualifier regardless of timestamp. 
// For this case we want to clean up any other puts - Cell firstCell = PrivateCellUtil.createFirstOnRowColTS(cell, HConstants.LATEST_TIMESTAMP); - SortedSet ss = this.tailSet(firstCell); - Iterator it = ss.iterator(); + ExtendedCell firstCell = + PrivateCellUtil.createFirstOnRowColTS(cell, HConstants.LATEST_TIMESTAMP); + SortedSet ss = this.tailSet(firstCell); + Iterator it = ss.iterator(); // versions visible to oldest scanner int versionsVisible = 0; while (it.hasNext()) { - Cell cur = it.next(); + ExtendedCell cur = it.next(); if (cell == cur) { // ignore the one just put in @@ -118,7 +119,7 @@ public boolean setInMemoryFlushed() { * Returns the first cell in the segment * @return the first cell in the segment */ - Cell first() { + ExtendedCell first() { return this.getCellSet().first(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java index 8f1898a3c658..f55bbcc639de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java @@ -21,7 +21,7 @@ import java.util.function.IntConsumer; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -33,7 +33,8 @@ public abstract class NonLazyKeyValueScanner implements KeyValueScanner { @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) + throws IOException { return doRealSeek(this, kv, forward); } @@ -47,7 +48,7 @@ public void enforceSeek() throws IOException { throw new NotImplementedException("enforceSeek must not be called on a " + "non-lazy scanner"); } - public static boolean doRealSeek(KeyValueScanner scanner, Cell kv, boolean forward) + public static boolean doRealSeek(KeyValueScanner scanner, ExtendedCell kv, boolean forward) throws IOException { return forward ? 
scanner.reseek(kv) : scanner.seek(kv); } @@ -76,7 +77,7 @@ public Path getFilePath() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java index 2c2cf26e9fa7..e5eebd85b547 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java @@ -19,7 +19,7 @@ import java.io.IOException; import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -30,13 +30,13 @@ public abstract class NonReversedNonLazyKeyValueScanner extends NonLazyKeyValueScanner { @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { throw new NotImplementedException( "backwardSeek must not be called on a " + "non-reversed scanner"); } @Override - public boolean seekToPreviousRow(Cell key) throws IOException { + public boolean seekToPreviousRow(ExtendedCell key) throws IOException { throw new NotImplementedException( "seekToPreviousRow must not be called on a " + "non-reversed scanner"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index d829b1961070..a5fc2947bee0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -441,7 +442,7 @@ private boolean nextInternal(List results, ScannerContext scannerContext) region.checkInterrupt(); // Let's see what we have in the storeHeap. 
- Cell current = this.storeHeap.peek(); + ExtendedCell current = this.storeHeap.peek(); boolean shouldStop = shouldStop(current); // When has filter row is true it means that the all the cells for a particular row must be @@ -651,7 +652,7 @@ private void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { } /** Returns true when the joined heap may have data for the current row */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { + private boolean joinedHeapMayHaveData(ExtendedCell currentRowCell) throws IOException { Cell nextJoinedKv = joinedHeap.peek(); boolean matchCurrentRow = nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); @@ -660,7 +661,7 @@ private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { // If the next value in the joined heap does not match the current row, try to seek to the // correct row if (!matchCurrentRow) { - Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); + ExtendedCell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); @@ -776,7 +777,7 @@ public synchronized boolean reseek(byte[] row) throws IOException { } boolean result = false; region.startRegionOperation(); - Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); + ExtendedCell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); try { // use request seek to make use of the lazy seek option. See HBASE-5520 result = this.storeHeap.requestSeek(kv, true, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java index 60c634578aa7..522aac80aca8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; @@ -43,22 +44,23 @@ public ReversedKeyValueHeap(List scanners, CellCompar } @Override - public boolean seek(Cell seekKey) throws IOException { + public boolean seek(ExtendedCell seekKey) throws IOException { throw new IllegalStateException("seek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean reseek(Cell seekKey) throws IOException { + public boolean reseek(ExtendedCell seekKey) throws IOException { throw new IllegalStateException("reseek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell key, boolean forward, boolean useBloom) + throws IOException { throw new IllegalStateException("requestSeek cannot be called on ReversedKeyValueHeap"); } @Override - public boolean seekToPreviousRow(Cell seekKey) throws IOException { + public boolean seekToPreviousRow(ExtendedCell seekKey) throws IOException { if (current == null) { return false; } @@ -87,7 +89,7 @@ public boolean seekToPreviousRow(Cell seekKey) throws IOException { } 
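A minimal sketch of how callers position the reversed heap after this change; the helper is hypothetical, and the key construction mirrors the patch's own use of PrivateCellUtil.

import java.io.IOException;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

final class ReverseSeekSketch { // hypothetical helper, for illustration
  static boolean positionBackward(KeyValueScanner reversedHeap, byte[] row)
      throws IOException {
    // seek/reseek/requestSeek throw IllegalStateException on the reversed heap;
    // backwardSeek is the supported way to position it.
    ExtendedCell key = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length);
    return reversedHeap.backwardSeek(key);
  }
}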
@Override - public boolean backwardSeek(Cell seekKey) throws IOException { + public boolean backwardSeek(ExtendedCell seekKey) throws IOException { if (current == null) { return false; } @@ -116,12 +118,12 @@ public boolean backwardSeek(Cell seekKey) throws IOException { } @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (this.current == null) { return null; } - Cell kvReturn = this.current.next(); - Cell kvNext = this.current.peek(); + ExtendedCell kvReturn = this.current.next(); + ExtendedCell kvNext = this.current.peek(); if (kvNext == null || this.comparator.kvComparator.compareRows(kvNext, kvReturn) > 0) { if (this.current.seekToPreviousRow(kvReturn)) { this.heap.add(this.current); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java index 7863493e3282..505cd5dedcee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mob.MobCell; import org.apache.hadoop.hbase.mob.MobUtils; @@ -73,9 +74,10 @@ public boolean next(List outResult, ScannerContext ctx) throws IOException long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { Cell cell = outResult.get(i); + assert cell instanceof ExtendedCell; if (MobUtils.isMobReferenceCell(cell)) { - MobCell mobCell = - mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); + MobCell mobCell = mobStore.resolve((ExtendedCell) cell, cacheMobBlocks, readPt, + readEmptyValueOnMobCellMiss); mobKVCount++; mobKVSize += mobCell.getCell().getValueLength(); outResult.set(i, mobCell.getCell()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java index d0ea2e08d173..e9eeba2b6f4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -58,7 +59,7 @@ protected KeyValueHeap newKVHeap(List scanners, } @Override - protected void seekScanners(List scanners, Cell seekKey, + protected void seekScanners(List scanners, ExtendedCell seekKey, boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the previous matching Row). 
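Both MOB scanners above rely on the same server-side downcast. A hypothetical helper making that assumption explicit:

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;

final class ServerCellSketch { // hypothetical helper, for illustration
  static ExtendedCell serverCell(List<Cell> outResult, int i) {
    // Scan results at the region server are produced by server code only, so
    // every element is expected to be an ExtendedCell; the cast makes it explicit.
    return (ExtendedCell) outResult.get(i);
  }
}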
@@ -74,7 +75,7 @@ protected void seekScanners(List scanners, Cell seekK } @Override - protected boolean seekToNextRow(Cell kv) throws IOException { + protected boolean seekToNextRow(ExtendedCell kv) throws IOException { return seekToPreviousRow(kv); } @@ -82,7 +83,7 @@ protected boolean seekToNextRow(Cell kv) throws IOException { * Do a backwardSeek in a reversed StoreScanner(scan backward) */ @Override - protected boolean seekAsDirection(Cell kv) throws IOException { + protected boolean seekAsDirection(ExtendedCell kv) throws IOException { return backwardSeek(kv); } @@ -98,17 +99,17 @@ protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) } @Override - public boolean reseek(Cell kv) throws IOException { + public boolean reseek(ExtendedCell kv) throws IOException { throw new IllegalStateException("reseek cannot be called on ReversedStoreScanner"); } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { throw new IllegalStateException("seek cannot be called on ReversedStoreScanner"); } @Override - public boolean seekToPreviousRow(Cell key) throws IOException { + public boolean seekToPreviousRow(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -116,7 +117,7 @@ public boolean seekToPreviousRow(Cell key) throws IOException { } @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 272dd4069629..b5e6192a0c32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; @@ -50,7 +51,7 @@ public abstract class Segment implements MemStoreSizing { public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE + ClassSize.CELL_SET + 2 * ClassSize.ATOMIC_LONG + ClassSize.REENTRANT_LOCK; - private AtomicReference cellSet = new AtomicReference<>(); + private AtomicReference> cellSet = new AtomicReference<>(); private final CellComparator comparator; private ReentrantReadWriteLock updatesLock; protected long minSequenceId; @@ -93,8 +94,8 @@ protected Segment(CellComparator comparator, List segments, } // This constructor is used to create empty Segments. - protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, - TimeRangeTracker trt) { + protected Segment(CellSet cellSet, CellComparator comparator, + MemStoreLAB memStoreLAB, TimeRangeTracker trt) { this.cellSet.set(cellSet); this.comparator = comparator; this.updatesLock = new ReentrantReadWriteLock(); @@ -154,12 +155,12 @@ public void close() { * set to 'true' and the cell is copied into MSLAB. 
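A hedged sketch against the maybeCloneWithAllocator signature retyped above; segment and cell are assumed in scope.

import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.Segment;

final class SegmentCloneSketch { // hypothetical helper, for illustration
  static ExtendedCell intoSegmentMemory(Segment segment, ExtendedCell cell) {
    // Returns the original cell when no MSLAB is attached, otherwise an
    // MSLAB-backed clone; 'true' would force even big cells into the LAB.
    return segment.maybeCloneWithAllocator(cell, false);
  }
}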
* @return either the given cell or its clone */ - public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) { + public ExtendedCell maybeCloneWithAllocator(ExtendedCell cell, boolean forceCloneOfBigCell) { if (this.memStoreLAB == null) { return cell; } - Cell cellFromMslab; + ExtendedCell cellFromMslab; if (forceCloneOfBigCell) { cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell); } else { @@ -202,7 +203,7 @@ public void decScannerCount() { * @return this object */ - protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { + protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { this.cellSet.compareAndSet(cellSetOld, cellSetNew); return this; } @@ -265,15 +266,15 @@ public TimeRangeTracker getTimeRangeTracker() { } // *** Methods for SegmentsScanner - public Cell last() { + public ExtendedCell last() { return getCellSet().last(); } - public Iterator iterator() { + public Iterator iterator() { return getCellSet().iterator(); } - public SortedSet headSet(Cell firstKeyOnRow) { + public SortedSet headSet(ExtendedCell firstKeyOnRow) { return getCellSet().headSet(firstKeyOnRow); } @@ -286,7 +287,7 @@ public int compareRows(Cell left, Cell right) { } /** Returns a set of all cells in the segment */ - protected CellSet getCellSet() { + protected CellSet getCellSet() { return cellSet.get(); } @@ -298,13 +299,13 @@ protected CellComparator getComparator() { return comparator; } - protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, + protected void internalAdd(ExtendedCell cell, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { boolean succ = getCellSet().add(cell); updateMetaInfo(cell, succ, mslabUsed, memstoreSizing, sizeAddedPreOperation); } - protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, boolean mslabUsed, MemStoreSizing memstoreSizing, boolean sizeAddedPreOperation) { long delta = 0; long cellSize = getCellLength(cellToAdd); @@ -335,7 +336,8 @@ protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed, } } - protected void updateMetaInfo(Cell cellToAdd, boolean succ, MemStoreSizing memstoreSizing) { + protected void updateMetaInfo(ExtendedCell cellToAdd, boolean succ, + MemStoreSizing memstoreSizing) { updateMetaInfo(cellToAdd, succ, (getMemStoreLAB() != null), memstoreSizing, false); } @@ -396,7 +398,7 @@ protected long indexEntryOffHeapSize(boolean offHeap) { * @param firstCell a cell in the segment * @return a subset of the segment cell set, which starts with the given cell */ - protected SortedSet tailSet(Cell firstCell) { + protected SortedSet tailSet(ExtendedCell firstCell) { return getCellSet().tailSet(firstCell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index a05ac364fc01..f263bf01fe24 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -139,7 +140,7 @@ private ImmutableSegment createImmutableSegment(final Configuration conf, private 
MutableSegment generateMutableSegment(final Configuration conf, CellComparator comparator, MemStoreLAB memStoreLAB, MemStoreSizing memstoreSizing) { // TBD use configuration to set type of segment - CellSet set = new CellSet(comparator); + CellSet set = new CellSet<>(comparator); return new MutableSegment(set, comparator, memStoreLAB, memstoreSizing); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index 1d28c55570ed..4d380d936f58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -24,6 +24,7 @@ import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.yetus.audience.InterfaceAudience; @@ -40,17 +41,17 @@ public class SegmentScanner implements KeyValueScanner { private long readPoint; // the current iterator that can be reinitialized by // seek(), backwardSeek(), or reseek() - protected Iterator iter; + protected Iterator iter; // the pre-calculated cell to be returned by peek() - protected Cell current = null; + protected ExtendedCell current = null; // or next() // A flag represents whether could stop skipping KeyValues for MVCC // if have encountered the next row. Only used for reversed scan private boolean stopSkippingKVsIfNextRow = false; // Stop skipping KeyValues for MVCC if finish this row. Only used for reversed scan - private Cell stopSkippingKVsRow; + private ExtendedCell stopSkippingKVsRow; // last iterated KVs by seek (to restore the iterator state after reseek) - private Cell last = null; + private ExtendedCell last = null; // flag to indicate if this scanner is closed protected boolean closed = false; @@ -77,7 +78,7 @@ protected SegmentScanner(Segment segment, long readPoint) { * @return the currently observed Cell */ @Override - public Cell peek() { // sanity check, the current should be always valid + public ExtendedCell peek() { // sanity check, the current should be always valid if (closed) { return null; } @@ -93,11 +94,11 @@ public Cell peek() { // sanity check, the current should be always valid * @return the next Cell or null if end of scanner */ @Override - public Cell next() throws IOException { + public ExtendedCell next() throws IOException { if (closed) { return null; } - Cell oldCurrent = current; + ExtendedCell oldCurrent = current; updateCurrent(); // update the currently observed Cell return oldCurrent; } @@ -108,7 +109,7 @@ public Cell next() throws IOException { * @return true if scanner has values left, false if end of scanner */ @Override - public boolean seek(Cell cell) throws IOException { + public boolean seek(ExtendedCell cell) throws IOException { if (closed) { return false; } @@ -124,7 +125,7 @@ public boolean seek(Cell cell) throws IOException { return (current != null); } - protected Iterator getIterator(Cell cell) { + protected Iterator getIterator(ExtendedCell cell) { return segment.tailSet(cell).iterator(); } @@ -136,7 +137,7 @@ protected Iterator getIterator(Cell cell) { * @return true if scanner has values left, false if end of scanner */ @Override - public boolean reseek(Cell cell) throws IOException { + public boolean reseek(ExtendedCell cell) 
throws IOException { if (closed) { return false; } @@ -161,7 +162,7 @@ public boolean reseek(Cell cell) throws IOException { * @return true if the scanner is at the valid KeyValue, false if such Cell does not exist */ @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { if (closed) { return false; } @@ -179,21 +180,21 @@ public boolean backwardSeek(Cell key) throws IOException { * Cell */ @Override - public boolean seekToPreviousRow(Cell cell) throws IOException { + public boolean seekToPreviousRow(ExtendedCell cell) throws IOException { if (closed) { return false; } boolean keepSeeking; Cell key = cell; do { - Cell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key); - SortedSet cellHead = segment.headSet(firstKeyOnRow); + ExtendedCell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key); + SortedSet cellHead = segment.headSet(firstKeyOnRow); Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last(); if (lastCellBeforeRow == null) { current = null; return false; } - Cell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow); + ExtendedCell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow); this.stopSkippingKVsIfNextRow = true; this.stopSkippingKVsRow = firstKeyOnPreviousRow; seek(firstKeyOnPreviousRow); @@ -220,12 +221,12 @@ public boolean seekToLastRow() throws IOException { if (closed) { return false; } - Cell higherCell = segment.isEmpty() ? null : segment.last(); + ExtendedCell higherCell = segment.isEmpty() ? null : segment.last(); if (higherCell == null) { return false; } - Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell); + ExtendedCell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell); if (seek(firstCellOnLastRow)) { return true; @@ -258,7 +259,7 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) } @Override - public boolean requestSeek(Cell c, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell c, boolean forward, boolean useBloom) throws IOException { return NonLazyKeyValueScanner.doRealSeek(this, c, forward); } @@ -302,7 +303,7 @@ public Path getFilePath() { * otherwise Not relevant for in-memory scanner */ @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return null; } @@ -334,7 +335,7 @@ private Segment getSegment() { * Private internal method for iterating over the segment, skipping the cells with irrelevant MVCC */ protected void updateCurrent() { - Cell next = null; + ExtendedCell next = null; try { while (iter.hasNext()) { @@ -363,7 +364,7 @@ protected void updateCurrent() { * Private internal method that returns the higher of the two key values, or null if they are both * null */ - private Cell getHighest(Cell first, Cell second) { + private ExtendedCell getHighest(ExtendedCell first, ExtendedCell second) { if (first == null && second == null) { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java index 3109920dffae..fdfcf4cf7d28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import 
java.util.Iterator; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.yetus.audience.InterfaceAudience; /** @@ -30,8 +30,8 @@ @InterfaceAudience.Private public class SnapshotSegmentScanner extends NonReversedNonLazyKeyValueScanner { private final ImmutableSegment segment; - private Iterator iter; - private Cell current; + private Iterator iter; + private ExtendedCell current; public SnapshotSegmentScanner(ImmutableSegment segment) { this.segment = segment; @@ -42,18 +42,18 @@ public SnapshotSegmentScanner(ImmutableSegment segment) { } } - private static Iterator createIterator(Segment segment) { + private static Iterator createIterator(Segment segment) { return segment.getCellSet().iterator(); } @Override - public Cell peek() { + public ExtendedCell peek() { return current; } @Override - public Cell next() { - Cell oldCurrent = current; + public ExtendedCell next() { + ExtendedCell oldCurrent = current; if (iter.hasNext()) { current = iter.next(); } else { @@ -63,16 +63,16 @@ public Cell next() { } @Override - public boolean seek(Cell seekCell) { + public boolean seek(ExtendedCell seekCell) { // restart iterator this.iter = createIterator(this.segment); return reseek(seekCell); } @Override - public boolean reseek(Cell seekCell) { + public boolean reseek(ExtendedCell seekCell) { while (this.iter.hasNext()) { - Cell next = this.iter.next(); + ExtendedCell next = this.iter.next(); int ret = this.segment.getComparator().compare(next, seekCell); if (ret >= 0) { this.current = next; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index d8f004fdeedd..b58e1f402e4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -21,8 +21,8 @@ import java.util.Optional; import java.util.OptionalLong; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -42,12 +42,12 @@ public interface StoreFile { /** * Get the first key in this store file. */ - Optional getFirstKey(); + Optional getFirstKey(); /** * Get the last key in this store file. */ - Optional getLastKey(); + Optional getLastKey(); /** * Get the comparator for comparing two cells. 
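A minimal sketch against the Optional-returning StoreFile accessors; this mirrors the CellUtil.cloneRow usage the patch keeps in HStore.

import java.util.Optional;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.regionserver.StoreFile;

final class KeyRangeSketch { // hypothetical helper, for illustration
  static Optional<byte[]> lastRow(StoreFile sf) {
    // An empty Optional means an empty file; row extraction needs only the
    // plain Cell view, so CellUtil.cloneRow still applies.
    return sf.getLastKey().map(CellUtil::cloneRow);
  }
}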
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 09c379227bda..622731dfccfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -452,8 +453,8 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo * @return true if there is overlap, false otherwise */ public boolean passesKeyRangeFilter(Scan scan) { - Optional firstKeyKV = this.getFirstKey(); - Optional lastKeyKV = this.getLastKey(); + Optional firstKeyKV = this.getFirstKey(); + Optional lastKeyKV = this.getLastKey(); if (!firstKeyKV.isPresent() || !lastKeyKV.isPresent()) { // the file is empty return false; @@ -579,7 +580,7 @@ private void setDeleteFamilyBloomFilterFaulty() { this.deleteFamilyBloomFilter = null; } - public Optional getLastKey() { + public Optional getLastKey() { return reader.getLastKey(); } @@ -587,7 +588,7 @@ public Optional getLastRowKey() { return reader.getLastRowKey(); } - public Optional midKey() throws IOException { + public Optional midKey() throws IOException { return reader.midKey(); } @@ -607,7 +608,7 @@ public long getDeleteFamilyCnt() { return deleteFamilyCnt; } - public Optional getFirstKey() { + public Optional getFirstKey() { return reader.getFirstKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index fd941de4df87..6e0824a16c62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -49,12 +50,12 @@ public class StoreFileScanner implements KeyValueScanner { // the reader it comes from: private final StoreFileReader reader; private final HFileScanner hfs; - private Cell cur = null; + private ExtendedCell cur = null; private boolean closed = false; private boolean realSeekDone; private boolean delayedReseek; - private Cell delayedSeekKV; + private ExtendedCell delayedSeekKV; private final boolean enforceMVCC; private final boolean hasMVCCInfo; @@ -193,13 +194,13 @@ public String toString() { } @Override - public Cell peek() { + public ExtendedCell peek() { return cur; } @Override - public Cell next() throws IOException { - Cell retKey = cur; + public ExtendedCell next() throws IOException { + ExtendedCell retKey = cur; try { // only seek if we aren't at the end. cur == null implies 'end'. 
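A small sketch of the peek()/next() contract this scanner code follows; the drain helper is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

final class DrainSketch { // hypothetical helper, for illustration
  static long drain(KeyValueScanner scanner) throws IOException {
    long count = 0;
    // peek() never advances; next() returns the peeked cell and moves on.
    while (scanner.peek() != null) {
      scanner.next();
      count++;
    }
    return count;
  }
}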
@@ -219,7 +220,7 @@ public Cell next() throws IOException { } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { if (seekCount != null) seekCount.increment(); try { @@ -248,7 +249,7 @@ public boolean seek(Cell key) throws IOException { } @Override - public boolean reseek(Cell key) throws IOException { + public boolean reseek(ExtendedCell key) throws IOException { if (seekCount != null) seekCount.increment(); try { @@ -275,7 +276,7 @@ public boolean reseek(Cell key) throws IOException { } } - protected void setCurrentCell(Cell newVal) throws IOException { + protected void setCurrentCell(ExtendedCell newVal) throws IOException { this.cur = newVal; if (this.cur != null && this.reader.isBulkLoaded() && !this.reader.isSkipResetSeqId()) { PrivateCellUtil.setSequenceId(cur, this.reader.getSequenceID()); @@ -315,7 +316,7 @@ public void close() { } /** Returns false if not found or if k is after the end. */ - public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { + public static boolean seekAtOrAfter(HFileScanner s, ExtendedCell k) throws IOException { int result = s.seekTo(k); if (result < 0) { if (result == HConstants.INDEX_KEY_MAGIC) { @@ -333,7 +334,7 @@ public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { return true; } - static boolean reseekAtOrAfter(HFileScanner s, Cell k) throws IOException { + static boolean reseekAtOrAfter(HFileScanner s, ExtendedCell k) throws IOException { // This function is similar to seekAtOrAfter function int result = s.reseekTo(k); if (result <= 0) { @@ -375,7 +376,8 @@ public long getScannerOrder() { * the next row/column and use OLDEST_TIMESTAMP in the seek key. */ @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom) + throws IOException { if (kv.getFamilyLength() == 0) { useBloom = false; } @@ -498,7 +500,7 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) } @Override - public boolean seekToPreviousRow(Cell originalKey) throws IOException { + public boolean seekToPreviousRow(ExtendedCell originalKey) throws IOException { try { if (isFastSeekingEncoding) { return seekToPreviousRowStateless(originalKey); @@ -528,7 +530,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { private boolean seekToPreviousRowWithHint() throws IOException { do { // Using our existing seek hint, set our next seek hint - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow); seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); // Reseek back to our initial seek hint (i.e. 
what we think is the start of the @@ -560,13 +562,13 @@ private boolean seekToPreviousRowWithHint() throws IOException { */ private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOException { // Rewind to the cell before the beginning of this row - Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey); + ExtendedCell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey); if (!seekBefore(keyAtBeginningOfRow)) { return false; } // Rewind before this row and save what we find as a seek hint - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow); // Seek back to the start of the previous row @@ -598,15 +600,15 @@ private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOExceptio * It should be used if the cost for seeking is lower i.e. when using a fast seeking data block * encoding like RIV1. */ - private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException { - Cell key = originalKey; + private boolean seekToPreviousRowStateless(ExtendedCell originalKey) throws IOException { + ExtendedCell key = originalKey; do { - Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key); + ExtendedCell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key); if (!seekBefore(keyAtBeginningOfRow)) { return false; } - Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); + ExtendedCell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell()); if (!seekAtOrAfter(firstKeyOfPreviousRow)) { return false; } @@ -618,7 +620,7 @@ private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException } while (true); } - private boolean seekBefore(Cell seekKey) throws IOException { + private boolean seekBefore(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -638,7 +640,7 @@ private boolean seekBefore(Cell seekKey) throws IOException { * being null again via this method, that's because there doesn't exist a row before the seek * target in the storefile (i.e. 
we're at the beginning of the storefile) */ - private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException { + private void seekBeforeAndSaveKeyToPreviousRow(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -653,7 +655,7 @@ private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException } } - private boolean seekAtOrAfter(Cell seekKey) throws IOException { + private boolean seekAtOrAfter(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -665,7 +667,7 @@ private boolean seekAtOrAfter(Cell seekKey) throws IOException { return true; } - private boolean reseekAtOrAfter(Cell seekKey) throws IOException { + private boolean reseekAtOrAfter(ExtendedCell seekKey) throws IOException { if (seekCount != null) { seekCount.increment(); } @@ -700,7 +702,7 @@ public boolean seekToLastRow() throws IOException { if (!lastRow.isPresent()) { return false; } - Cell seekKey = PrivateCellUtil.createFirstOnRow(lastRow.get()); + ExtendedCell seekKey = PrivateCellUtil.createFirstOnRow(lastRow.get()); if (seek(seekKey)) { return true; } else { @@ -709,7 +711,7 @@ public boolean seekToLastRow() throws IOException { } @Override - public boolean backwardSeek(Cell key) throws IOException { + public boolean backwardSeek(ExtendedCell key) throws IOException { seek(key); if (cur == null || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); @@ -718,7 +720,7 @@ public boolean backwardSeek(Cell key) throws IOException { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return hfs.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 61d5b91b35b6..5375351c2500 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -105,15 +106,15 @@ public class StoreFileWriter implements CellSink, ShipperListener { private final boolean shouldDropCacheBehind; private final Supplier<Collection<HStoreFile>> compactedFilesSupplier; private final CellComparator comparator; - private Cell lastCell; + private ExtendedCell lastCell; // The first (latest) delete family marker of the current row - private Cell deleteFamily; + private ExtendedCell deleteFamily; // The list of delete family version markers of the current row - private List<Cell> deleteFamilyVersionList = new ArrayList<>(); + private List<ExtendedCell> deleteFamilyVersionList = new ArrayList<>(); // The first (latest) delete column marker of the current column - private Cell deleteColumn; + private ExtendedCell deleteColumn; // The list of delete column version markers of the current column - private List<Cell> deleteColumnVersionList = new ArrayList<>(); + private List<ExtendedCell> deleteColumnVersionList = new ArrayList<>(); // The live put cell count for the current column private int livePutCellCount; private final int maxVersions; @@ -344,14 +345,14 @@ private void initColumnState() { } - private boolean isDeletedByDeleteFamily(Cell cell) { +
private boolean isDeletedByDeleteFamily(ExtendedCell cell) { return deleteFamily != null && (deleteFamily.getTimestamp() > cell.getTimestamp() || (deleteFamily.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteFamily.getSequenceId()))); } - private boolean isDeletedByDeleteFamilyVersion(Cell cell) { - for (Cell deleteFamilyVersion : deleteFamilyVersionList) { + private boolean isDeletedByDeleteFamilyVersion(ExtendedCell cell) { + for (ExtendedCell deleteFamilyVersion : deleteFamilyVersionList) { if ( deleteFamilyVersion.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteFamilyVersion.getSequenceId()) @@ -362,14 +363,14 @@ private boolean isDeletedByDeleteFamilyVersion(Cell cell) { return false; } - private boolean isDeletedByDeleteColumn(Cell cell) { + private boolean isDeletedByDeleteColumn(ExtendedCell cell) { return deleteColumn != null && (deleteColumn.getTimestamp() > cell.getTimestamp() || (deleteColumn.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteColumn.getSequenceId()))); } - private boolean isDeletedByDeleteColumnVersion(Cell cell) { - for (Cell deleteColumnVersion : deleteColumnVersionList) { + private boolean isDeletedByDeleteColumnVersion(ExtendedCell cell) { + for (ExtendedCell deleteColumnVersion : deleteColumnVersionList) { if ( deleteColumnVersion.getTimestamp() == cell.getTimestamp() && (!newVersionBehavior || cell.getSequenceId() < deleteColumnVersion.getSequenceId()) @@ -380,12 +381,12 @@ private boolean isDeletedByDeleteColumnVersion(Cell cell) { return false; } - private boolean isDeleted(Cell cell) { + private boolean isDeleted(ExtendedCell cell) { return isDeletedByDeleteFamily(cell) || isDeletedByDeleteColumn(cell) || isDeletedByDeleteFamilyVersion(cell) || isDeletedByDeleteColumnVersion(cell); } - private void appendCell(Cell cell) throws IOException { + private void appendCell(ExtendedCell cell) throws IOException { if ((lastCell == null || !CellUtil.matchingColumn(lastCell, cell))) { initColumnState(); } @@ -458,11 +459,11 @@ private void appendCell(Cell cell) throws IOException { } @Override - public void appendAll(List<Cell> cellList) throws IOException { + public void appendAll(List<ExtendedCell> cellList) throws IOException { if (historicalFilePath == null) { // The dual writing is not enabled and all cells are written to one file. We use // the live version file in this case - for (Cell cell : cellList) { + for (ExtendedCell cell : cellList) { liveFileWriter.append(cell); } return; @@ -474,13 +475,13 @@ public void appendAll(List<Cell> cellList) throws IOException { // It is a new row and thus time to reset the state initRowState(); } - for (Cell cell : cellList) { + for (ExtendedCell cell : cellList) { appendCell(cell); } } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { if (historicalFilePath == null) { // The dual writing is not enabled and all cells are written to one file. We use // the live version file in this case @@ -675,14 +676,14 @@ private void appendTrackedTimestampsToMetadata() throws IOException { * Record the earliest Put timestamp.
If the timeRangeTracker is not set, update TimeRangeTracker * to include the timestamp of this key */ - private void trackTimestamps(final Cell cell) { + private void trackTimestamps(final ExtendedCell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); } timeRangeTracker.includeTimestamp(cell); } - private void appendGeneralBloomfilter(final Cell cell) throws IOException { + private void appendGeneralBloomfilter(final ExtendedCell cell) throws IOException { if (this.generalBloomFilterWriter != null) { /* * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue. @@ -694,7 +695,7 @@ private void appendGeneralBloomfilter(final Cell cell) throws IOException { } } - private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { + private void appendDeleteFamilyBloomFilter(final ExtendedCell cell) throws IOException { if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { return; } @@ -706,7 +707,7 @@ private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { } } - private void append(final Cell cell) throws IOException { + private void append(final ExtendedCell cell) throws IOException { appendGeneralBloomfilter(cell); appendDeleteFamilyBloomFilter(cell); writer.append(cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index 82ec1b0c5bf1..4b1826c39b02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -24,7 +24,7 @@ import java.util.function.Consumer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputControlUtil; @@ -119,7 +119,7 @@ protected void performFlush(InternalScanner scanner, CellSink sink, ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List<Cell> kvs = new ArrayList<>(); + List<ExtendedCell> kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush @@ -130,9 +130,12 @@ protected void performFlush(InternalScanner scanner, CellSink sink, } try { do { - hasMore = scanner.next(kvs, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = scanner.next((List) kvs, scannerContext); if (!kvs.isEmpty()) { - for (Cell c : kvs) { + for (ExtendedCell c : kvs) { // If we know that this KV is going to be included always, then let us // set its memstoreTS to 0. This will help us save space when writing to // disk.
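Both performFlush above and performCompaction later in this patch drain an InternalScanner into a List<ExtendedCell> through a raw-List cast, because the coprocessor-facing interface only speaks Cell. The sketch below isolates that pattern with stand-in types (ScannerSketch, CellBase, ServerCell are hypothetical names, not HBase API): the unchecked cast is sound only because every server-side scanner implementation is trusted to add ExtendedCell instances exclusively.

import java.util.ArrayList;
import java.util.List;

interface CellBase { long getTimestamp(); }

interface ServerCell extends CellBase { long getSequenceId(); }

// Stand-in for InternalScanner: the public signature exposes only CellBase.
interface ScannerSketch {
  boolean next(List<CellBase> out);
}

final class FlushLoopSketch {
  @SuppressWarnings({ "unchecked", "rawtypes" })
  static void drain(ScannerSketch scanner) {
    List<ServerCell> kvs = new ArrayList<>();
    long maxSeqId = 0;
    boolean hasMore;
    do {
      // Raw cast mirrors scanner.next((List) kvs, scannerContext) in the patch:
      // legal only because the implementation adds ServerCell elements.
      hasMore = scanner.next((List) kvs);
      for (ServerCell c : kvs) {
        // Server-only accessors become available without instanceof checks.
        maxSeqId = Math.max(maxSeqId, c.getSequenceId());
      }
      kvs.clear();
    } while (hasMore);
  }
}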
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 6b552bc10f48..72558ce1f02a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -107,7 +108,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * seeking to next row/column. TODO: estimate them? */ private long kvsScanned = 0; - private Cell prevCell = null; + private ExtendedCell prevCell = null; private final long preadMaxBytes; private long bytesRead; @@ -398,7 +399,7 @@ boolean isScanUsePread() { * @param isLazy true if using lazy seek * @param isParallelSeek true if using parallel seek */ - protected void seekScanners(List<? extends KeyValueScanner> scanners, Cell seekKey, + protected void seekScanners(List<? extends KeyValueScanner> scanners, ExtendedCell seekKey, boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). @@ -481,7 +482,7 @@ protected List<KeyValueScanner> selectScannersFrom(HStore store, } @Override - public Cell peek() { + public ExtendedCell peek() { return heap != null ? heap.peek() : null; } @@ -533,7 +534,7 @@ private void close(boolean withDelayedScannersClose) { } @Override - public boolean seek(Cell key) throws IOException { + public boolean seek(ExtendedCell key) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -561,7 +562,7 @@ public boolean next(List<Cell> outResult, ScannerContext scannerContext) throws return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } - Cell cell = this.heap.peek(); + ExtendedCell cell = this.heap.peek(); if (cell == null) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); @@ -642,7 +643,17 @@ public boolean next(List<Cell> outResult, ScannerContext scannerContext) throws case INCLUDE_AND_SEEK_NEXT_COL: Filter f = matcher.getFilter(); if (f != null) { - cell = f.transformCell(cell); + Cell transformedCell = f.transformCell(cell); + // fast path, most filters just return the same cell instance + if (transformedCell != cell) { + if (transformedCell instanceof ExtendedCell) { + cell = (ExtendedCell) transformedCell; + } else { + throw new DoNotRetryIOException("Incorrect filter implementation, " + + "the Cell returned by transformCell is not an ExtendedCell.
Filter class: " + + f.getClass().getName()); + } + } } this.countPerRow++; @@ -750,7 +761,7 @@ public boolean next(List outResult, ScannerContext scannerContext) throws break; case SEEK_NEXT_USING_HINT: - Cell nextKV = matcher.getNextKeyHint(cell); + ExtendedCell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); if ( @@ -824,7 +835,7 @@ private NextState needToReturn(List outResult) { return null; } - private void seekOrSkipToNextRow(Cell cell) throws IOException { + private void seekOrSkipToNextRow(ExtendedCell cell) throws IOException { // If it is a Get Scan, then we know that we are done with this row; there are no more // rows beyond the current one: don't try to optimize. if (!get) { @@ -835,7 +846,7 @@ private void seekOrSkipToNextRow(Cell cell) throws IOException { seekToNextRow(cell); } - private void seekOrSkipToNextColumn(Cell cell) throws IOException { + private void seekOrSkipToNextColumn(ExtendedCell cell) throws IOException { if (!trySkipToNextColumn(cell)) { seekAsDirection(matcher.getKeyForNextColumn(cell)); } @@ -892,13 +903,13 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException { * @param cell current cell * @return true means skip to next row, false means not */ - protected boolean trySkipToNextRow(Cell cell) throws IOException { - Cell nextCell = null; + protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException { + ExtendedCell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison // when the identity changes we need to compare the bytes again - Cell previousIndexedKey = null; + ExtendedCell previousIndexedKey = null; do { - Cell nextIndexedKey = getNextIndexedKey(); + ExtendedCell nextIndexedKey = getNextIndexedKey(); if ( nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey @@ -915,17 +926,17 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { } /** - * See {@link org.apache.hadoop.hbase.regionserver.StoreScanner#trySkipToNextRow(Cell)} + * See {@link #trySkipToNextRow(ExtendedCell)} * @param cell current cell * @return true means skip to next column, false means not */ - protected boolean trySkipToNextColumn(Cell cell) throws IOException { - Cell nextCell = null; + protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { + ExtendedCell nextCell = null; // used to guard against a changed next indexed key by doing a identity comparison // when the identity changes we need to compare the bytes again - Cell previousIndexedKey = null; + ExtendedCell previousIndexedKey = null; do { - Cell nextIndexedKey = getNextIndexedKey(); + ExtendedCell nextIndexedKey = getNextIndexedKey(); if ( nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey @@ -1019,7 +1030,7 @@ public void updateReaders(List sfs, List memStoreSc /** Returns if top of heap has changed (and KeyValueHeap has to try the next KV) */ protected final boolean reopenAfterFlush() throws IOException { // here we can make sure that we have a Store instance so no null check on store. - Cell lastTop = heap.peek(); + ExtendedCell lastTop = heap.peek(); // When we have the scan object, should we not pass it to getScanners() to get a limited set of // scanners? 
We did so in the constructor and we could have done it now by storing the scan // object from the constructor @@ -1064,11 +1075,11 @@ protected final boolean reopenAfterFlush() throws IOException { return topChanged; } - private void resetQueryMatcher(Cell lastTopKey) { + private void resetQueryMatcher(ExtendedCell lastTopKey) { // Reset the state of the Query Matcher and set to top row. // Only reset and call setRow if the row changes; avoids confusing the // query matcher if scanning intra-row. - Cell cell = heap.peek(); + ExtendedCell cell = heap.peek(); if (cell == null) { cell = lastTopKey; } @@ -1089,7 +1100,7 @@ protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } - protected boolean seekToNextRow(Cell c) throws IOException { + protected boolean seekToNextRow(ExtendedCell c) throws IOException { return reseek(PrivateCellUtil.createLastOnRow(c)); } @@ -1097,12 +1108,12 @@ protected boolean seekToNextRow(Cell c) throws IOException { * Do a reseek in a normal StoreScanner(scan forward) * @return true if scanner has values left, false if end of scanner */ - protected boolean seekAsDirection(Cell kv) throws IOException { + protected boolean seekAsDirection(ExtendedCell kv) throws IOException { return reseek(kv); } @Override - public boolean reseek(Cell kv) throws IOException { + public boolean reseek(ExtendedCell kv) throws IOException { if (checkFlushed()) { reopenAfterFlush(); } @@ -1122,7 +1133,7 @@ void trySwitchToStreamRead() { LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, this.store.getColumnFamilyName()); scanUsePread = false; - Cell lastTop = heap.peek(); + ExtendedCell lastTop = heap.peek(); List<KeyValueScanner> memstoreScanners = new ArrayList<>(); List<KeyValueScanner> scannersToClose = new ArrayList<>(); for (KeyValueScanner kvs : currentScanners) { @@ -1188,7 +1199,7 @@ protected final boolean checkFlushed() { * @param scanners the list {@link KeyValueScanner}s to be read from * @param kv the KeyValue on which the operation is being requested */ - private void parallelSeek(final List<? extends KeyValueScanner> scanners, final Cell kv) + private void parallelSeek(final List<? extends KeyValueScanner> scanners, final ExtendedCell kv) throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); @@ -1241,7 +1252,7 @@ public long getEstimatedNumberOfKvsScanned() { } @Override - public Cell getNextIndexedKey() { + public ExtendedCell getNextIndexedKey() { return this.heap.getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 98bb68f31fb0..c9ee019e9afe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompoundConfiguration; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -121,7 +122,7 @@ static Optional<byte[]> getFileSplitPoint(HStoreFile file, CellComparator compar // Get first, last, and mid keys. Midkey is the key that starts block // in middle of hfile. Has column and timestamp. Need to return just // the row we want to split on as midkey.
- Optional<Cell> optionalMidKey = reader.midKey(); + Optional<ExtendedCell> optionalMidKey = reader.midKey(); if (!optionalMidKey.isPresent()) { return Optional.empty(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 386f64166ef4..cd7c63b8270c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -167,7 +168,7 @@ public BoundaryMultiWriter(CellComparator comparator, List<byte[]> targetBoundar } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { if (currentWriter == null && existingWriters.isEmpty()) { // First append ever, do a sanity check. sanityCheckLeft(this.boundaries.get(0), cell); @@ -292,7 +293,7 @@ public SizeMultiWriter(CellComparator comparator, int targetCount, long targetKv } @Override - public void append(Cell cell) throws IOException { + public void append(ExtendedCell cell) throws IOException { // If we are waiting for opportunity to close and we started writing different row, // discard the writer and stop waiting. boolean doCreateWriter = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 7fc79642d919..6846f077c729 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -23,7 +23,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.yetus.audience.InterfaceAudience; @@ -103,7 +103,7 @@ public static TimeRangeTracker create(Type type, long minimumTimestamp, long max * of the key.
* @param cell the Cell to include */ - public void includeTimestamp(final Cell cell) { + public void includeTimestamp(final ExtendedCell cell) { includeTimestamp(cell.getTimestamp()); if (PrivateCellUtil.isDeleteColumnOrFamily(cell)) { includeTimestamp(0); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 3e9e85a4aba8..5938c3a6359c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.compress.Compression; @@ -421,7 +422,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel long bytesWrittenProgressForShippedCall = 0; // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List<Cell> cells = new ArrayList<>(); + List<ExtendedCell> cells = new ArrayList<>(); long currentTime = EnvironmentEdgeManager.currentTime(); long lastMillis = 0; if (LOG.isDebugEnabled()) { @@ -442,7 +443,10 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); try { do { - hasMore = scanner.next(cells, scannerContext); + // InternalScanner is for CPs so we do not want to leak ExtendedCell to the interface, but + // all the server side implementation should only add ExtendedCell to the List, otherwise it + // will cause serious assertions in our code + hasMore = scanner.next((List) cells, scannerContext); currentTime = EnvironmentEdgeManager.currentTime(); if (LOG.isDebugEnabled()) { now = currentTime; @@ -454,7 +458,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel // output to writer: Cell lastCleanCell = null; long lastCleanCellSeqId = 0; - for (Cell c : cells) { + for (ExtendedCell c : cells) { if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) { lastCleanCell = c; lastCleanCellSeqId = c.getSequenceId(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java index 41fb3e7bf12b..1bc799b9824d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.concurrent.CountDownLatch; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; @@ -34,12 +34,12 @@ public class ParallelSeekHandler extends EventHandler { private static final Logger LOG = LoggerFactory.getLogger(ParallelSeekHandler.class); private KeyValueScanner scanner; - private Cell keyValue; + private ExtendedCell keyValue; private long readPoint; private CountDownLatch latch; private
Throwable err = null; - public ParallelSeekHandler(KeyValueScanner scanner, Cell keyValue, long readPoint, + public ParallelSeekHandler(KeyValueScanner scanner, ExtendedCell keyValue, long readPoint, CountDownLatch latch) { super(null, EventType.RS_PARALLEL_SEEK); this.scanner = scanner; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java index db4d80508626..5d279461bc56 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; import org.apache.yetus.audience.InterfaceAudience; @@ -53,25 +53,26 @@ public interface ColumnTracker extends ShipperListener { /** * Checks if the column is present in the list of requested columns by returning the match code * instance. It does not check against the number of versions for the columns asked for. To do the - * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} method based - * on the return type (INCLUDE) of this method. The values that can be returned by this method are - * {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and {@link MatchCode#SEEK_NEXT_ROW}. + * version check, one has to call {@link #checkVersions(ExtendedCell, long, byte, boolean)} method + * based on the return type (INCLUDE) of this method. The values that can be returned by this + * method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and + * {@link MatchCode#SEEK_NEXT_ROW}. * @param cell a cell with the column to match against * @param type The type of the Cell * @return The match code instance. * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException; + ScanQueryMatcher.MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException; /** * Keeps track of the number of versions for the columns asked for. It assumes that the user has * already checked if the cell needs to be included by calling the - * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method are + * {@link #checkColumn(ExtendedCell, byte)} method. The enum values returned by this method are * {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} * and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. Implementations which include all the columns - * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(Cell, byte)} method and - * perform all the operations in this checkVersions method. + * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(ExtendedCell, byte)} + * method and perform all the operations in this checkVersions method. * @param cell a cell with the column to match against * @param timestamp The timestamp of the cell. 
* @param type the type of the key value (Put/Delete) @@ -82,7 +83,7 @@ public interface ColumnTracker extends ShipperListener { * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException; /** @@ -106,7 +107,7 @@ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, /** * Retrieve the MatchCode for the next row or column */ - MatchCode getNextRowOrNextColumn(Cell cell); + MatchCode getNextRowOrNextColumn(ExtendedCell cell); /** * Give the tracker a chance to declare it's done based on only the timestamp to allow an early @@ -120,6 +121,6 @@ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, * this information from external filters or timestamp range and we then need to indicate this * information to tracker. It is currently implemented for ExplicitColumnTracker. */ - default void doneWithColumn(Cell cell) { + default void doneWithColumn(ExtendedCell cell) { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java index 9a4361a956aa..9ac85bfb3b7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -70,7 +70,7 @@ public boolean isUserScan() { } @Override - public boolean moreRowsMayExistAfter(Cell cell) { + public boolean moreRowsMayExistAfter(ExtendedCell cell) { return true; } @@ -81,7 +81,7 @@ public Filter getFilter() { } @Override - public Cell getNextKeyHint(Cell cell) throws IOException { + public ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException { // no filter, so no key hint. return null; } @@ -91,7 +91,7 @@ protected void reset() { deletes.reset(); } - protected final void trackDelete(Cell cell) { + protected final void trackDelete(ExtendedCell cell) { // If keepDeletedCells is true, then we only remove cells by versions or TTL during // compaction, so we do not need to track delete here. 
// If keepDeletedCells is TTL and the delete marker is expired, then we can make sure that the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index 56ac265dd187..53816e9f3f34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.yetus.audience.InterfaceAudience; @@ -42,14 +42,14 @@ public interface DeleteTracker extends ShipperListener { * This is called when a Delete is encountered in a StoreFile. * @param cell - the delete cell */ - void add(Cell cell); + void add(ExtendedCell cell); /** * Check if the specified cell buffer has been deleted by a previously seen delete. * @param cell - current cell to check if deleted by a previously seen delete * @return deleteResult The result tells whether the Cell is deleted and why */ - DeleteResult isDeleted(Cell cell); + DeleteResult isDeleted(ExtendedCell cell); /** Returns true if there are no current delete, false otherwise */ boolean isEmpty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java index 397e2631a440..ada4e31fb9bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -60,7 +60,7 @@ protected DropDeletesCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker this.earliestPutTs = earliestPutTs; } - protected final MatchCode tryDropDelete(Cell cell) { + protected final MatchCode tryDropDelete(ExtendedCell cell) { long timestamp = cell.getTimestamp(); // If it is not the time to drop the delete marker, just return if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index 1ce2c6136cc2..ec9810ef9753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -19,8 +19,8 @@ import java.io.IOException; import java.util.NavigableSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; @@ -102,7 +102,7 @@ public ColumnCount getColumnHint() { * {@inheritDoc} */ @Override - public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) { + public ScanQueryMatcher.MatchCode checkColumn(ExtendedCell cell, byte type) { // delete markers should never be passed to an // *Explicit*ColumnTracker assert !PrivateCellUtil.isDelete(type); @@ -152,7 +152,7 @@ public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) { } @Override - public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + public ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); if (ignoreCount) { @@ -210,7 +210,7 @@ private boolean isExpired(long timestamp) { } @Override - public void doneWithColumn(Cell cell) { + public void doneWithColumn(ExtendedCell cell) { while (this.column != null) { int compare = CellUtil.compareQualifiers(cell, column.getBuffer(), column.getOffset(), column.getLength()); @@ -232,7 +232,7 @@ public void doneWithColumn(Cell cell) { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { doneWithColumn(cell); if (getColumnHint() == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java index c6776a05a41d..547d2e0673e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -34,7 +34,7 @@ public IncludeAllCompactionQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { return MatchCode.INCLUDE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java index 7d3d973779c8..9be9e6d91798 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -36,7 +36,7 @@ public MajorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode 
match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index 70e474e106b8..847eff44b318 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +35,7 @@ public MinorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java index 146f67dbd2fb..820bb6baa7b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java @@ -26,9 +26,9 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; @@ -127,7 +127,7 @@ protected DeleteVersionsNode() { this(Long.MIN_VALUE, Long.MAX_VALUE); } - public void addVersionDelete(Cell cell) { + public void addVersionDelete(ExtendedCell cell) { SortedSet<Long> set = deletesMap.get(cell.getTimestamp()); if (set == null) { set = new TreeSet<>(); @@ -161,7 +161,7 @@ protected DeleteVersionsNode getDeepCopy() { * @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return * MAX_VALUE. */ - protected long prepare(Cell cell) { + protected long prepare(ExtendedCell cell) { if (isColumnQualifierChanged(cell)) { // The last cell is family-level delete and this is not, or the cq is changed, // we should construct delColMap as a deep copy of delFamMap.
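A note on the deletesMap bookkeeping in addVersionDelete above: delete-version markers are grouped by timestamp, and each timestamp maps to the sorted set of sequence ids (mvcc) of the markers seen at that timestamp. The fragment below is a stripped-down, hypothetical sketch of that structure, not the tracker's actual field layout; the masking rule in isMaskedByVersionDelete is a simplified reading of the same-timestamp comparison visible in this patch.

import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

final class VersionDeleteSketch {
  // timestamp -> sequence ids of the delete-version markers seen at that timestamp
  private final TreeMap<Long, SortedSet<Long>> deletesMap = new TreeMap<>();

  void addVersionDelete(long timestamp, long sequenceId) {
    SortedSet<Long> set = deletesMap.get(timestamp);
    if (set == null) {
      set = new TreeSet<>();
      deletesMap.put(timestamp, set);
    }
    set.add(sequenceId);
  }

  // A put at (timestamp, sequenceId) is masked when a delete-version marker at
  // the same timestamp carries the same or a later sequence id.
  boolean isMaskedByVersionDelete(long timestamp, long sequenceId) {
    SortedSet<Long> set = deletesMap.get(timestamp);
    return set != null && !set.tailSet(sequenceId).isEmpty();
  }
}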
@@ -186,7 +186,7 @@ protected long prepare(ExtendedCell cell) { return Long.MAX_VALUE; } - private boolean isColumnQualifierChanged(Cell cell) { + private boolean isColumnQualifierChanged(ExtendedCell cell) { if ( delColMap.isEmpty() && lastCqArray == null && cell.getQualifierLength() == 0 && (PrivateCellUtil.isDeleteColumns(cell) || PrivateCellUtil.isDeleteColumnVersion(cell)) @@ -199,7 +199,7 @@ private boolean isColumnQualifierChanged(Cell cell) { // DeleteTracker @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (Type.codeToType(type)) { @@ -231,7 +231,7 @@ public void add(Cell cell) { * @return We don't distinguish DeleteColumn and DeleteFamily. We only return code for column. */ @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long duplicateMvcc = prepare(cell); for (Map.Entry<Long, DeleteVersionsNode> e : delColMap.tailMap(cell.getSequenceId()) @@ -281,7 +281,7 @@ public void update() { // ColumnTracker @Override - public MatchCode checkColumn(Cell cell, byte type) throws IOException { + public MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException { if (columns == null) { return MatchCode.INCLUDE; } @@ -305,7 +305,7 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { } @Override - public MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) + public MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { assert !PrivateCellUtil.isDelete(type); // We drop old version in #isDeleted, so here we won't SKIP because of versioning. But we should @@ -370,7 +370,7 @@ public ColumnCount getColumnHint() { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { // TODO maybe we can optimize.
return MatchCode.SEEK_NEXT_COL; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java index 93288cba8cd4..9ad3c792345e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; @@ -55,7 +55,7 @@ public void beforeShipped() throws IOException { } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java index 180d2dd2ed31..dcffbb140ed0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -35,7 +35,7 @@ protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker column } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 8fdee2da524e..efe09cce722f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -20,9 +20,9 @@ import java.io.IOException; import java.util.SortedSet; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; @@ -48,7 +48,7 @@ public class ScanDeleteTracker implements DeleteTracker { protected boolean hasFamilyStamp = false; protected long familyStamp = 0L; protected SortedSet<Long> familyVersionStamps = new TreeSet<Long>(); - protected Cell deleteCell = null; + protected ExtendedCell deleteCell = null; protected byte[] deleteBuffer = null; protected int deleteOffset = 0; protected int deleteLength = 0; @@ -67,7 +67,7
@@ public ScanDeleteTracker(CellComparator comparator) { * @param cell - the delete cell */ @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { long timestamp = cell.getTimestamp(); byte type = cell.getTypeByte(); if (!hasFamilyStamp || timestamp > familyStamp) { @@ -99,7 +99,7 @@ public void add(Cell cell) { * @param cell - current cell to check if deleted by a previously seen delete */ @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long timestamp = cell.getTimestamp(); if (hasFamilyStamp && timestamp <= familyStamp) { return DeleteResult.FAMILY_DELETED; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 2ab3d68fca15..7e38ea293343 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; @@ -115,7 +116,7 @@ public static enum MatchCode { protected final CellComparator rowComparator; /** Key to seek to in memstore and StoreFiles */ - protected final Cell startKey; + protected final ExtendedCell startKey; /** Keeps track of columns and versions */ protected final ColumnTracker columns; @@ -126,9 +127,9 @@ public static enum MatchCode { protected final long now; /** Row the query is on */ - protected Cell currentRow; + protected ExtendedCell currentRow; - protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns, + protected ScanQueryMatcher(ExtendedCell startKey, ScanInfo scanInfo, ColumnTracker columns, long oldestUnexpiredTS, long now) { this.rowComparator = scanInfo.getComparator(); this.startKey = startKey; @@ -138,7 +139,7 @@ protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker colum } /** Returns true if the cell is expired */ - private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, + private static boolean isCellTTLExpired(final ExtendedCell cell, final long oldestTimestamp, final long now) { // Look for a TTL tag first. Use it instead of the family setting if // found. If a cell has multiple TTLs, resolve the conflict by using the @@ -167,7 +168,7 @@ private static boolean isCellTTLExpired(final Cell cell, final long oldestTimest * Check before the delete logic. * @return null means continue. 
*/ - protected final MatchCode preCheck(Cell cell) { + protected final MatchCode preCheck(ExtendedCell cell) { if (currentRow == null) { // Since the curCell is null it means we are already sure that we have moved over to the next // row @@ -196,7 +197,7 @@ protected final MatchCode preCheck(Cell cell) { return null; } - protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { + protected final MatchCode checkDeleted(DeleteTracker deletes, ExtendedCell cell) { if (deletes.isEmpty() && !(deletes instanceof NewVersionBehaviorTracker)) { return null; } @@ -234,10 +235,10 @@ protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { * @throws IOException in case there is an internal consistency problem caused by a data * corruption. */ - public abstract MatchCode match(Cell cell) throws IOException; + public abstract MatchCode match(ExtendedCell cell) throws IOException; /** Returns the start key */ - public Cell getStartKey() { + public ExtendedCell getStartKey() { return startKey; } @@ -245,7 +246,7 @@ public Cell getStartKey() { public abstract boolean hasNullColumnInQuery(); /** Returns a cell represent the current row */ - public Cell currentRow() { + public ExtendedCell currentRow() { return currentRow; } @@ -261,7 +262,7 @@ public void clearCurrentRow() { /** * Set the row when there is change in row */ - public void setToNewRow(Cell currentRow) { + public void setToNewRow(ExtendedCell currentRow) { this.currentRow = currentRow; columns.reset(); reset(); @@ -274,16 +275,16 @@ public void setToNewRow(Cell currentRow) { * stopRow or we are scanning on row only because this Scan is for a Get, * etc. */ - public abstract boolean moreRowsMayExistAfter(Cell cell); + public abstract boolean moreRowsMayExistAfter(ExtendedCell cell); - public Cell getKeyForNextColumn(Cell cell) { + public ExtendedCell getKeyForNextColumn(ExtendedCell cell) { // We aren't sure whether any DeleteFamily cells exist, so we can't skip to next column. // TODO: Current way disable us to seek to next column quickly. Is there any better solution? 
// see HBASE-18471 for more details // see TestFromClientSide3#testScanAfterDeletingSpecifiedRow // see TestFromClientSide3#testScanAfterDeletingSpecifiedRowV2 if (cell.getQualifierLength() == 0) { - Cell nextKey = PrivateCellUtil.createNextOnRowCol(cell); + ExtendedCell nextKey = PrivateCellUtil.createNextOnRowCol(cell); if (nextKey != cell) { return nextKey; } @@ -305,7 +306,7 @@ public Cell getKeyForNextColumn(Cell cell) { * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ - public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { + public int compareKeyForNextRow(ExtendedCell nextIndexed, ExtendedCell currentCell) { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } @@ -315,7 +316,7 @@ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ - public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { + public int compareKeyForNextColumn(ExtendedCell nextIndexed, ExtendedCell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, @@ -334,7 +335,7 @@ public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { /** * Delegate to {@link Filter#getNextCellHint(Cell)}. If no filter, return {@code null}. */ - public abstract Cell getNextKeyHint(Cell cell) throws IOException; + public abstract ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException; @Override public void beforeShipped() throws IOException { @@ -346,7 +347,7 @@ public void beforeShipped() throws IOException { } } - protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) { + protected static ExtendedCell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) { return PrivateCellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index ea0afee21787..407cedd8b225 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -33,7 +33,7 @@ */ @InterfaceAudience.Private public class ScanWildcardColumnTracker implements ColumnTracker { - private Cell columnCell = null; + private ExtendedCell columnCell = null; private int currentCount = 0; private final int maxVersions; private final int minVersions; @@ -68,7 +68,7 @@ public ScanWildcardColumnTracker(int minVersion, int maxVersion, long oldestUnex * 
{@inheritDoc} This receives puts *and* deletes. */ @Override - public MatchCode checkColumn(Cell cell, byte type) throws IOException { + public MatchCode checkColumn(ExtendedCell cell, byte type) throws IOException { return MatchCode.INCLUDE; } @@ -77,7 +77,7 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { * take the version of the previous put (so eventually all but the last can be reclaimed). */ @Override - public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, + public ScanQueryMatcher.MatchCode checkVersions(ExtendedCell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { if (columnCell == null) { // first iteration. @@ -121,7 +121,7 @@ public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte + "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); } - private void resetCell(Cell columnCell) { + private void resetCell(ExtendedCell columnCell) { this.columnCell = columnCell; currentCount = 0; } @@ -192,7 +192,7 @@ public boolean done() { } @Override - public MatchCode getNextRowOrNextColumn(Cell cell) { + public MatchCode getNextRowOrNextColumn(ExtendedCell cell) { return MatchCode.SEEK_NEXT_COL; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java index 370164c8a0d5..a59d23d1664e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -50,7 +50,7 @@ public StripeCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes } @Override - public MatchCode match(Cell cell) throws IOException { + public MatchCode match(ExtendedCell cell) throws IOException { MatchCode returnCode = preCheck(cell); if (returnCode != null) { return returnCode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index 6c3d002b0929..c07b91b77e68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -21,6 +21,8 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; @@ -57,9 +59,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { private int count = 0; - private Cell curColCell = null; + private ExtendedCell curColCell = null; - private static Cell createStartKey(Scan scan, ScanInfo scanInfo) { + 
private static ExtendedCell createStartKey(Scan scan, ScanInfo scanInfo) { if (scan.includeStartRow()) { return createStartKeyFromRow(scan.getStartRow(), scanInfo); } else { @@ -104,11 +106,19 @@ public Filter getFilter() { } @Override - public Cell getNextKeyHint(Cell cell) throws IOException { + public ExtendedCell getNextKeyHint(ExtendedCell cell) throws IOException { if (filter == null) { return null; } else { - return filter.getNextCellHint(cell); + Cell hint = filter.getNextCellHint(cell); + if (hint == null || hint instanceof ExtendedCell) { + return (ExtendedCell) hint; + } else { + throw new DoNotRetryIOException("Incorrect filter implementation, " + + "the Cell returned by getNextKeyHint is not an ExtendedCell. Filter class: " + + filter.getClass().getName()); + } + } } @@ -120,7 +130,7 @@ public void beforeShipped() throws IOException { } } - protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) + protected final MatchCode matchColumn(ExtendedCell cell, long timestamp, byte typeByte) throws IOException { int tsCmp = tr.compare(timestamp); if (tsCmp > 0) { @@ -187,7 +197,7 @@ protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) * INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW * */ - private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, + private final MatchCode mergeFilterResponse(ExtendedCell cell, MatchCode matchCode, ReturnCode filterResponse) { switch (filterResponse) { case SKIP: @@ -259,7 +269,7 @@ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, protected abstract boolean moreRowsMayExistsAfter(int cmpToStopRow); @Override - public boolean moreRowsMayExistAfter(Cell cell) { + public boolean moreRowsMayExistAfter(ExtendedCell cell) { // If a 'get' Scan -- we are doing a Get (every Get is a single-row Scan in implementation) -- // then we are looking at one row only, the one specified in the Get coordinate..so we know // for sure that there are no more rows on this Scan diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 4201dd07533d..754368f73f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -173,12 +174,15 @@ public EncryptedKvEncoder(OutputStream os, Encryptor encryptor) { } @Override - public void write(Cell cell) throws IOException { + public void write(Cell c) throws IOException { if (encryptor == null) { - super.write(cell); + super.write(c); return; } + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; + byte[] iv = nextIv(); encryptor.setIv(iv); encryptor.reset(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 84709cbc58dd..e6a20b0d0206 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -23,6 +23,7 @@ import java.io.OutputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -232,7 +233,9 @@ public CompressedKvEncoder(OutputStream out, CompressionContext compression) { } @Override - public void write(Cell cell) throws IOException { + public void write(Cell c) throws IOException { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // We first write the KeyValue infrastructure as VInts. StreamUtils.writeRawVInt32(out, KeyValueUtil.keyLength(cell)); StreamUtils.writeRawVInt32(out, cell.getValueLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index f593c2cc4283..854148a89e06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -22,6 +22,7 @@ import java.util.Objects; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -130,7 +131,7 @@ public ReturnCode filterCell(final Cell cell) { case CHECK_CELL_DEFAULT: { if ( authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) - || authManager.authorizeCell(user, table, cell, Permission.Action.READ) + || authManager.authorizeCell(user, table, (ExtendedCell) cell, Permission.Action.READ) ) { return ReturnCode.INCLUDE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index f349faec20ea..44e18f65654e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -466,12 +467,12 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce } } } else if (entry.getValue() instanceof List) { - List list = (List) entry.getValue(); + List list = (List) entry.getValue(); if (list == null || list.isEmpty()) { get.addFamily(col); } else { // In case of family delete, a Cell will be added into the list with Qualifier as null. 
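A recurring move in these hunks is narrowing a Cell to an ExtendedCell at a trust boundary. The sketch below (a hypothetical helper, not from the patch) contrasts the two styles the patch uses: cells produced inside the server are narrowed with an assert plus a cast, while cells handed back by user-supplied code, such as a custom Filter's getNextCellHint, are validated and rejected with a DoNotRetryIOException so a bad filter fails the request rather than crashing the RegionServer.

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.ExtendedCell;

    // Hypothetical utility illustrating the two narrowing styles in this patch.
    final class CellNarrowing {

      private CellNarrowing() {
      }

      // Server-created cells: a wrong type here is a coding error, so an assert
      // (active under -ea) plus an unchecked cast matches the patch's style.
      static ExtendedCell trusted(Cell c) {
        assert c instanceof ExtendedCell;
        return (ExtendedCell) c;
      }

      // Cells returned by user code: reject with a non-retriable exception that
      // names the offending class, mirroring UserScanQueryMatcher#getNextKeyHint.
      static ExtendedCell fromUserCode(Cell c, Class<?> producer) throws DoNotRetryIOException {
        if (c == null || c instanceof ExtendedCell) {
          return (ExtendedCell) c;
        }
        throw new DoNotRetryIOException(
          "Cell is not an ExtendedCell. Producer: " + producer.getName());
      }
    }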
- for (Cell cell : list) { + for (ExtendedCell cell : list) { if ( cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode()) @@ -606,7 +607,9 @@ private static void addCellPermissions(final byte[] perms, Map> e : familyMap.entrySet()) { List newCells = Lists.newArrayList(); - for (Cell cell : e.getValue()) { + for (Cell c : e.getValue()) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, perms)); @@ -1748,7 +1751,8 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell // We have checked the ACL tag of mutation is not null. // So that the tags could not be empty. tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, mutation.getACL())); - return PrivateCellUtil.createCell(newCell, tags); + assert newCell instanceof ExtendedCell; + return PrivateCellUtil.createCell((ExtendedCell) newCell, tags); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 9d9f90765c72..830c360e61a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -503,7 +504,8 @@ public boolean evaluate(Cell cell) throws IOException { @Override public boolean evaluate(Cell cell) throws IOException { boolean visibilityTagPresent = false; - Iterator tagsItr = PrivateCellUtil.tagsIterator(cell); + assert cell instanceof ExtendedCell; + Iterator tagsItr = PrivateCellUtil.tagsIterator((ExtendedCell) cell); while (tagsItr.hasNext()) { boolean includeKV = true; Tag tag = tagsItr.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 0d4fed632582..4b97e5484c10 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -299,7 +300,9 @@ public void preBatchMutate(ObserverContext c, boolean modifiedTagFound = false; Pair pair = new Pair<>(false, null); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - pair = 
checkForReservedVisibilityTagPresence(cellScanner.current(), pair); + Cell cell = cellScanner.current(); + assert cell instanceof ExtendedCell; + pair = checkForReservedVisibilityTagPresence((ExtendedCell) cell, pair); if (!pair.getFirst()) { // Don't disallow reserved tags if authorization is disabled if (authorizationEnabled) { @@ -337,21 +340,23 @@ public void preBatchMutate(ObserverContext c, } } if (visibilityTags != null) { - List updatedCells = new ArrayList<>(); + List updatedCells = new ArrayList<>(); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { - Cell cell = cellScanner.current(); + Cell ce = cellScanner.current(); + assert ce instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) ce; List tags = PrivateCellUtil.getTags(cell); if (modifiedTagFound) { // Rewrite the tags by removing the modified tags. removeReplicationVisibilityTag(tags); } tags.addAll(visibilityTags); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + ExtendedCell updatedCell = PrivateCellUtil.createCell(cell, tags); updatedCells.add(updatedCell); } m.getFamilyCellMap().clear(); // Clear and add new Cells to the Mutation. - for (Cell cell : updatedCells) { + for (ExtendedCell cell : updatedCells) { if (m instanceof Put) { Put p = (Put) m; p.add(cell); @@ -429,7 +434,7 @@ public void prePrepareTimeStampForDeleteVersion(ObserverContext checkForReservedVisibilityTagPresence(Cell cell, + private Pair checkForReservedVisibilityTagPresence(ExtendedCell cell, Pair pair) throws IOException { if (pair == null) { pair = new Pair<>(false, null); @@ -630,8 +635,8 @@ public List> postIncrementBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, (ExtendedCell) pair.getSecond()))); } return resultPairs; } @@ -642,13 +647,13 @@ public List> postAppendBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs - .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, (ExtendedCell) pair.getSecond()))); } return resultPairs; } - private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOException { + private Cell createNewCellWithTags(Mutation mutation, ExtendedCell newCell) throws IOException { List tags = Lists.newArrayList(); CellVisibility cellVisibility = null; try { @@ -982,7 +987,12 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell cell) throws IOException { List putVisTags = new ArrayList<>(); - Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); + Byte putCellVisTagsFormat = null; + if (cell instanceof ExtendedCell) { + putCellVisTagsFormat = + VisibilityUtils.extractVisibilityTags((ExtendedCell) cell, putVisTags); + } + if (putVisTags.isEmpty() && deleteCellVisTags.isEmpty()) { // Early out if there are no tags in the cell return ReturnCode.INCLUDE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index 
026a99796c9f..e822e663a508 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -28,8 +28,8 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker; @@ -56,7 +56,7 @@ private static class TagInfo { List tags; Byte format; - private TagInfo(Cell c) { + private TagInfo(ExtendedCell c) { tags = new ArrayList<>(); format = VisibilityUtils.extractVisibilityTags(c, tags); } @@ -98,7 +98,7 @@ protected VisibilityDeleteVersionsNode getDeepCopy() { } @Override - public void addVersionDelete(Cell cell) { + public void addVersionDelete(ExtendedCell cell) { SortedMap set = deletesMap.get(cell.getTimestamp()); if (set == null) { set = new TreeMap<>(); @@ -117,7 +117,7 @@ public void addVersionDelete(Cell cell) { } @Override - public void add(Cell cell) { + public void add(ExtendedCell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (KeyValue.Type.codeToType(type)) { @@ -143,7 +143,7 @@ public void add(Cell cell) { } } - private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { + private boolean tagMatched(ExtendedCell put, TagInfo delInfo) throws IOException { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); return putVisTags.isEmpty() == delInfo.tags.isEmpty() @@ -153,7 +153,7 @@ private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { } @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { try { long duplicateMvcc = prepare(cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index 5cffb51500a2..1b91ed718f61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeoutException; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -75,7 +76,9 @@ public boolean replicate(ReplicateContext replicateContext) { for (Entry entry : entries) { WALEdit newEdit = new WALEdit(); ArrayList cells = entry.getEdit().getCells(); - for (Cell cell : cells) { + for (Cell c : cells) { + assert c instanceof ExtendedCell; + ExtendedCell cell = (ExtendedCell) c; if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 59623ece1359..180bd3cc4eae 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; @@ -47,7 +47,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { /** * This tag is used for the DELETE cell which has no visibility label. */ - private static final List EMPTY_TAG = Collections.EMPTY_LIST; + private static final List EMPTY_TAG = Collections.emptyList(); // It's better to track the visibility tags in delete based on each type. Create individual // data structures for tracking each of them. This would ensure that there is no tracking based // on time and also would handle all cases where deletefamily or deletecolumns is specified with @@ -70,7 +70,7 @@ public VisibilityScanDeleteTracker(CellComparator comparator) { } @Override - public void add(Cell delCell) { + public void add(ExtendedCell delCell) { // Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); byte type = delCell.getTypeByte(); @@ -110,7 +110,7 @@ public void add(Cell delCell) { extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type)); } - private boolean extractDeleteCellVisTags(Cell delCell, Type type) { + private boolean extractDeleteCellVisTags(ExtendedCell delCell, Type type) { // If tag is present in the delete boolean hasVisTag = false; Byte deleteCellVisTagsFormat = null; @@ -178,7 +178,7 @@ private boolean extractDeleteCellVisTags(Cell delCell, Type type) { } @Override - public DeleteResult isDeleted(Cell cell) { + public DeleteResult isDeleted(ExtendedCell cell) { long timestamp = cell.getTimestamp(); try { if (hasFamilyStamp) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index 513e29377cb3..376162be0aac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -35,7 +35,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -200,7 +200,7 @@ public static List getScanLabelGenerators(Configuration conf * @param tags - the array that will be populated if visibility tags are present * @return The visibility tags serialization format */ - public static Byte extractVisibilityTags(Cell cell, List tags) { + public static Byte extractVisibilityTags(ExtendedCell cell, List tags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -223,7 +223,8 @@ public static Byte extractVisibilityTags(Cell cell, List tags) { * @return - the serialization format of
the tag. Can be null if no tags are found or if there is * no visibility tag found */ - public static Byte extractAndPartitionTags(Cell cell, List visTags, List nonVisTags) { + public static Byte extractAndPartitionTags(ExtendedCell cell, List visTags, + List nonVisTags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -240,7 +241,7 @@ public static Byte extractAndPartitionTags(Cell cell, List visTags, List tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java index 94c58dde4e00..c9860159d00f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.yetus.audience.InterfaceAudience; @@ -44,7 +45,7 @@ public Cell getLastCell() { /** * Bloom information from the cell is retrieved */ - public void writeBloom(Cell cell) throws IOException { + public void writeBloom(ExtendedCell cell) throws IOException { // only add to the bloom filter on a new, unique key if (isNewKey(cell)) { sanityCheck(cell); @@ -52,7 +53,7 @@ public void writeBloom(Cell cell) throws IOException { } } - private void sanityCheck(Cell cell) throws IOException { + private void sanityCheck(ExtendedCell cell) throws IOException { if (this.getLastCell() != null) { if (comparator.compare(cell, this.getLastCell()) <= 0) { throw new IOException("Added a key not lexically larger than" + " previous. 
Current cell = " @@ -71,5 +72,5 @@ private void sanityCheck(Cell cell) throws IOException { * @param cell the cell to be verified * @return true if a new key else false */ - protected abstract boolean isNewKey(Cell cell); + protected abstract boolean isNewKey(ExtendedCell cell); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index 5465c24540a1..6667b4235534 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -22,8 +22,8 @@ import java.util.Iterator; import java.util.List; import java.util.SortedSet; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner; import org.apache.yetus.audience.InterfaceAudience; @@ -32,36 +32,36 @@ */ @InterfaceAudience.Private public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { - final private Iterable data; + final private Iterable data; final CellComparator comparator; - private Iterator iter; - private Cell current; + private Iterator iter; + private ExtendedCell current; - public CollectionBackedScanner(SortedSet set) { + public CollectionBackedScanner(SortedSet set) { this(set, CellComparator.getInstance()); } - public CollectionBackedScanner(SortedSet set, CellComparator comparator) { + public CollectionBackedScanner(SortedSet set, CellComparator comparator) { this.comparator = comparator; data = set; init(); } - public CollectionBackedScanner(List list) { + public CollectionBackedScanner(List list) { this(list, CellComparator.getInstance()); } - public CollectionBackedScanner(List list, CellComparator comparator) { + public CollectionBackedScanner(List list, CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(CellComparator comparator, Cell... array) { + public CollectionBackedScanner(CellComparator comparator, ExtendedCell... 
array) { this.comparator = comparator; - List tmp = new ArrayList<>(array.length); + List tmp = new ArrayList<>(array.length); Collections.addAll(tmp, array); Collections.sort(tmp, comparator); data = tmp; @@ -76,13 +76,13 @@ private void init() { } @Override - public Cell peek() { + public ExtendedCell peek() { return current; } @Override - public Cell next() { - Cell oldCurrent = current; + public ExtendedCell next() { + ExtendedCell oldCurrent = current; if (iter.hasNext()) { current = iter.next(); } else { @@ -92,16 +92,16 @@ public Cell next() { } @Override - public boolean seek(Cell seekCell) { + public boolean seek(ExtendedCell seekCell) { // restart iterator iter = data.iterator(); return reseek(seekCell); } @Override - public boolean reseek(Cell seekCell) { + public boolean reseek(ExtendedCell seekCell) { while (iter.hasNext()) { - Cell next = iter.next(); + ExtendedCell next = iter.next(); int ret = comparator.compare(next, seekCell); if (ret >= 0) { current = next; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 349fcbc3c50a..d58bcdac74d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -24,11 +24,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -119,7 +123,10 @@ public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Ex HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create(); // Write any-old Cell... 
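The replacement just below swaps the removed CellUtil#createCell helper for the ExtendedCellBuilder API. Condensed into a standalone sketch (the row and value literals come from the hunk; the builder calls mirror it as written):

    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.ExtendedCell;
    import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BuilderExample {
      public static void main(String[] args) {
        // DEEP_COPY copies the supplied arrays into the built cell, so later
        // mutation of the input arrays cannot corrupt it.
        ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          .setRow(Bytes.toBytes("compressiontestkey"))
          .setFamily(HConstants.EMPTY_BYTE_ARRAY)
          .setQualifier(HConstants.EMPTY_BYTE_ARRAY)
          .setTimestamp(HConstants.LATEST_TIMESTAMP)
          .setType(KeyValue.Type.Maximum.getCode())
          .setValue(Bytes.toBytes("compressiontestval"))
          .build();
        System.out.println(cell);
      }
    }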
final byte[] rowKey = Bytes.toBytes("compressiontestkey"); - Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval")); + ExtendedCell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(Bytes.toBytes("compressiontestval")).build(); writer.append(c); writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval")); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 78276c53b1c2..bbc9a5bff89b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -993,9 +994,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { HFile.Reader hf = null; try { hf = HFile.createReader(fs, hfile.getPath(), CacheConfig.DISABLED, true, getConf()); - Optional startKv = hf.getFirstKey(); + Optional startKv = hf.getFirstKey(); start = CellUtil.cloneRow(startKv.get()); - Optional endKv = hf.getLastKey(); + Optional endKv = hf.getLastKey(); end = CellUtil.cloneRow(endKv.get()); } catch (Exception ioe) { LOG.warn("Problem reading orphan file " + hfile + ", skipping"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java index 46aa6ece1bf5..38781eaef61c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +45,7 @@ public void addLastBloomKey(Writer writer) throws IOException { } @Override - protected boolean isNewKey(Cell cell) { + protected boolean isNewKey(ExtendedCell cell) { if (this.getLastCell() != null) { return !CellUtil.matchingRows(cell, this.getLastCell()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java index 140feb117d89..cb1bcef01f89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowColBloomContext.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY; import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import 
org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.yetus.audience.InterfaceAudience; @@ -41,7 +41,7 @@ public RowColBloomContext(BloomFilterWriter generalBloomFilterWriter, CellCompar @Override public void addLastBloomKey(Writer writer) throws IOException { if (this.getLastCell() != null) { - Cell firstOnRow = PrivateCellUtil.createFirstOnRowCol(this.getLastCell()); + ExtendedCell firstOnRow = PrivateCellUtil.createFirstOnRowCol(this.getLastCell()); // This copy happens only once when the writer is closed byte[] key = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(firstOnRow); writer.appendFileInfo(LAST_BLOOM_KEY, key); @@ -49,7 +49,7 @@ public void addLastBloomKey(Writer writer) throws IOException { } @Override - protected boolean isNewKey(Cell cell) { + protected boolean isNewKey(ExtendedCell cell) { if (this.getLastCell() != null) { return !CellUtil.matchingRowColumn(cell, this.getLastCell()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java index 622735847508..dee0897fb4b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowPrefixFixedLengthBloomContext.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.yetus.audience.InterfaceAudience; @@ -39,7 +40,8 @@ public RowPrefixFixedLengthBloomContext(BloomFilterWriter bloomFilterWriter, this.prefixLength = prefixLength; } - public void writeBloom(Cell cell) throws IOException { + @Override + public void writeBloom(ExtendedCell cell) throws IOException { super.writeBloom(getRowPrefixCell(cell)); } @@ -47,7 +49,7 @@ public void writeBloom(Cell cell) throws IOException { * @param cell the cell * @return the new cell created by row prefix */ - private Cell getRowPrefixCell(Cell cell) { + private ExtendedCell getRowPrefixCell(ExtendedCell cell) { byte[] row = CellUtil.copyRow(cell); return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(row, 0, Math.min(prefixLength, row.length)).setType(Cell.Type.Put).build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 323788c572eb..d43fd9ab8703 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.MetaCellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -75,7 +76,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter, @Override public void append(RegionEntryBuffer buffer) throws IOException { - Map familyCells = new HashMap<>(); + Map> familyCells = new HashMap<>(); Map familySeqIds = new 
HashMap<>(); boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME); // First iterate all Cells to find which column families are present and to stamp Cell with @@ -87,28 +88,29 @@ public void append(RegionEntryBuffer buffer) throws IOException { if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { continue; } + // only ExtendedCell can set sequence id, so it is safe to cast it to ExtendedCell later. PrivateCellUtil.setSequenceId(cell, seqId); String familyName = Bytes.toString(CellUtil.cloneFamily(cell)); // comparator need to be specified for meta familyCells .computeIfAbsent(familyName, - key -> new CellSet( + key -> new CellSet<>( isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)) - .add(cell); + .add((ExtendedCell) cell); familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId)); } } // Create a new hfile writer for each column family, write edits then close writer. String regionName = Bytes.toString(buffer.encodedRegionName); - for (Map.Entry cellsEntry : familyCells.entrySet()) { + for (Map.Entry> cellsEntry : familyCells.entrySet()) { String familyName = cellsEntry.getKey(); StoreFileWriter writer = createRecoveredHFileWriter(buffer.tableName, regionName, familySeqIds.get(familyName), familyName, isMetaTable); LOG.trace("Created {}", writer.getPath()); openingWritersNum.incrementAndGet(); try { - for (Cell cell : cellsEntry.getValue()) { + for (ExtendedCell cell : cellsEntry.getValue()) { writer.append(cell); } // Append the max seqid to hfile, used when recovery. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index b03357b93327..0c0c50625184 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -313,9 +314,10 @@ public void processFile(final Configuration conf, final Path p) throws IOExcepti // initialize list into which we will store atomic actions List> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { + assert cell instanceof ExtendedCell; // add atomic operation to txn - Map op = - new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + Map op = new HashMap<>( + toStringMap((ExtendedCell) cell, outputOnlyRowKey, rowPrefix, row, outputValues)); if (op.isEmpty()) { continue; } @@ -379,7 +381,7 @@ public static void printCell(PrintStream out, Map op, boolean ou out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, boolean printRowKeyOnly, + public static Map toStringMap(ExtendedCell cell, boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); String rowKey = @@ -421,7 +423,7 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly return stringMap; } - public static Map toStringMap(Cell cell) { + public static Map toStringMap(ExtendedCell cell) { return toStringMap(cell, false, null, null, false); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index c6cee19196cc..60ba0f85c006 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -73,7 +73,7 @@ static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) { return w; } - static Cell createCell(final int i) { + static ExtendedCell createCell(final int i) { return createCell(i, HConstants.EMPTY_BYTE_ARRAY); } @@ -84,16 +84,22 @@ static Cell createCell(final int i) { * @param value Value to use * @return Created Cell. */ - static Cell createCell(final int i, final byte[] value) { + static ExtendedCell createCell(final int i, final byte[] value) { return createCell(format(i), value); } - static Cell createCell(final byte[] keyRow) { - return CellUtil.createCell(keyRow); + static ExtendedCell createCell(final byte[] keyRow) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); } - static Cell createCell(final byte[] keyRow, final byte[] value) { - return CellUtil.createCell(keyRow, value); + static ExtendedCell createCell(final byte[] keyRow, final byte[] value) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(value).build(); } /** @@ -459,7 +465,7 @@ void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, false); byte[] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); - Cell c = createCell(b); + ExtendedCell c = createCell(b); if (scanner.seekTo(c) != 0) { LOG.info("Nonexistent row: " + new String(b)); return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java index cad787ce9e3b..228cb66c00a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java @@ -35,9 +35,12 @@ public class TestTagRewriteCell { @Test public void testHeapSize() { - Cell originalCell = CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("value")); + ExtendedCell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + .setRow(Bytes.toBytes("row")).setFamily(HConstants.EMPTY_BYTE_ARRAY) + .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(Bytes.toBytes("value")).build(); final int fakeTagArrayLength = 10; - Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]); + ExtendedCell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]); // Get the heapSize before the internal tags array in trCell are nuked long trCellHeapSize = ((HeapSize) trCell).heapSize(); @@ -45,7 +48,7 @@ public void testHeapSize() { // Make another TagRewriteCell with the original TagRewriteCell // This happens on 
systems with more than one RegionObserver/Coproc loaded (such as // VisibilityController and AccessController) - Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]); + ExtendedCell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]); assertTrue( "TagRewriteCell containing a TagRewriteCell's heapsize should be " diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index de75db109933..e24f74c592ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -152,7 +152,7 @@ protected List selectScannersFrom(HStore store, for (KeyValueScanner scanner : scanners) { newScanners.add(new DelegatingKeyValueScanner(scanner) { @Override - public boolean reseek(Cell key) throws IOException { + public boolean reseek(ExtendedCell key) throws IOException { if (ON.get()) { REQ_COUNT.incrementAndGet(); if (!THROW_ONCE.get() || REQ_COUNT.get() == 1) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 13955ccebfec..31b71fdb7e9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; @@ -163,7 +164,7 @@ public void testHalfScanner() throws IOException { w.close(); HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf); - Cell midKV = r.midKey().get(); + ExtendedCell midKV = r.midKey().get(); byte[] midkey = CellUtil.cloneRow(midKV); Reference bottom = new Reference(midkey, Reference.Range.bottom); @@ -212,7 +213,7 @@ public void testHalfScanner() throws IOException { assertNull(foundKeyValue); } - private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore, + private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, ExtendedCell seekBefore, CacheConfig cacheConfig) throws IOException { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build(); StoreFileInfo storeFileInfo = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index a502113325fa..9c6063ba58a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -225,7 +226,7 @@ public void testSeekingOnSample() throws IOException { for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); KeyValue midKv = sampleKv.get(sampleKv.size() / 2); - Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); + ExtendedCell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } LOG.info("Done"); @@ -278,7 +279,7 @@ public void testSeekingToOffHeapKeyValueInSample() throws IOException { for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); KeyValue midKv = sampleKv.get(sampleKv.size() / 2); - Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); + ExtendedCell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } LOG.info("Done"); @@ -392,7 +393,7 @@ public void testRowIndexWithTagsButNoTagsInCell() throws IOException { } private void checkSeekingConsistency(List encodedSeekers, - boolean seekBefore, Cell keyValue) { + boolean seekBefore, ExtendedCell keyValue) { Cell expectedKeyValue = null; ByteBuffer expectedKey = null; ByteBuffer expectedValue = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index 43f4605a0fc1..f100b016eb83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -163,7 +164,7 @@ public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException { KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv4); - Cell toSeek = PrivateCellUtil.createLastOnRow(kv3); + ExtendedCell toSeek = PrivateCellUtil.createLastOnRow(kv3); seekToTheKey(kv3, sampleKv, toSeek); } @@ -276,7 +277,8 @@ public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException { seekToTheKey(kv5, sampleKv, toSeek); } - private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) throws IOException { + private void seekToTheKey(KeyValue expected, List kvs, ExtendedCell toSeek) + throws IOException { // create all seekers List encodedSeekers = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { @@ -301,7 +303,7 @@ private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) th } private void checkSeekingConsistency(List encodedSeekers, - Cell 
keyValue, KeyValue expected) { + ExtendedCell keyValue, KeyValue expected) { for (DataBlockEncoder.EncodedSeeker seeker : encodedSeekers) { seeker.seekToKeyInBlock(keyValue, false); Cell keyValue2 = seeker.getCell(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 189af113b334..5324cde14b70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -49,11 +49,11 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; @@ -494,16 +494,17 @@ public void testCorruptOutOfOrderHFileWrite() throws IOException { .withCompression(Compression.Algorithm.NONE).withCompressTags(false).build(); HFileWriterImpl writer = new HFileWriterImpl(conf, cacheConf, path, mockedOutputStream, fileContext); - CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); byte[] row = Bytes.toBytes("foo"); byte[] qualifier = Bytes.toBytes("qualifier"); byte[] cf = Bytes.toBytes(columnFamily); byte[] val = Bytes.toBytes("fooVal"); long firstTS = 100L; long secondTS = 101L; - Cell firstCell = cellBuilder.setRow(row).setValue(val).setTimestamp(firstTS) + ExtendedCell firstCell = cellBuilder.setRow(row).setValue(val).setTimestamp(firstTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); - Cell secondCell = cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) + ExtendedCell secondCell = cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); // second Cell will sort "higher" than the first because later timestamps should come first writer.append(firstCell); @@ -783,18 +784,22 @@ public void testCompressionOrdinance() { @Test public void testShortMidpointSameQual() { - Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"), 11, - KeyValue.Type.Maximum.getCode(), HConstants.EMPTY_BYTE_ARRAY); - Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"), 9, - KeyValue.Type.Maximum.getCode(), HConstants.EMPTY_BYTE_ARRAY); - Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); + ExtendedCell left = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) + .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(11) + .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); + ExtendedCell right = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) + .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(9) + 
       .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build();
+    ExtendedCell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0);
   }

-  private Cell getCell(byte[] row, byte[] family, byte[] qualifier) {
+  private ExtendedCell getCell(byte[] row, byte[] family, byte[] qualifier) {
     return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row)
       .setFamily(family).setQualifier(qualifier).setTimestamp(HConstants.LATEST_TIMESTAMP)
       .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build();
@@ -802,61 +807,61 @@ private Cell getCell(byte[] row, byte[] family, byte[] qualifier) {
   @Test
   public void testGetShortMidpoint() {
-    Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
+    ExtendedCell left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    ExtendedCell right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    ExtendedCell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
-    left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    left = getCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
     assertEquals(1, mid.getRowLength());
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
     assertEquals(2, mid.getFamilyLength());
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
     assertEquals(2, mid.getQualifierLength());
-    left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b"));
+    left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b"));
     mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
@@ -879,8 +884,8 @@ public void testGetShortMidpoint() {
     // Assert that if meta comparator, it returns the right cell -- i.e. no
     // optimization done.
-    left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
-    right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    left = getCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
+    right = getCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
     mid = HFileWriterImpl.getMidpoint(MetaCellComparator.META_COMPARATOR, left, right);
     assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
     assertTrue(
       PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java
index f349adf92006..7c16098c1d9f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java
@@ -44,9 +44,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.ExtendedCellBuilder;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -465,7 +466,8 @@ protected void before() throws Throwable {
       HFile.getWriterFactory(testingUtility.getConfiguration(), CacheConfig.DISABLED)
         .withPath(hfs, path).withFileContext(context);

-    CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
+    ExtendedCellBuilder cellBuilder =
+      ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY);
     Random rand = new Random(Instant.now().toEpochMilli());
     byte[] family = Bytes.toBytes("f");
     try (HFile.Writer writer = factory.create()) {
@@ -473,7 +475,7 @@ protected void before() throws Throwable {
         byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 100);
         byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand);
         byte[] value = RandomKeyValueUtil.randomValue(rand);
-        Cell cell = cellBuilder.setType(Cell.Type.Put).setRow(row).setFamily(family)
+        ExtendedCell cell = cellBuilder.setType(Cell.Type.Put).setRow(row).setFamily(family)
           .setQualifier(qualifier).setValue(value).build();
         writer.append(cell);
         cellBuilder.clear();
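A pattern worth naming before the next several files: wherever a test hands a freshly built cell to an internal writer, CellBuilderFactory is swapped for the IA.Private ExtendedCellBuilderFactory so the result satisfies the ExtendedCell-typed append path. A minimal sketch of the idiom, assuming the writer and byte[] inputs from the hunk above:

    // ExtendedCellBuilder keeps the same fluent setters; only the factory changes.
    ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
      .setType(Cell.Type.Put).setRow(row).setFamily(family)
      .setQualifier(qualifier).setValue(value).build();
    writer.append(cell); // HFile.Writer#append now takes ExtendedCell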
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 2872065a43e8..11877a195f31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -38,8 +38,10 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -725,7 +727,10 @@ public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws
       byte[] b = Bytes.toBytes(i);
       System.arraycopy(b, 0, rowkey, rowkey.length - b.length, b.length);
       keys.add(rowkey);
-      hfw.append(CellUtil.createCell(rowkey));
+      hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowkey)
+        .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY)
+        .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode())
+        .setValue(HConstants.EMPTY_BYTE_ARRAY).build());
     }
     hfw.close();
@@ -733,7 +738,10 @@ public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws
     // Scanner doesn't do Cells yet. Fix.
     HFileScanner scanner = reader.getScanner(conf, true, true);
     for (int i = 0; i < keys.size(); ++i) {
-      scanner.seekTo(CellUtil.createCell(keys.get(i)));
+      scanner.seekTo(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+        .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY)
+        .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP)
+        .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build());
     }
     reader.close();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index ebec927ea91e..fb0937a6f678 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -28,7 +28,7 @@
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -153,7 +153,7 @@ public void testEncoding() throws IOException {
   public void testEncodingWithOffheapKeyValue() throws IOException {
     // usually we have just block without headers, but don't complicate that
     try {
-      List<Cell> kvs = generator.generateTestExtendedOffheapKeyValues(60, true);
+      List<ExtendedCell> kvs = generator.generateTestExtendedOffheapKeyValues(60, true);
       HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
         .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE)
         .withBlockSize(0).withChecksumType(ChecksumType.NULL).build();
@@ -215,7 +215,7 @@ private HFileBlock createBlockOnDisk(Configuration conf, List<KeyValue> kvs, HFi
       block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext(), ByteBuffAllocator.HEAP);
   }

-  private void writeBlock(Configuration conf, List<Cell> kvs, HFileContext fileContext,
+  private void writeBlock(Configuration conf, List<ExtendedCell> kvs, HFileContext fileContext,
     boolean useTags) throws IOException {
     HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf,
       blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
@@ -224,7 +224,7 @@ private void writeBlock(Configuration conf, List<Cell> kvs, HFileContext fileCon
     baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
     DataOutputStream dos = new DataOutputStream(baos);
     blockEncoder.startBlockEncoding(context, dos);
-    for (Cell kv : kvs) {
+    for (ExtendedCell kv : kvs) {
       blockEncoder.encode(kv, context, dos);
     }
   }
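The removed CellUtil.createCell(byte[]) convenience has no ExtendedCell replacement, which is why the index test above (and TestHFileInlineToRootChunkConversion below) inlines the full builder chain. If the repetition keeps growing, a test-local helper could absorb it; the following is a purely hypothetical refactor, not part of the patch:

    // Hypothetical helper mirroring what CellUtil.createCell(row) used to produce.
    private static ExtendedCell rowOnlyCell(byte[] row) {
      return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row)
        .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY)
        .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode())
        .setValue(HConstants.EMPTY_BYTE_ARRAY).build();
    }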
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
index edab56a7afce..0491e429e7a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
@@ -22,9 +22,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -79,7 +82,10 @@ public void testWriteHFile() throws Exception {
       byte[] k = Bytes.toBytes(keyStr);
       keys.add(k);
       byte[] v = Bytes.toBytes("value" + i);
-      hfw.append(CellUtil.createCell(k, v));
+      hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(k)
+        .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY)
+        .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode())
+        .setValue(v).build());
     }
     hfw.close();
@@ -87,7 +93,10 @@ public void testWriteHFile() throws Exception {
     // Scanner doesn't do Cells yet. Fix.
     HFileScanner scanner = reader.getScanner(conf, true, true);
     for (int i = 0; i < keys.size(); ++i) {
-      scanner.seekTo(CellUtil.createCell(keys.get(i)));
+      scanner.seekTo((ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+        .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY)
+        .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP)
+        .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()));
     }
     reader.close();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index 85a0ac33d558..14557fe0af26 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -34,7 +34,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -105,8 +105,8 @@ private static byte[] randLongBytes() {
   private Path workDir;
   private FileSystem fs;
   private Path hfilePath;
-  private Cell firstCell = null;
-  private Cell secondCell = null;
+  private ExtendedCell firstCell = null;
+  private ExtendedCell secondCell = null;
   private ByteBuffAllocator allocator;

   @BeforeClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
index 6fe90105f816..a262545ee726 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -104,7 +105,7 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException {
     conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
     conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10);

-    Cell[] cells = new Cell[NUM_KV];
+    ExtendedCell[] cells = new ExtendedCell[NUM_KV];

     Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), String.format(
       "testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s", hfileVersion, bloomType, testI));
@@ -163,13 +164,15 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException {
     }
   }

-  private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
+  private void checkSeekBefore(ExtendedCell[] cells, HFileScanner scanner, int i)
+    throws IOException {
     assertEquals(
       "Failed to seek to the key before #" + i + " (" + CellUtil.getCellKeyAsString(cells[i]) + ")",
       true, scanner.seekBefore(cells[i]));
   }

-  private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
+  private void checkNoSeekBefore(ExtendedCell[] cells, HFileScanner scanner, int i)
+    throws IOException {
     assertEquals("Incorrectly succeeded in seeking to before first key ("
       + CellUtil.getCellKeyAsString(cells[i]) + ")", false, scanner.seekBefore(cells[i]));
   }
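The compactor change below shows the other direction of the migration: where an API still hands back plain Cell (the List an InternalScanner fills), server-side code downcasts once at the top of the loop. A sketch of that idiom, assuming — as the compaction pipeline guarantees in practice — that every scanned cell is an ExtendedCell:

    // One explicit downcast at the loop head; everything downstream is typed ExtendedCell.
    for (Cell cell : cells) {
      ExtendedCell c = (ExtendedCell) cell;
      // ... MOB checks elided; writers such as mobFileWriter accept only ExtendedCell.
    }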
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
index 57446abfcd1f..971807592543 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
@@ -149,7 +150,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
     long shippedCallSizeLimit =
       (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize();

-    Cell mobCell = null;
+    ExtendedCell mobCell = null;

     long counter = 0;
     long countFailAt = -1;
@@ -179,7 +180,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
         if (LOG.isDebugEnabled()) {
           now = EnvironmentEdgeManager.currentTime();
         }
-        for (Cell c : cells) {
+        for (Cell cell : cells) {
+          ExtendedCell c = (ExtendedCell) cell;
           counter++;
           if (compactMOBs) {
             if (MobUtils.isMobReferenceCell(c)) {
@@ -294,7 +296,8 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
                 mobCells++;
                 // append the original keyValue in the mob file.
                 mobFileWriter.append(c);
-                Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
+                ExtendedCell reference =
+                  MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
                 // write the cell whose value is the path of a mob file to the store file.
                 writer.append(reference);
                 cellsCountCompactedToMob++;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
index b47184390e86..5ce880485312 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
@@ -20,7 +20,7 @@
 import java.io.IOException;
 import java.util.function.IntConsumer;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.client.Scan;

 public class DelegatingKeyValueScanner implements KeyValueScanner {
@@ -36,22 +36,22 @@ public void shipped() throws IOException {
   }

   @Override
-  public Cell peek() {
+  public ExtendedCell peek() {
     return delegate.peek();
   }

   @Override
-  public Cell next() throws IOException {
+  public ExtendedCell next() throws IOException {
     return delegate.next();
   }

   @Override
-  public boolean seek(Cell key) throws IOException {
+  public boolean seek(ExtendedCell key) throws IOException {
     return delegate.seek(key);
   }

   @Override
-  public boolean reseek(Cell key) throws IOException {
+  public boolean reseek(ExtendedCell key) throws IOException {
     return delegate.reseek(key);
   }
@@ -71,7 +71,8 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS)
   }

   @Override
-  public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
+  public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom)
+    throws IOException {
     return delegate.requestSeek(kv, forward, useBloom);
   }
@@ -96,12 +97,12 @@ public Path getFilePath() {
   }

   @Override
-  public boolean backwardSeek(Cell key) throws IOException {
+  public boolean backwardSeek(ExtendedCell key) throws IOException {
     return delegate.backwardSeek(key);
   }

   @Override
-  public boolean seekToPreviousRow(Cell key) throws IOException {
+  public boolean seekToPreviousRow(ExtendedCell key) throws IOException {
     return delegate.seekToPreviousRow(key);
   }
@@ -111,7 +112,7 @@ public boolean seekToLastRow() throws IOException {
   }

   @Override
-  public Cell getNextIndexedKey() {
+  public ExtendedCell getNextIndexedKey() {
     return delegate.getNextIndexedKey();
   }
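With every KeyValueScanner method above expressed in ExtendedCell terms, a delegating wrapper compiles with no casts at all. A small consumption sketch under the new signatures (the scanner instance and the process() consumer are assumptions, not part of the patch):

    // seek() and next() now traffic in ExtendedCell end to end.
    ExtendedCell cur;
    scanner.seek(KeyValue.LOWESTKEY);
    while ((cur = scanner.next()) != null) {
      process(cur); // hypothetical consumer
    }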
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
index fbd6286c5a94..5d6c49d97a5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -54,8 +55,8 @@ public EncodedSeekPerformanceTest() {
     numberOfSeeks = DEFAULT_NUMBER_OF_SEEKS;
   }

-  private List<Cell> prepareListOfTestSeeks(Path path) throws IOException {
-    List<Cell> allKeyValues = new ArrayList<>();
+  private List<ExtendedCell> prepareListOfTestSeeks(Path path) throws IOException {
+    List<ExtendedCell> allKeyValues = new ArrayList<>();

     // read all of the key values
     HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration,
@@ -63,7 +64,7 @@ private List<Cell> prepareListOfTestSeeks(Path path) throws IOException {
     storeFile.initReader();
     StoreFileReader reader = storeFile.getReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(true, false, false, 0, 0, false);
-    Cell current;
+    ExtendedCell current;

     scanner.seek(KeyValue.LOWESTKEY);
     while (null != (current = scanner.next())) {
@@ -73,9 +74,9 @@ private List<Cell> prepareListOfTestSeeks(Path path) throws IOException {
     storeFile.closeStoreFile(cacheConf.shouldEvictOnClose());

     // pick seeks by random
-    List<Cell> seeks = new ArrayList<>();
+    List<ExtendedCell> seeks = new ArrayList<>();
     for (int i = 0; i < numberOfSeeks; ++i) {
-      Cell keyValue = allKeyValues.get(randomizer.nextInt(allKeyValues.size()));
+      ExtendedCell keyValue = allKeyValues.get(randomizer.nextInt(allKeyValues.size()));
       seeks.add(keyValue);
     }
@@ -84,7 +85,7 @@ private List<Cell> prepareListOfTestSeeks(Path path) throws IOException {
     return seeks;
   }

-  private void runTest(Path path, DataBlockEncoding blockEncoding, List<Cell> seeks)
+  private void runTest(Path path, DataBlockEncoding blockEncoding, List<ExtendedCell> seeks)
     throws IOException {
     // read all of the key values
     HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration,
@@ -108,7 +109,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, List<Cell> seek
     // do seeks
     long startSeeksTime = System.nanoTime();
-    for (Cell keyValue : seeks) {
+    for (ExtendedCell keyValue : seeks) {
       scanner.seek(keyValue);
       Cell toVerify = scanner.next();
       if (!keyValue.equals(toVerify)) {
@@ -145,7 +146,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, List<Cell> seek
    * @throws IOException if there is a bug while reading from disk
    */
   public void runTests(Path path, DataBlockEncoding[] encodings) throws IOException {
-    List<Cell> seeks = prepareListOfTestSeeks(path);
+    List<ExtendedCell> seeks = prepareListOfTestSeeks(path);

     for (DataBlockEncoding blockEncoding : encodings) {
       runTest(path, blockEncoding, seeks);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
index 9a3c5d2e218b..f404cb2128e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
@@ -19,8 +19,8 @@

 import java.util.ArrayList;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
@@ -30,7 +30,7 @@
  * file scanner.
  */
 public class KeyValueScanFixture extends CollectionBackedScanner {
-  public KeyValueScanFixture(CellComparator comparator, Cell... cells) {
+  public KeyValueScanFixture(CellComparator comparator, ExtendedCell... cells) {
     super(comparator, cells);
   }
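KeyValue implements ExtendedCell, so callers of the fixture keep passing KeyValue instances unchanged; only the varargs type tightened. A usage sketch, with the row/family/qualifier byte arrays assumed:

    // KeyValues satisfy the ExtendedCell... varargs directly; no conversion needed.
    KeyValueScanFixture fixture = new KeyValueScanFixture(CellComparator.getInstance(),
      new KeyValue(row1, fam, qual, data), new KeyValue(row2, fam, qual, data));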
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
index b31c738149f9..16732d8b8e25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
@@ -25,8 +25,9 @@
 import java.util.TreeMap;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -197,10 +198,10 @@ public void close(boolean evictOnClose) throws IOException {
   }

   @Override
-  public Optional<Cell> getLastKey() {
+  public Optional<ExtendedCell> getLastKey() {
     if (splitPoint != null) {
       return Optional
-        .of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put)
+        .of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put)
           .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
     } else {
       return Optional.empty();
@@ -208,9 +209,9 @@ public Optional<Cell> getLastKey() {
   }

   @Override
-  public Optional<Cell> midKey() throws IOException {
+  public Optional<ExtendedCell> midKey() throws IOException {
     if (splitPoint != null) {
-      return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+      return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
         .setType(Cell.Type.Put).setRow(splitPoint).build());
     } else {
       return Optional.empty();
@@ -218,9 +219,9 @@ public Optional<Cell> midKey() throws IOException {
   }

   @Override
-  public Optional<Cell> getFirstKey() {
+  public Optional<ExtendedCell> getFirstKey() {
     if (splitPoint != null) {
-      return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+      return Optional.of(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
         .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1).build());
     } else {
       return Optional.empty();
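Store file boundary keys (first/mid/last) are now Optional<ExtendedCell>, and as the mock above shows, a bare row is enough to stand in for one. A condensed sketch of the pattern, assuming splitPoint is non-null:

    // A row-only ExtendedCell acts as a boundary key; no qualifier or value required.
    Optional<ExtendedCell> midKey = Optional.of(ExtendedCellBuilderFactory
      .create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put).setRow(splitPoint).build());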
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
index efad9d38e0c4..e2f9ac2f34ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -64,16 +65,18 @@ public static Object[] data() {
   private static final int NUM_OF_CELLS = 4;
   private static final int SMALL_CHUNK_SIZE = 64;
-  private Cell ascCells[];
-  private CellArrayMap ascCbOnHeap;
-  private Cell descCells[];
-  private CellArrayMap descCbOnHeap;
+  private ExtendedCell[] ascCells;
+  private CellArrayMap<ExtendedCell> ascCbOnHeap;
+  private ExtendedCell[] descCells;
+  private CellArrayMap<ExtendedCell> descCbOnHeap;
   private final static Configuration CONF = new Configuration();
   private KeyValue lowerOuterCell;
   private KeyValue upperOuterCell;

-  private CellChunkMap ascCCM; // for testing ascending CellChunkMap with one chunk in array
-  private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk in array
+  private CellChunkMap<ExtendedCell> ascCCM; // for testing ascending CellChunkMap with one chunk in
+                                             // array
+  private CellChunkMap<ExtendedCell> descCCM; // for testing descending CellChunkMap with one chunk
+                                              // in array
   private final boolean smallChunks;
   private static ChunkCreator chunkCreator;
@@ -116,10 +119,12 @@ public void setUp() throws Exception {
     final KeyValue kv4 = new KeyValue(four, f, q, 40, v);
     lowerOuterCell = new KeyValue(Bytes.toBytes(10), f, q, 10, v);
     upperOuterCell = new KeyValue(Bytes.toBytes(50), f, q, 10, v);
-    ascCells = new Cell[] { kv1, kv2, kv3, kv4 };
-    ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells, 0, NUM_OF_CELLS, false);
-    descCells = new Cell[] { kv4, kv3, kv2, kv1 };
-    descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells, 0, NUM_OF_CELLS, true);
+    ascCells = new ExtendedCell[] { kv1, kv2, kv3, kv4 };
+    ascCbOnHeap = new CellArrayMap<ExtendedCell>(CellComparator.getInstance(), ascCells, 0,
+      NUM_OF_CELLS, false);
+    descCells = new ExtendedCell[] { kv4, kv3, kv2, kv1 };
+    descCbOnHeap = new CellArrayMap<ExtendedCell>(CellComparator.getInstance(), descCells, 0,
+      NUM_OF_CELLS, true);

     CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
     CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
@@ -138,7 +143,7 @@ public void setUp() throws Exception {
   /* Create and test ascending CellSet based on CellArrayMap */
   @Test
   public void testCellArrayMapAsc() throws Exception {
-    CellSet cs = new CellSet(ascCbOnHeap);
+    CellSet<ExtendedCell> cs = new CellSet<>(ascCbOnHeap);
     testCellBlocks(cs);
     testIterators(cs);
   }
@@ -146,11 +151,11 @@ public void testCellArrayMapAsc() throws Exception {
   /* Create and test ascending and descending CellSet based on CellChunkMap */
   @Test
   public void testCellChunkMap() throws Exception {
-    CellSet cs = new CellSet(ascCCM);
+    CellSet<ExtendedCell> cs = new CellSet<>(ascCCM);
     testCellBlocks(cs);
     testIterators(cs);
     testSubSet(cs);
-    cs = new CellSet(descCCM);
+    cs = new CellSet<>(descCCM);
     testSubSet(cs);
     // cs = new CellSet(ascMultCCM);
     // testCellBlocks(cs);
@@ -161,26 +166,26 @@ public void testCellChunkMap() throws Exception {

   @Test
   public void testAsc() throws Exception {
-    CellSet ascCs = new CellSet(ascCbOnHeap);
+    CellSet<ExtendedCell> ascCs = new CellSet<>(ascCbOnHeap);
     assertEquals(NUM_OF_CELLS, ascCs.size());
     testSubSet(ascCs);
   }

   @Test
   public void testDesc() throws Exception {
-    CellSet descCs = new CellSet(descCbOnHeap);
+    CellSet<ExtendedCell> descCs = new CellSet<>(descCbOnHeap);
     assertEquals(NUM_OF_CELLS, descCs.size());
     testSubSet(descCs);
   }

-  private void testSubSet(CellSet cs) throws Exception {
+  private void testSubSet(CellSet<ExtendedCell> cs) throws Exception {
     for (int i = 0; i != ascCells.length; ++i) {
-      NavigableSet<Cell> excludeTail = cs.tailSet(ascCells[i], false);
-      NavigableSet<Cell> includeTail = cs.tailSet(ascCells[i], true);
+      NavigableSet<ExtendedCell> excludeTail = cs.tailSet(ascCells[i], false);
+      NavigableSet<ExtendedCell> includeTail = cs.tailSet(ascCells[i], true);
       assertEquals(ascCells.length - 1 - i, excludeTail.size());
       assertEquals(ascCells.length - i, includeTail.size());
-      Iterator<Cell> excludeIter = excludeTail.iterator();
-      Iterator<Cell> includeIter = includeTail.iterator();
+      Iterator<ExtendedCell> excludeIter = excludeTail.iterator();
+      Iterator<ExtendedCell> includeIter = includeTail.iterator();
       for (int j = 1 + i; j != ascCells.length; ++j) {
         assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j]));
       }
@@ -191,12 +196,12 @@ private void testSubSet(CellSet cs) throws Exception {
     assertEquals(NUM_OF_CELLS, cs.tailSet(lowerOuterCell, false).size());
     assertEquals(0, cs.tailSet(upperOuterCell, false).size());
     for (int i = 0; i != ascCells.length; ++i) {
-      NavigableSet<Cell> excludeHead = cs.headSet(ascCells[i], false);
-      NavigableSet<Cell> includeHead = cs.headSet(ascCells[i], true);
+      NavigableSet<ExtendedCell> excludeHead = cs.headSet(ascCells[i], false);
+      NavigableSet<ExtendedCell> includeHead = cs.headSet(ascCells[i], true);
       assertEquals(i, excludeHead.size());
       assertEquals(i + 1, includeHead.size());
-      Iterator<Cell> excludeIter = excludeHead.iterator();
-      Iterator<Cell> includeIter = includeHead.iterator();
+      Iterator<ExtendedCell> excludeIter = excludeHead.iterator();
+      Iterator<ExtendedCell> includeIter = includeHead.iterator();
       for (int j = 0; j != i; ++j) {
         assertEquals(true, CellUtil.equals(excludeIter.next(), ascCells[j]));
       }
@@ -207,17 +212,17 @@ private void testSubSet(CellSet cs) throws Exception {
     assertEquals(0, cs.headSet(lowerOuterCell, false).size());
     assertEquals(NUM_OF_CELLS, cs.headSet(upperOuterCell, false).size());

-    NavigableMap<Cell, Cell> sub =
+    NavigableMap<ExtendedCell, ExtendedCell> sub =
       cs.getDelegatee().subMap(lowerOuterCell, true, upperOuterCell, true);
     assertEquals(NUM_OF_CELLS, sub.size());
-    Iterator<Cell> iter = sub.values().iterator();
+    Iterator<ExtendedCell> iter = sub.values().iterator();
     for (int i = 0; i != ascCells.length; ++i) {
       assertEquals(true, CellUtil.equals(iter.next(), ascCells[i]));
     }
   }

   /* Generic basic test for immutable CellSet */
-  private void testCellBlocks(CellSet cs) throws Exception {
+  private void testCellBlocks(CellSet<ExtendedCell> cs) throws Exception {
     final byte[] oneAndHalf = Bytes.toBytes(20);
     final byte[] f = Bytes.toBytes("f");
     final byte[] q = Bytes.toBytes("q");
@@ -235,12 +240,13 @@ private void testCellBlocks(CellSet cs) throws Exception {
     Cell last = cs.last();
     assertTrue(ascCells[NUM_OF_CELLS - 1].equals(last));

-    SortedSet<Cell> tail = cs.tailSet(ascCells[1]); // check tail abd head sizes
+    SortedSet<ExtendedCell> tail = cs.tailSet(ascCells[1]); // check tail abd head sizes
     assertEquals(NUM_OF_CELLS - 1, tail.size());
-    SortedSet<Cell> head = cs.headSet(ascCells[1]);
+    SortedSet<ExtendedCell> head = cs.headSet(ascCells[1]);
     assertEquals(1, head.size());

-    SortedSet<Cell> tailOuter = cs.tailSet(outerCell); // check tail starting from outer cell
+    SortedSet<ExtendedCell> tailOuter = cs.tailSet(outerCell); // check tail starting from outer
+                                                               // cell
     assertEquals(NUM_OF_CELLS - 1, tailOuter.size());

     Cell tailFirst = tail.first();
@@ -255,8 +261,7 @@ private void testCellBlocks(CellSet cs) throws Exception {
   }

   /* Generic iterators test for immutable CellSet */
-  private void testIterators(CellSet cs) throws Exception {
-
+  private void testIterators(CellSet<ExtendedCell> cs) throws Exception {
     // Assert that we have NUM_OF_CELLS values and that they are in order
     int count = 0;
     for (Cell kv : cs) {
@@ -273,7 +278,7 @@ private void testIterators(CellSet cs) throws Exception {

     // Test descending iterator
     count = 0;
-    for (Iterator<Cell> i = cs.descendingIterator(); i.hasNext();) {
+    for (Iterator<ExtendedCell> i = cs.descendingIterator(); i.hasNext();) {
       Cell kv = i.next();
       assertEquals(ascCells[NUM_OF_CELLS - (count + 1)], kv);
       count++;
@@ -282,8 +287,7 @@ private void testIterators(CellSet cs) throws Exception {
   }

   /* Create CellChunkMap with four cells inside the index chunk */
-  private CellChunkMap setUpCellChunkMap(boolean asc) {
-
+  private CellChunkMap<ExtendedCell> setUpCellChunkMap(boolean asc) {
     // allocate new chunks and use the data chunk to hold the full data of the cells
     // and the index chunk to hold the cell-representations
     Chunk dataChunk = chunkCreator.getChunk();
@@ -298,9 +302,9 @@ private CellChunkMap setUpCellChunkMap(boolean asc) {
     int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer
     int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID

-    Cell[] cellArray = asc ? ascCells : descCells;
+    ExtendedCell[] cellArray = asc ? ascCells : descCells;

-    for (Cell kv : cellArray) {
+    for (ExtendedCell kv : cellArray) {
       // do we have enough space to write the cell data on the data chunk?
       if (dataOffset + kv.getSerializedSize() > chunkCreator.getChunkSize()) {
         // allocate more data chunks if needed
@@ -326,14 +330,14 @@ private CellChunkMap setUpCellChunkMap(boolean asc) {
       idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId
     }

-    return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc);
+    return new CellChunkMap<>(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc);
   }

   /*
    * Create CellChunkMap with four cells inside the data jumbo chunk. This test is working only with
    * small chunks sized SMALL_CHUNK_SIZE (64) bytes
    */
-  private CellChunkMap setUpJumboCellChunkMap(boolean asc) {
+  private CellChunkMap<ExtendedCell> setUpJumboCellChunkMap(boolean asc) {
     int smallChunkSize = SMALL_CHUNK_SIZE + 8;
     // allocate new chunks and use the data JUMBO chunk to hold the full data of the cells
     // and the normal index chunk to hold the cell-representations
@@ -350,9 +354,9 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) {
     int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer
     int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID

-    Cell[] cellArray = asc ? ascCells : descCells;
+    ExtendedCell[] cellArray = asc ? ascCells : descCells;

-    for (Cell kv : cellArray) {
+    for (ExtendedCell kv : cellArray) {
       int dataStartOfset = dataOffset;
       dataOffset = KeyValueUtil.appendTo(kv, dataBuffer, dataOffset, false); // write deep cell data
@@ -378,6 +382,6 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) {
       dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER;
     }

-    return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc);
+    return new CellChunkMap<>(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc);
   }
 }
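CellSet, CellArrayMap and CellChunkMap are now generic in the cell type, which is what lets the flat-set tests above declare CellSet<ExtendedCell> and get typed set views and iterators back without casts. A minimal sketch of how the parameter flows, with the comparator and backing map as in the test:

    // The type parameter propagates from the backing map into every view.
    CellSet<ExtendedCell> cs = new CellSet<>(ascCbOnHeap);
    NavigableSet<ExtendedCell> tail = cs.tailSet(ascCells[0], true);
    Iterator<ExtendedCell> it = tail.iterator();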
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
index b8ee022c9c21..d40516a501fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -45,7 +46,7 @@ public class TestCellSkipListSet {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestCellSkipListSet.class);

-  private final CellSet csls = new CellSet(CellComparatorImpl.COMPARATOR);
+  private final CellSet<ExtendedCell> csls = new CellSet<>(CellComparatorImpl.COMPARATOR);

   @Rule
   public TestName name = new TestName();
@@ -125,7 +126,7 @@ public void testDescendingIterator() throws Exception {
     }
     // Assert that we added 'total' values and that they are in order
     int count = 0;
-    for (Iterator<Cell> i = this.csls.descendingIterator(); i.hasNext();) {
+    for (Iterator<ExtendedCell> i = this.csls.descendingIterator(); i.hasNext();) {
       Cell kv = i.next();
       assertEquals("" + (total - (count + 1)),
         Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()));
@@ -141,7 +142,7 @@ public void testDescendingIterator() throws Exception {
     // Assert that we added 'total' values and that they are in order and that
     // we are getting back value2
     count = 0;
-    for (Iterator<Cell> i = this.csls.descendingIterator(); i.hasNext();) {
+    for (Iterator<ExtendedCell> i = this.csls.descendingIterator(); i.hasNext();) {
       Cell kv = i.next();
       assertEquals("" + (total - (count + 1)),
         Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()));
@@ -164,9 +165,9 @@ public void testHeadTail() throws Exception {
       if (i == 1) splitter = kv;
       this.csls.add(kv);
     }
-    SortedSet<Cell> tail = this.csls.tailSet(splitter);
+    SortedSet<ExtendedCell> tail = this.csls.tailSet(splitter);
     assertEquals(2, tail.size());
-    SortedSet<Cell> head = this.csls.headSet(splitter);
+    SortedSet<ExtendedCell> head = this.csls.headSet(splitter);
     assertEquals(1, head.size());
     // Now ensure that we get back right answer even when we do tail or head.
     // Now overwrite with a new value.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index ab2a80782bc2..fea89e4f94a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -307,7 +308,7 @@ public void testGet_memstoreAndSnapShot() throws IOException {
   public void testUpsertMemstoreSize() throws Exception {
     MemStoreSize oldSize = memstore.size();

-    List<Cell> l = new ArrayList<>();
+    List<ExtendedCell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
     KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v");
     KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v");
@@ -364,7 +365,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception {
     t = runSnapshot(memstore, true);

     // test the case that the timeOfOldestEdit is updated after a KV upsert
-    List<Cell> l = new ArrayList<>();
+    List<ExtendedCell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
     kv1.setSequenceId(100);
     l.add(kv1);
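Because KeyValue is already an ExtendedCell, memstore tests like the two upsert cases above migrate by retyping only the list declaration; the KeyValue values themselves are untouched. A sketch of the pattern, with KeyValueTestUtil as in the hunks and an enclosing test method that declares throws Exception:

    // KeyValue drops into a List<ExtendedCell> with no conversion.
    List<ExtendedCell> l = new ArrayList<>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100); // mutator inherited from ExtendedCell
    l.add(kv1);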
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 60fdf2357759..2d9f1a37e6ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -824,7 +825,7 @@ public void testUpsertMemstoreSize() throws Exception {
     memstore = new DefaultMemStore(conf, CellComparatorImpl.COMPARATOR);
     MemStoreSize oldSize = memstore.size();

-    List<Cell> l = new ArrayList<>();
+    List<ExtendedCell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
     KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v");
     KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v");
@@ -886,7 +887,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception {
     t = runSnapshot(memstore);

     // test the case that the timeOfOldestEdit is updated after a KV upsert
-    List<Cell> l = new ArrayList<>();
+    List<ExtendedCell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
     kv1.setSequenceId(100);
     l.add(kv1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 7493f506da5e..a221c0497834 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -110,11 +111,11 @@ public class TestHMobStore {
   private byte[] value2 = Bytes.toBytes("value2");
   private Path mobFilePath;
   private Date currentDate = new Date();
-  private Cell seekKey1;
-  private Cell seekKey2;
-  private Cell seekKey3;
+  private ExtendedCell seekKey1;
+  private ExtendedCell seekKey2;
+  private ExtendedCell seekKey3;
   private NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
-  private List<Cell> expected = new ArrayList<>();
+  private List<ExtendedCell> expected = new ArrayList<>();
   private long id = EnvironmentEdgeManager.currentTime();
   private Get get = new Get(row);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
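One simplification visible in the TestHStore change below: ExtendedCellBuilder can set the sequence id during construction, so the old two-step of building a Cell and then calling PrivateCellUtil.setSequenceId collapses into the builder chain. A condensed sketch, with field names as in that test:

    // Sequence id becomes part of the build rather than a post-construction mutation.
    ExtendedCell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row)
      .setFamily(family).setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put)
      .setValue(value).setSequenceId(sequenceId).build();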
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 83856cda5135..b009974f5e6d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -76,12 +76,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -882,9 +882,9 @@ private static StoreFlushContext flushStore(HStore store, long id) throws IOExce
    * Generate a list of KeyValues for testing based on given parameters
    * @return the rows key-value list
    */
-  private List<Cell> getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier,
+  private List<ExtendedCell> getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier,
     byte[] family) {
-    List<Cell> kvList = new ArrayList<>();
+    List<ExtendedCell> kvList = new ArrayList<>();
     for (int i = 1; i <= numRows; i++) {
       byte[] b = Bytes.toBytes(i);
       for (long timestamp : timestamps) {
@@ -905,15 +905,15 @@ public void testMultipleTimestamps() throws IOException {

     init(this.name.getMethodName());

-    List<Cell> kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family);
-    for (Cell kv : kvList1) {
+    List<ExtendedCell> kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family);
+    for (ExtendedCell kv : kvList1) {
       this.store.add(kv, null);
     }

     flushStore(store, id++);

-    List<Cell> kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family);
-    for (Cell kv : kvList2) {
+    List<ExtendedCell> kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family);
+    for (ExtendedCell kv : kvList2) {
       this.store.add(kv, null);
     }
@@ -1200,34 +1200,37 @@ private long countMemStoreScanner(StoreScanner scanner) {
   public void testNumberOfMemStoreScannersAfterFlush() throws IOException {
     long seqId = 100;
     long timestamp = EnvironmentEdgeManager.currentTime();
-    Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-      .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
+    ExtendedCell cell0 =
+      ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
+        .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell0, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.emptyList());

-    Cell cell1 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-      .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
+    ExtendedCell cell1 =
+      ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell1, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1));

     seqId = 101;
     timestamp = EnvironmentEdgeManager.currentTime();
-    Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family)
-      .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
+    ExtendedCell cell2 =
+      ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell2, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1, cell2));
   }

-  private void testNumberOfMemStoreScannersAfterFlush(List<Cell> inputCellsBeforeSnapshot,
-    List<Cell> inputCellsAfterSnapshot) throws IOException {
+  private void testNumberOfMemStoreScannersAfterFlush(List<ExtendedCell> inputCellsBeforeSnapshot,
+    List<ExtendedCell> inputCellsAfterSnapshot) throws IOException {
     init(this.name.getMethodName() + "-" + inputCellsBeforeSnapshot.size());
     TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     long seqId = Long.MIN_VALUE;
-    for (Cell c : inputCellsBeforeSnapshot) {
+    for (ExtendedCell c : inputCellsBeforeSnapshot) {
       quals.add(CellUtil.cloneQualifier(c));
       seqId = Math.max(seqId, c.getSequenceId());
     }
-    for (Cell c : inputCellsAfterSnapshot) {
+    for (ExtendedCell c : inputCellsAfterSnapshot) {
       quals.add(CellUtil.cloneQualifier(c));
       seqId = Math.max(seqId, c.getSequenceId());
     }
@@ -1260,17 +1263,16 @@ private void testNumberOfMemStoreScannersAfterFlush(List<Cell> inputCellsBeforeS
     }
   }

-  private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value)
+  private ExtendedCell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value)
     throws IOException {
     return createCell(row, qualifier, ts, sequenceId, value);
   }

-  private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value)
-    throws IOException {
-    Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-      .setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put).setValue(value).build();
-    PrivateCellUtil.setSequenceId(c, sequenceId);
-    return c;
+  private ExtendedCell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId,
+    byte[] value) throws IOException {
+    return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row)
+      .setFamily(family).setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put)
+      .setValue(value).setSequenceId(sequenceId).build();
   }

   @Test
@@ -1988,8 +1990,8 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize()
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue);
     int smallCellByteSize = MutableSegment.getCellLength(smallCell);
     int largeCellByteSize = MutableSegment.getCellLength(largeCell);
     int flushByteSize = smallCellByteSize + largeCellByteSize - 2;
@@ -2035,7 +2037,7 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize()
       for (int i = 0; i < 100; i++) {
         long currentTimestamp = timestamp + 100 + i;
-        Cell cell = createCell(qf2, currentTimestamp, seqId, largeValue);
+        ExtendedCell cell = createCell(qf2, currentTimestamp, seqId, largeValue);
         store.add(cell, new NonThreadSafeMemStoreSizing());
       }
     } finally {
@@ -2064,7 +2066,7 @@ public void testCompactingMemStoreCellExceedInmemoryFlushSize() throws Exception
     MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing();
     long timestamp = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
-    Cell cell = createCell(qf1, timestamp, seqId, value);
+    ExtendedCell cell = createCell(qf1, timestamp, seqId, value);
     int cellByteSize = MutableSegment.getCellLength(cell);
     store.add(cell, memStoreSizing);
     assertTrue(memStoreSizing.getCellsCount() == 1);
@@ -2089,9 +2091,9 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
     final byte[] rowKey1 = Bytes.toBytes("rowKey1");
-    final Cell originalCell1 = createCell(rowKey1, qf1, timestamp, seqId, cellValue);
+    final ExtendedCell originalCell1 = createCell(rowKey1, qf1, timestamp, seqId, cellValue);
     final byte[] rowKey2 = Bytes.toBytes("rowKey2");
-    final Cell originalCell2 = createCell(rowKey2, qf1, timestamp, seqId, cellValue);
+    final ExtendedCell originalCell2 = createCell(rowKey2, qf1, timestamp, seqId, cellValue);
     TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     quals.add(qf1);
@@ -2124,9 +2126,9 @@ public void testForceCloneOfBigCellForCellChunkImmutableSegment() throws Excepti
     StoreScanner storeScanner =
       (StoreScanner) store.getScanner(new Scan(new Get(rowKey1)), quals, seqId + 1);
     SegmentScanner segmentScanner = getTypeKeyValueScanner(storeScanner, SegmentScanner.class);
-    Cell resultCell1 = segmentScanner.next();
+    ExtendedCell resultCell1 = segmentScanner.next();
     assertTrue(CellUtil.equals(resultCell1, originalCell1));
-    int cell1ChunkId = ((ExtendedCell) resultCell1).getChunkId();
+    int cell1ChunkId = resultCell1.getChunkId();
     assertTrue(cell1ChunkId != ExtendedCell.CELL_NOT_BASED_ON_CHUNK);
     assertNull(segmentScanner.next());
     segmentScanner.close();
@@ -2230,7 +2232,7 @@ private void doWriteTestLargeCellAndSmallCellConcurrently(IntBinaryOperator getF
     try {
       for (int i = 1; i <= MyCompactingMemStore3.CELL_COUNT; i++) {
         long currentTimestamp = timestamp + i;
-        Cell cell = createCell(qf1, currentTimestamp, seqId, smallValue);
+        ExtendedCell cell = createCell(qf1, currentTimestamp, seqId, smallValue);
         totalCellByteSize.addAndGet(MutableSegment.getCellLength(cell));
         store.add(cell, memStoreSizing);
       }
@@ -2261,7 +2263,7 @@ private void doWriteTestLargeCellAndSmallCellConcurrently(IntBinaryOperator getF
         Thread.currentThread().setName(MyCompactingMemStore3.LARGE_CELL_THREAD_NAME);
         for (int i = 1; i <= MyCompactingMemStore3.CELL_COUNT; i++) {
           long currentTimestamp = timestamp + i;
-          Cell cell = createCell(qf2, currentTimestamp, seqId, largeValue);
+          ExtendedCell cell = createCell(qf2, currentTimestamp, seqId, largeValue);
           totalCellByteSize.addAndGet(MutableSegment.getCellLength(cell));
           store.add(cell, memStoreSizing);
         }
@@ -2328,8 +2330,8 @@ public void testFlattenAndSnapshotCompactingMemStoreConcurrently() throws Except
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue);
     int smallCellByteSize = MutableSegment.getCellLength(smallCell);
     int largeCellByteSize = MutableSegment.getCellLength(largeCell);
     int totalCellByteSize = (smallCellByteSize + largeCellByteSize);
@@ -2431,8 +2433,8 @@ public void testFlattenSnapshotWriteCompactingMemeStoreConcurrently() throws Exc
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue);
     int smallCellByteSize = MutableSegment.getCellLength(smallCell);
     int largeCellByteSize = MutableSegment.getCellLength(largeCell);
     int firstWriteCellByteSize = (smallCellByteSize + largeCellByteSize);
@@ -2453,8 +2455,8 @@ public void testFlattenSnapshotWriteCompactingMemeStoreConcurrently() throws Exc
     store.add(largeCell, new NonThreadSafeMemStoreSizing());

     final AtomicReference<Throwable> exceptionRef = new AtomicReference<Throwable>();
-    final Cell writeAgainCell1 = createCell(qf3, timestamp, seqId + 1, largeValue);
-    final Cell writeAgainCell2 = createCell(qf4, timestamp, seqId + 1, largeValue);
+    final ExtendedCell writeAgainCell1 = createCell(qf3, timestamp, seqId + 1, largeValue);
+    final ExtendedCell writeAgainCell2 = createCell(qf4, timestamp, seqId + 1, largeValue);
     final int writeAgainCellByteSize =
       MutableSegment.getCellLength(writeAgainCell1) + MutableSegment.getCellLength(writeAgainCell2);
     final Thread writeAgainThread = new Thread(() -> {
@@ -2533,8 +2535,8 @@ public void testClearSnapshotGetScannerConcurrently() throws Exception {
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue);
     TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     quals.add(qf1);
     quals.add(qf2);
@@ -2683,15 +2685,14 @@ public CustomDefaultMemStore(Configuration conf, CellComparator c,
    */
   @Test
   public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception {
-    Configuration conf = HBaseConfiguration.create();

     byte[] smallValue = new byte[3];
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell = createCell(qf2, timestamp, seqId, largeValue);
     TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
     quals.add(qf1);
     quals.add(qf2);
@@ -2798,12 +2799,12 @@ public void testImmutableMemStoreLABRefCnt() throws Exception {
     byte[] largeValue = new byte[9];
     final long timestamp = EnvironmentEdgeManager.currentTime();
     final long seqId = 100;
-    final Cell smallCell1 = createCell(qf1, timestamp, seqId, smallValue);
-    final Cell largeCell1 = createCell(qf2, timestamp, seqId, largeValue);
-    final Cell smallCell2 = createCell(qf3, timestamp, seqId + 1, smallValue);
-    final Cell largeCell2 = createCell(qf4, timestamp, seqId + 1, largeValue);
-    final Cell smallCell3 = createCell(qf5, timestamp, seqId + 2, smallValue);
-    final Cell largeCell3 = createCell(qf6, timestamp, seqId + 2, largeValue);
+    final ExtendedCell smallCell1 = createCell(qf1, timestamp, seqId, smallValue);
+    final ExtendedCell largeCell1 = createCell(qf2, timestamp, seqId, largeValue);
+    final ExtendedCell smallCell2 = createCell(qf3, timestamp, seqId + 1, smallValue);
+    final ExtendedCell largeCell2 = createCell(qf4, timestamp, seqId + 1, largeValue);
+    final ExtendedCell smallCell3 = createCell(qf5, timestamp, seqId + 2, smallValue);
+    final ExtendedCell largeCell3 = createCell(qf6, timestamp, seqId + 2, largeValue);

     int smallCellByteSize = MutableSegment.getCellLength(smallCell1);
     int largeCellByteSize = MutableSegment.getCellLength(largeCell1);
@@ -3179,7 +3180,8 @@ protected boolean checkAndAddToActiveSize(MutableSegment currActive, Cell cellTo
     }

     @Override
-    protected void doAdd(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) {
+    protected void doAdd(MutableSegment currentActive, ExtendedCell cell,
+      MemStoreSizing memstoreSizing) {
       if (Thread.currentThread().getName().equals(SMALL_CELL_THREAD_NAME)) {
         try {
           /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
index d866acd42a4b..fea25b424e10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
@@ -27,6 +27,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -58,16 +59,16 @@ public class TestKeyValueHeap {
   private byte[] col5 = Bytes.toBytes("col5");
   // Variable name encoding. kv<row#><fam#><col#>
-  Cell kv111 = new KeyValue(row1, fam1, col1, data);
-  Cell kv112 = new KeyValue(row1, fam1, col2, data);
-  Cell kv113 = new KeyValue(row1, fam1, col3, data);
-  Cell kv114 = new KeyValue(row1, fam1, col4, data);
-  Cell kv115 = new KeyValue(row1, fam1, col5, data);
-  Cell kv121 = new KeyValue(row1, fam2, col1, data);
-  Cell kv122 = new KeyValue(row1, fam2, col2, data);
-  Cell kv211 = new KeyValue(row2, fam1, col1, data);
-  Cell kv212 = new KeyValue(row2, fam1, col2, data);
-  Cell kv213 = new KeyValue(row2, fam1, col3, data);
+  ExtendedCell kv111 = new KeyValue(row1, fam1, col1, data);
+  ExtendedCell kv112 = new KeyValue(row1, fam1, col2, data);
+  ExtendedCell kv113 = new KeyValue(row1, fam1, col3, data);
+  ExtendedCell kv114 = new KeyValue(row1, fam1, col4, data);
+  ExtendedCell kv115 = new KeyValue(row1, fam1, col5, data);
+  ExtendedCell kv121 = new KeyValue(row1, fam2, col1, data);
+  ExtendedCell kv122 = new KeyValue(row1, fam2, col2, data);
+  ExtendedCell kv211 = new KeyValue(row2, fam1, col1, data);
+  ExtendedCell kv212 = new KeyValue(row2, fam1, col2, data);
+  ExtendedCell kv213 = new KeyValue(row2, fam1, col3, data);
   TestScanner s1 = new TestScanner(Arrays.asList(kv115, kv211, kv212));
   TestScanner s2 = new TestScanner(Arrays.asList(kv111, kv112));
@@ -121,7 +122,7 @@ public void testSeek() throws IOException {
     // Creating KeyValueHeap
     try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
-      Cell seekKv = new KeyValue(row2, fam1, null, null);
+      ExtendedCell seekKv = new KeyValue(row2, fam1, null, null);
       kvh.seek(seekKv);
       List<Cell> actual = Arrays.asList(kvh.peek());
@@ -195,8 +196,8 @@ public void testScannerException() throws IOException {
   @Test
   public void testPriorityId() throws IOException {
-    Cell kv113A = new KeyValue(row1, fam1, col3, Bytes.toBytes("aaa"));
-    Cell kv113B = new KeyValue(row1, fam1, col3, Bytes.toBytes("bbb"));
+    ExtendedCell kv113A = new KeyValue(row1, fam1, col3, Bytes.toBytes("aaa"));
+    ExtendedCell kv113B = new KeyValue(row1, fam1, col3, Bytes.toBytes("bbb"));
     TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 1);
     TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 2);
     List<Cell> expected = Arrays.asList(kv111, kv112, kv113B, kv113A);
@@ -212,11 +213,11 @@ private static class TestScanner extends CollectionBackedScanner {
     private boolean closed = false;
     private long scannerOrder = 0;
-    public TestScanner(List<Cell> list) {
+    public TestScanner(List<ExtendedCell> list) {
       super(list);
     }
-    public TestScanner(List<Cell> list, long scannerOrder) {
+    public TestScanner(List<ExtendedCell> list, long scannerOrder) {
       this(list);
       this.scannerOrder = scannerOrder;
     }
@@ -240,7 +241,7 @@ private static class SeekTestScanner extends TestScanner {
     private int closedNum = 0;
     private boolean realSeekDone = true;
-    public SeekTestScanner(List<Cell> list) {
+    public SeekTestScanner(List<ExtendedCell> list) {
       super(list);
     }
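For context on what the retyped TestScanner exercises: KeyValueHeap merges several scanners in comparator order, and after this patch both the backing lists and the heap's peek()/next() results are typed as ExtendedCell. A rough usage sketch under the post-patch signatures; the kv111/kv112/kv115 fixtures are assumed to exist as in the test above:

// Sketch: two CollectionBackedScanners merged by a KeyValueHeap; the backing
// lists hold ExtendedCell and next() now hands back ExtendedCell as well.
List<KeyValueScanner> scanners = new ArrayList<>();
scanners.add(new CollectionBackedScanner(Arrays.asList(kv111, kv115), CellComparatorImpl.COMPARATOR));
scanners.add(new CollectionBackedScanner(Arrays.asList(kv112), CellComparatorImpl.COMPARATOR));
try (KeyValueHeap heap = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
  for (ExtendedCell c = heap.next(); c != null; c = heap.next()) {
    // cells arrive in global comparator order: kv111, kv112, kv115
  }
}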
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
index e91085edd2f0..cd91d39c77b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
@@ -333,7 +334,7 @@ public void testForceCopyOfBigCellInto() {
   }
   private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName,
-    Cell cellToCopyInto) {
+    ExtendedCell cellToCopyInto) {
     Thread thread = new Thread() {
       volatile boolean stopped = false;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java
index f8d09a2c9c15..9b32558edf12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java
@@ -28,7 +28,7 @@ import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
@@ -153,7 +153,7 @@ public void testLABChunkQueueWithMultipleMSLABs() throws Exception {
   }
   private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName,
-    Cell cellToCopyInto) {
+    ExtendedCell cellToCopyInto) {
     Thread thread = new Thread() {
       volatile boolean stopped = false;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
index 11ff28201801..54401fa578b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
@@ -255,7 +256,7 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
     Put put = new Put(key);
     put.addColumn(FAMILY, COLUMN, value);
     for (Cell c : put.get(FAMILY, COLUMN)) {
-      writer.append(c);
+      writer.append((ExtendedCell) c);
     }
     writer.close();
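The two MSLAB test changes above track the same signature move in MemStoreLAB itself: a cell is copied into chunk-backed memory, and with this patch both the argument and the returned copy are ExtendedCell. A minimal sketch assuming the post-patch API; conf and the row/family/qualifier/value arrays are placeholders:

// Sketch: copy a cell into a memstore LAB and get back a chunk-backed copy.
MemStoreLAB mslab = new MemStoreLABImpl(conf);
ExtendedCell original = new KeyValue(row, family, qualifier, value);
ExtendedCell copied = mslab.copyCellInto(original);
// same logical cell, different backing storage
assert CellUtil.matchingRowColumn(original, copied);
mslab.close();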
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index a51253e9dd7c..253fcc99bd0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
-import static org.apache.hadoop.hbase.CellUtil.createCell;
 import static org.apache.hadoop.hbase.KeyValueTestUtil.create;
 import static org.apache.hadoop.hbase.regionserver.KeyValueScanFixture.scanFixture;
 import static org.junit.Assert.assertEquals;
@@ -36,9 +35,12 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -112,32 +114,74 @@ public class TestStoreScanner {
  * to test scan does the right thing as it we do Gets, StoreScanner#optimize, and what we do on
  * (faked) block boundaries.
  */
-  private static final Cell[] CELL_GRID =
-    new Cell[] { createCell(ONE, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, THREE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, FOUR, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      // Offset 4 CELL_GRID_BLOCK2_BOUNDARY
-      createCell(TWO, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO, CF, THREE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO, CF, FOUR, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO_POINT_TWO, CF, ZERO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO_POINT_TWO, CF, ZERO_POINT_ZERO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO_POINT_TWO, CF, FIVE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      // Offset 11! CELL_GRID_BLOCK3_BOUNDARY
-      createCell(THREE, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(THREE, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(THREE, CF, THREE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(THREE, CF, FOUR, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      // Offset 15 CELL_GRID_BLOCK4_BOUNDARY
-      createCell(FOUR, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(FOUR, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(FOUR, CF, THREE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(FOUR, CF, FOUR, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      // Offset 19 CELL_GRID_BLOCK5_BOUNDARY
-      createCell(FOUR, CF, FIVE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(FIVE, CF, ZERO, 1L, KeyValue.Type.Put.getCode(), VALUE), };
+  private static final ExtendedCell[] CELL_GRID = new ExtendedCell[] {
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    // Offset 4 CELL_GRID_BLOCK2_BOUNDARY
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO).setFamily(CF)
+      .setQualifier(ZERO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO).setFamily(CF)
+      .setQualifier(ZERO_POINT_ZERO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode())
+      .setValue(VALUE).build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO).setFamily(CF)
+      .setQualifier(FIVE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    // Offset 11! CELL_GRID_BLOCK3_BOUNDARY
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF)
+      .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF)
+      .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    // Offset 15 CELL_GRID_BLOCK4_BOUNDARY
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF)
+      .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF)
+      .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    // Offset 19 CELL_GRID_BLOCK5_BOUNDARY
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF)
+      .setQualifier(FIVE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FIVE).setFamily(CF)
+      .setQualifier(ZERO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(), };
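With the static import of the removed CellUtil.createCell gone, every grid entry above goes through the extended cell builder instead. Expanded once for readability, using the test's own constants (ONE, CF, VALUE):

// One CELL_GRID entry, written out: DEEP_COPY copies each byte[] into the
// cell, and build() yields an ExtendedCell rather than a plain Cell.
ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
  .setRow(ONE).setFamily(CF).setQualifier(ONE)
  .setTimestamp(1L).setType(KeyValue.Type.Put.getCode())
  .setValue(VALUE).build();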
   private static class KeyValueHeapWithCount extends KeyValueHeap {
@@ -150,7 +194,7 @@ public KeyValueHeapWithCount(List<? extends KeyValueScanner> scanners,
     }
     @Override
-    public Cell peek() {
+    public ExtendedCell peek() {
       this.count.incrementAndGet();
       return super.peek();
     }
@@ -187,7 +231,7 @@ protected KeyValueHeap newKVHeap(List<? extends KeyValueScanner> scanners,
     }
     @Override
-    protected boolean trySkipToNextRow(Cell cell) throws IOException {
+    protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException {
       boolean optimized = super.trySkipToNextRow(cell);
       LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false)
         + ", optimized=" + optimized);
@@ -198,7 +242,7 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException {
     }
     @Override
-    protected boolean trySkipToNextColumn(Cell cell) throws IOException {
+    protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException {
       boolean optimized = super.trySkipToNextColumn(cell);
       LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false)
         + ", optimized=" + optimized);
@@ -209,7 +253,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException {
     }
     @Override
-    public Cell getNextIndexedKey() {
+    public ExtendedCell getNextIndexedKey() {
       // Fake block boundaries by having index of next block change as we go through scan.
       return count.get() > CELL_GRID_BLOCK4_BOUNDARY ?
         PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK5_BOUNDARY])
@@ -223,14 +267,26 @@ public Cell getNextIndexedKey() {
   private static final int CELL_WITH_VERSIONS_BLOCK2_BOUNDARY = 4;
-  private static final Cell[] CELL_WITH_VERSIONS =
-    new Cell[] { createCell(ONE, CF, ONE, 2L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, TWO, 2L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(ONE, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      // Offset 4 CELL_WITH_VERSIONS_BLOCK2_BOUNDARY
-      createCell(TWO, CF, ONE, 1L, KeyValue.Type.Put.getCode(), VALUE),
-      createCell(TWO, CF, TWO, 1L, KeyValue.Type.Put.getCode(), VALUE), };
+  private static final ExtendedCell[] CELL_WITH_VERSIONS = new ExtendedCell[] {
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(2L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(2L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    // Offset 4 CELL_WITH_VERSIONS_BLOCK2_BOUNDARY
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(),
+    ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF)
+      .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE)
+      .build(), };
   private static class CellWithVersionsStoreScanner extends StoreScanner {
     // Count of how often optimize is called and of how often it does an optimize.
@@ -243,7 +299,7 @@ Arrays.<KeyValueScanner> asList(new KeyValueScanner[] {
     }
     @Override
-    protected boolean trySkipToNextColumn(Cell cell) throws IOException {
+    protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException {
       boolean optimized = super.trySkipToNextColumn(cell);
       LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false)
         + ", optimized=" + optimized);
@@ -254,7 +310,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException {
     }
     @Override
-    public Cell getNextIndexedKey() {
+    public ExtendedCell getNextIndexedKey() {
       // Fake block boundaries by having index of next block change as we go through scan.
       return PrivateCellUtil
         .createFirstOnRow(CELL_WITH_VERSIONS[CELL_WITH_VERSIONS_BLOCK2_BOUNDARY]);
@@ -272,7 +328,7 @@ Arrays.<KeyValueScanner> asList(new KeyValueScanner[] {
     }
     @Override
-    protected boolean trySkipToNextColumn(Cell cell) throws IOException {
+    protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException {
      boolean optimized = super.trySkipToNextColumn(cell);
       LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false)
         + ", optimized=" + optimized);
@@ -283,7 +339,7 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException {
     }
     @Override
-    public Cell getNextIndexedKey() {
+    public ExtendedCell getNextIndexedKey() {
       return null;
     }
   };
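The overrides above fake block boundaries so the StoreScanner optimize path can be observed from a test. The same pattern in isolation, assuming a StoreScanner subclass like the ones in this file:

// Sketch: report the first cell on a chosen row as the 'next block' index key,
// forcing trySkipToNextRow/Column to choose between SKIP and SEEK_NEXT_*.
@Override
public ExtendedCell getNextIndexedKey() {
  return PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK5_BOUNDARY]);
}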
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
index 286b707ae8cc..7decfc6da124 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.List;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeepDeletedCells;
@@ -64,8 +65,8 @@ public void testNeverIncludeFakeCell() throws IOException {
       new ScanInfo(this.conf, fam2, 10, 1, ttl, KeepDeletedCells.FALSE,
         HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
-    Cell kv = new KeyValue(row1, fam2, col2, 1, data);
-    Cell cell = PrivateCellUtil.createLastOnRowCol(kv);
+    ExtendedCell kv = new KeyValue(row1, fam2, col2, 1, data);
+    ExtendedCell cell = PrivateCellUtil.createLastOnRowCol(kv);
     qm.setToNewRow(kv);
     MatchCode code = qm.match(cell);
     assertFalse(code.compareTo(MatchCode.SEEK_NEXT_COL) != 0);
@@ -392,7 +393,7 @@ scanWithFilter, new ScanInfo(this.conf, fam2, 0, 5, ttl, KeepDeletedCells.FALSE,
     // For last cell, the query matcher will return SEEK_NEXT_COL, and the
     // ColumnTracker will skip to the next column, which is col4.
-    Cell lastCell = memstore.get(memstore.size() - 1);
+    ExtendedCell lastCell = memstore.get(memstore.size() - 1);
     Cell nextCell = qm.getKeyForNextColumn(lastCell);
     assertArrayEquals(nextCell.getQualifierArray(), col4);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 2ef7ece92db8..6069283dd57c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -506,7 +507,7 @@ public Object run() throws Exception {
     final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
     HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
       @Override
-      protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreSizing) {
+      protected void restoreEdit(HStore s, ExtendedCell cell, MemStoreSizing memstoreSizing) {
         super.restoreEdit(s, cell, memstoreSizing);
         countOfRestoredEdits.incrementAndGet();
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
index 9d4257e1aa7d..f6adfbb394bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -112,7 +113,7 @@ public static void createHFile(Configuration configuration, FileSystem fs, Path
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys
       for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
-        Cell kv = new KeyValue(key, family, qualifier, now, key);
+        ExtendedCell kv = new KeyValue(key, family, qualifier, now, key);
         if (withTag) {
           // add a tag.  Arbitrarily chose mob tag since we have a helper already.
           Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);