HBASE-28065 Corrupt HFile data is mishandled in several cases
* when no block size is provided and there's no preread headerBuf, treat the value read from the
  block header with caution.
* verify HBase checksums before making use of the block header.
* inline verifyOnDiskSizeMatchesHeader to keep throw/return logic in the method body.
* separate validation of onDiskSizeWithHeader as an input parameter from validation of the value
  read from the block header (see the sketch after this list).
* simplify branching around fetching and populating onDiskSizeWithHeader.
* inline the retrieval of nextOnDiskBlockSize; add basic validation.
* whenever a read is determined to be corrupt and fallback to HDFS checksum is necessary, also
  invalidate the cached value of headerBuf.
* build out a test suite covering various forms of block header corruption, for blocks in first
  and second positions.
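
A minimal, self-contained sketch of the sanity check described above. The method name here is
hypothetical; the patch itself adds checkOnDiskSizeWithHeader, shown in the diff below.

  // A block size taken from a not-yet-verified header is trusted only when it is
  // non-negative and no smaller than the header it claims to include.
  static boolean looksSaneOnDiskSizeWithHeader(int onDiskSizeWithHeader, int hdrSize) {
    if (onDiskSizeWithHeader < 0) {
      return false; // a size can never be negative
    }
    if (onDiskSizeWithHeader - hdrSize < 0) {
      return false; // corruption: no room left for the header itself
    }
    return true;
  }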

Signed-off-by: Bryan Beaudreault <[email protected]>
ndimiduk authored Sep 21, 2023
1 parent dad1f70 commit 3ddadcf
Showing 4 changed files with 640 additions and 52 deletions.
@@ -392,12 +392,12 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final

/**
* Parse total on disk size including header and checksum.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
* @param verifyChecksum true if checksum verification is in use.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
* @param checksumSupport true if checksum verification is in use.
* @return Size of the block with header included.
*/
private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean verifyChecksum) {
return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum);
private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean checksumSupport) {
return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(checksumSupport);
}

/**
@@ -1597,33 +1597,48 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean
}

/**
* Returns Check <code>onDiskSizeWithHeaderL</code> size is healthy and then return it as an int
* Check that {@code value} read from a block header seems reasonable, within a large margin of
* error.
* @return {@code true} if the value is safe to proceed, {@code false} otherwise.
*/
private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final int hdrSize)
throws IOException {
if (
(onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1)
|| onDiskSizeWithHeaderL >= Integer.MAX_VALUE
) {
throw new IOException(
"Invalid onDisksize=" + onDiskSizeWithHeaderL + ": expected to be at least " + hdrSize
+ " and at most " + Integer.MAX_VALUE + ", or -1");
private boolean checkOnDiskSizeWithHeader(int value) {
if (value < 0) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"onDiskSizeWithHeader={}; value represents a size, so it should never be negative.",
value);
}
return false;
}
if (value - hdrSize < 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("onDiskSizeWithHeader={}, hdrSize={}; don't accept a value that is negative"
+ " after the header size is excluded.", value, hdrSize);
}
return false;
}
return (int) onDiskSizeWithHeaderL;
return true;
}

/**
* Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something is
* not right.
* Check that {@code value} provided by the calling context seems reasonable, within a large
* margin of error.
* @return {@code true} if the value is safe to proceed, {@code false} otherwise.
*/
private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf,
final long offset, boolean verifyChecksum) throws IOException {
// Assert size provided aligns with what is in the header
int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
if (passedIn != fromHeader) {
throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader
+ ", offset=" + offset + ", fileContext=" + this.fileContext);
private boolean checkCallerProvidedOnDiskSizeWithHeader(long value) {
// same validation logic as is used by Math.toIntExact(long)
int intValue = (int) value;
if (intValue != value) {
if (LOG.isTraceEnabled()) {
LOG.trace("onDiskSizeWithHeaderL={}; value exceeds int size limits.", value);
}
return false;
}
if (intValue == -1) {
// a magic value we expect to see.
return true;
}
return checkOnDiskSizeWithHeader(intValue);
}

/**
@@ -1654,14 +1669,16 @@ private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock,
this.prefetchedHeader.set(ph);
}

private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock,
int onDiskSizeWithHeader) {
int nextBlockOnDiskSize = -1;
if (readNextHeader) {
nextBlockOnDiskSize =
onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + hdrSize;
}
return nextBlockOnDiskSize;
/**
* Clear the cached value when its integrity is suspect.
*/
private void invalidateNextBlockHeader() {
prefetchedHeader.set(null);
}

private int getNextBlockOnDiskSize(ByteBuff onDiskBlock, int onDiskSizeWithHeader) {
return onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH)
+ hdrSize;
}

private ByteBuff allocate(int size, boolean intoHeap) {
@@ -1687,17 +1704,21 @@ private ByteBuff allocate(int size, boolean intoHeap) {
protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics,
boolean intoHeap) throws IOException {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = Attributes.builder();
Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY))
.ifPresent(c -> c.accept(attributesBuilder));
if (offset < 0) {
throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize="
+ onDiskSizeWithHeaderL + ")");
}
if (!checkCallerProvidedOnDiskSizeWithHeader(onDiskSizeWithHeaderL)) {
LOG.trace("Caller provided invalid onDiskSizeWithHeaderL={}", onDiskSizeWithHeaderL);
onDiskSizeWithHeaderL = -1;
}
int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL;

final Span span = Span.current();
final AttributesBuilder attributesBuilder = Attributes.builder();
Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY))
.ifPresent(c -> c.accept(attributesBuilder));
int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize);
// Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1
// Try to use the cached header. Will serve us in rare case where onDiskSizeWithHeaderL==-1
// and will save us having to seek the stream backwards to reread the header we
// read the last time through here.
ByteBuff headerBuf = getCachedHeader(offset);
@@ -1711,8 +1732,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
// file has support for checksums (version 2+).
boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
long startTime = EnvironmentEdgeManager.currentTime();
if (onDiskSizeWithHeader <= 0) {
// We were not passed the block size. Need to get it from the header. If header was
if (onDiskSizeWithHeader == -1) {
// The caller does not know the block size. Need to get it from the header. If header was
// not cached (see getCachedHeader above), need to seek to pull it in. This is costly
// and should happen very rarely. Currently happens on open of a hfile reader where we
// read the trailer blocks to pull in the indices. Otherwise, we are reading block sizes
@@ -1729,6 +1750,19 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
}
onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
}

// The common case is that onDiskSizeWithHeader was produced by a read without checksum
// validation, so give it a sanity check before trying to use it.
if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) {
if (verifyChecksum) {
invalidateNextBlockHeader();
span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
return null;
} else {
throw new IOException("Invalid onDiskSizeWithHeader=" + onDiskSizeWithHeader);
}
}

int preReadHeaderSize = headerBuf == null ? 0 : hdrSize;
// Allocate enough space to fit the next block's header too; saves a seek next time through.
// onDiskBlock is whole block + header + checksums then extra hdrSize to read next header;
@@ -1745,19 +1779,49 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
boolean readNextHeader = readAtOffset(is, onDiskBlock,
onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
onDiskBlock.rewind(); // in case of moving position when copying a cached header
int nextBlockOnDiskSize =
getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader);

// the call to validateChecksum for this block excludes the next block header over-read, so
// no reason to delay extracting this value.
int nextBlockOnDiskSize = -1;
if (readNextHeader) {
int parsedVal = getNextBlockOnDiskSize(onDiskBlock, onDiskSizeWithHeader);
if (checkOnDiskSizeWithHeader(parsedVal)) {
nextBlockOnDiskSize = parsedVal;
}
}
if (headerBuf == null) {
headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize);
}
// Do a few checks before we go instantiate HFileBlock.
assert onDiskSizeWithHeader > this.hdrSize;
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport);

ByteBuff curBlock = onDiskBlock.duplicate().position(0).limit(onDiskSizeWithHeader);
// Verify checksum of the data before using it for building HFileBlock.
if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
invalidateNextBlockHeader();
span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
return null;
}

// TODO: is this check necessary or can we proceed with a provided value regardless of
// what is in the header?
int fromHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
if (onDiskSizeWithHeader != fromHeader) {
if (LOG.isTraceEnabled()) {
LOG.trace("Passed in onDiskSizeWithHeader={} != {}, offset={}, fileContext={}",
onDiskSizeWithHeader, fromHeader, offset, this.fileContext);
}
if (checksumSupport && verifyChecksum) {
// This file supports HBase checksums and verification of those checksums was
// requested. The block size provided by the caller (presumably from the block index)
// does not match the block size written to the block header. treat this as
// HBase-checksum failure.
span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
invalidateNextBlockHeader();
return null;
}
throw new IOException("Passed in onDiskSizeWithHeader=" + onDiskSizeWithHeader + " != "
+ fromHeader + ", offset=" + offset + ", fileContext=" + this.fileContext);
}

// remove checksum from buffer now that it's verified
int sizeWithoutChecksum = curBlock.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
curBlock.limit(sizeWithoutChecksum);
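
For orientation only (not part of this commit): a standalone sketch of the fallback protocol the
hunks above implement. Whenever corruption is detected while HBase-checksum verification is in
effect, the cached next-block header is invalidated and null is returned; the caller is assumed to
retry the read relying on HDFS checksums instead. All names below are illustrative, not HBase API.

  final class ChecksumFallbackSketch {
    private byte[] cachedNextHeader; // stands in for the prefetchedHeader cache

    // Models readBlockDataInternal: returning null means "retry with HDFS checksums".
    byte[] read(byte[] rawBlock, boolean blockLooksValid, boolean verifyHBaseChecksum) {
      if (!blockLooksValid) {
        if (verifyHBaseChecksum) {
          cachedNextHeader = null; // suspect data: never reuse the over-read header
          return null;             // caller falls back to HDFS checksumming
        }
        throw new IllegalStateException("corrupt block and no checksum fallback available");
      }
      return rawBlock;
    }

    // Models the caller (readBlockData): try HBase checksums first, then fall back once.
    byte[] readWithFallback(byte[] rawBlock, boolean blockLooksValid) {
      byte[] block = read(rawBlock, blockLooksValid, true);
      return block != null ? block : read(rawBlock, blockLooksValid, false);
    }
  }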
@@ -61,7 +61,7 @@ public class TestChecksum {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestChecksum.class);

private static final Logger LOG = LoggerFactory.getLogger(TestHFileBlock.class);
private static final Logger LOG = LoggerFactory.getLogger(TestChecksum.class);

static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = { NONE, GZ };

@@ -163,12 +163,7 @@ public void testReaderWithoutBlockCache() throws Exception {
fillByteBuffAllocator(alloc, bufCount);
// start write to store file.
Path path = writeStoreFile();
try {
readStoreFile(path, conf, alloc);
} catch (Exception e) {
// fail test
assertTrue(false);
}
readStoreFile(path, conf, alloc);
Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
alloc.clean();
}