diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index f4cccfd03b04..0989f73df0a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -92,7 +92,7 @@ protected boolean isTop() {
   }
 
   @Override
-  public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread,
+  protected HFileScanner getScanner(final boolean cacheBlocks, final boolean pread,
     final boolean isCompaction) {
     final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction);
     return new HFileScanner() {
@@ -283,7 +283,7 @@ public Optional<Cell> getLastKey() {
       return super.getLastKey();
     }
     // Get a scanner that caches the block and that uses pread.
-    HFileScanner scanner = getScanner(true, true);
+    HFileScanner scanner = getScanner(true, true, false);
     try {
       if (scanner.seekBefore(this.splitCell)) {
         return Optional.ofNullable(scanner.getKey());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 09c379227bda..4f872d7084e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -183,31 +183,9 @@ void readCompleted() {
   }
 
   /**
-   * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends
-   *             on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner
-   *             class/interface which is the preferred way to scan a store with higher level
-   *             concepts.
-   * @param cacheBlocks should we cache the blocks?
-   * @param pread use pread (for concurrent small readers)
-   * @return the underlying HFileScanner
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15296">HBASE-15296</a>
+   * Will be overridden in HalfStoreFileReader
    */
-  @Deprecated
-  public HFileScanner getScanner(boolean cacheBlocks, boolean pread) {
-    return getScanner(cacheBlocks, pread, false);
-  }
-
-  /**
-   * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends
-   *             on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner
-   *             class/interface which is the preferred way to scan a store with higher level
-   *             concepts. should we cache the blocks? use pread (for concurrent small readers) is
-   *             scanner being used for compaction?
-   * @return the underlying HFileScanner
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15296">HBASE-15296</a>
-   */
-  @Deprecated
-  public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) {
+  protected HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) {
     return reader.getScanner(conf, cacheBlocks, pread, isCompaction);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index 9b4e1aea9066..24578417ef34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.AsyncAdmin;
@@ -74,7 +75,6 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -83,11 +83,12 @@
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.ReaderContext;
 import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -757,6 +758,41 @@ static void splitStoreFile(AsyncTableRegionLocator loc, Configuration conf, Path
     copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc, loc);
   }
 
+  private static StoreFileWriter initStoreFileWriter(Configuration conf, Cell cell,
+    HFileContext hFileContext, CacheConfig cacheConf, BloomType bloomFilterType, FileSystem fs,
+    Path outFile, AsyncTableRegionLocator loc) throws IOException {
+    if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
+      byte[] rowKey = CellUtil.cloneRow(cell);
+      HRegionLocation hRegionLocation = FutureUtils.get(loc.getRegionLocation(rowKey));
+      InetSocketAddress[] favoredNodes = null;
+      if (null == hRegionLocation) {
+        LOG.warn("Failed get region location for rowkey {} , Using writer without favoured nodes.",
+          Bytes.toString(rowKey));
+        return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
+          .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
+      } else {
+        LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
+        InetSocketAddress initialIsa =
+          new InetSocketAddress(hRegionLocation.getHostname(), hRegionLocation.getPort());
+        if (initialIsa.isUnresolved()) {
+          LOG.warn("Failed get location for region {} , Using writer without favoured nodes.",
+            hRegionLocation);
+          return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
+            .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
+        } else {
+          LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
+          favoredNodes = new InetSocketAddress[] { initialIsa };
+          return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
+            .withBloomType(bloomFilterType).withFileContext(hFileContext)
+            .withFavoredNodes(favoredNodes).build();
+        }
+      }
+    } else {
+      return new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
+        .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
+    }
+  }
+
   /**
    * Copy half of an HFile into a new HFile with favored nodes.
    */
@@ -765,14 +801,14 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
     throws IOException {
     FileSystem fs = inFile.getFileSystem(conf);
     CacheConfig cacheConf = CacheConfig.DISABLED;
-    HalfStoreFileReader halfReader = null;
+    StoreFileReader halfReader = null;
     StoreFileWriter halfWriter = null;
     try {
       ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build();
       StoreFileInfo storeFileInfo =
         new StoreFileInfo(conf, fs, fs.getFileStatus(inFile), reference);
       storeFileInfo.initHFileInfo(context);
-      halfReader = (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf);
+      halfReader = storeFileInfo.createReader(context, cacheConf);
       storeFileInfo.getHFileInfo().initMetaAndIndex(halfReader.getHFileReader());
 
       Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
@@ -785,51 +821,22 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
         .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true)
         .withCreateTime(EnvironmentEdgeManager.currentTime()).build();
 
-      HFileScanner scanner = halfReader.getScanner(false, false, false);
-      scanner.seekTo();
-      do {
-        final Cell cell = scanner.getCell();
-        if (null != halfWriter) {
-          halfWriter.append(cell);
-        } else {
-
-          // init halfwriter
-          if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
-            byte[] rowKey = CellUtil.cloneRow(cell);
-            HRegionLocation hRegionLocation = FutureUtils.get(loc.getRegionLocation(rowKey));
-            InetSocketAddress[] favoredNodes = null;
-            if (null == hRegionLocation) {
-              LOG.warn(
-                "Failed get region location for rowkey {} , Using writer without favoured nodes.",
-                Bytes.toString(rowKey));
-              halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
-                .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
-            } else {
-              LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
-              InetSocketAddress initialIsa =
-                new InetSocketAddress(hRegionLocation.getHostname(), hRegionLocation.getPort());
-              if (initialIsa.isUnresolved()) {
-                LOG.warn("Failed get location for region {} , Using writer without favoured nodes.",
-                  hRegionLocation);
-                halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
-                  .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
-              } else {
-                LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
-                favoredNodes = new InetSocketAddress[] { initialIsa };
-                halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
-                  .withBloomType(bloomFilterType).withFileContext(hFileContext)
-                  .withFavoredNodes(favoredNodes).build();
-              }
-            }
-          } else {
-            halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
-              .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
-          }
-        }
-
-      } while (scanner.next());
-
+      try (StoreFileScanner scanner =
+        halfReader.getStoreFileScanner(false, false, false, Long.MAX_VALUE, 0, false)) {
+        scanner.seek(KeyValue.LOWESTKEY);
+        for (;;) {
+          Cell cell = scanner.next();
+          if (cell == null) {
+            break;
+          }
+          if (halfWriter == null) {
+            // init halfwriter
+            halfWriter = initStoreFileWriter(conf, cell, hFileContext, cacheConf, bloomFilterType,
+              fs, outFile, loc);
+          }
+          halfWriter.append(cell);
+        }
+      }
       for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
         if (shouldCopyHFileMetaKey(entry.getKey())) {
           halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
index 7dd4cbe44f93..0a41159e3aaa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
@@ -124,21 +124,22 @@ private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, Cach
       (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf);
     storeFileInfo.getHFileInfo().initMetaAndIndex(halfreader.getHFileReader());
     halfreader.loadFileInfo();
-    final HFileScanner scanner = halfreader.getScanner(false, false);
-
-    scanner.seekTo();
-    Cell curr;
-    do {
-      curr = scanner.getCell();
-      KeyValue reseekKv = getLastOnCol(curr);
-      int ret = scanner.reseekTo(reseekKv);
-      assertTrue("reseek to returned: " + ret, ret > 0);
-      // System.out.println(curr + ": " + ret);
-    } while (scanner.next());
-
-    int ret = scanner.reseekTo(getLastOnCol(curr));
-    // System.out.println("Last reseek: " + ret);
-    assertTrue(ret > 0);
+    try (HFileScanner scanner = halfreader.getScanner(false, false, false)) {
+
+      scanner.seekTo();
+      Cell curr;
+      do {
+        curr = scanner.getCell();
+        KeyValue reseekKv = getLastOnCol(curr);
+        int ret = scanner.reseekTo(reseekKv);
+        assertTrue("reseek to returned: " + ret, ret > 0);
+        // System.out.println(curr + ": " + ret);
+      } while (scanner.next());
+
+      int ret = scanner.reseekTo(getLastOnCol(curr));
+      // System.out.println("Last reseek: " + ret);
+      assertTrue(ret > 0);
+    }
 
     halfreader.close(true);
   }
@@ -222,9 +223,14 @@ private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell se
       (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConfig);
     storeFileInfo.getHFileInfo().initMetaAndIndex(halfreader.getHFileReader());
     halfreader.loadFileInfo();
-    final HFileScanner scanner = halfreader.getScanner(false, false);
-    scanner.seekBefore(seekBefore);
-    return scanner.getCell();
+    try (HFileScanner scanner = halfreader.getScanner(false, false, false)) {
+      scanner.seekBefore(seekBefore);
+      if (scanner.getCell() != null) {
+        return KeyValueUtil.copyToNewKeyValue(scanner.getCell());
+      } else {
+        return null;
+      }
+    }
   }
 
   private KeyValue getLastOnCol(Cell curr) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index c0bc72079cb7..9b43ab32c2c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -62,7 +63,6 @@
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
@@ -316,13 +316,13 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
   private int count() throws IOException {
     int count = 0;
     for (HStoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
-      HFileScanner scanner = f.getReader().getScanner(false, false);
-      if (!scanner.seekTo()) {
-        continue;
+      f.initReader();
+      try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+        scanner.seek(KeyValue.LOWESTKEY);
+        while (scanner.next() != null) {
+          count++;
+        }
       }
-      do {
-        count++;
-      } while (scanner.next());
     }
     return count;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index 55320e94a9f9..f4fff4f5cbe4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -99,26 +99,26 @@ public void testHFileScannerThrowsErrors() throws IOException {
       BloomType.NONE, true);
     sf.initReader();
     StoreFileReader reader = sf.getReader();
-    HFileScanner scanner = reader.getScanner(false, true);
+    try (HFileScanner scanner = reader.getScanner(false, true, false)) {
+      FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
+      assertNotNull(inStream);
 
-    FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
-    assertNotNull(inStream);
+      scanner.seekTo();
+      // Do at least one successful read
+      assertTrue(scanner.next());
 
-    scanner.seekTo();
-    // Do at least one successful read
-    assertTrue(scanner.next());
+      faultyfs.startFaults();
 
-    faultyfs.startFaults();
-
-    try {
-      int scanned = 0;
-      while (scanner.next()) {
-        scanned++;
+      try {
+        int scanned = 0;
+        while (scanner.next()) {
+          scanned++;
+        }
+        fail("Scanner didn't throw after faults injected");
+      } catch (IOException ioe) {
+        LOG.info("Got expected exception", ioe);
+        assertTrue(ioe.getMessage().contains("Fault"));
       }
-      fail("Scanner didn't throw after faults injected");
-    } catch (IOException ioe) {
-      LOG.info("Got expected exception", ioe);
-      assertTrue(ioe.getMessage().contains("Fault"));
     }
     reader.close(true); // end of test so evictOnClose
   }
@@ -147,27 +147,32 @@ public void testStoreFileScannerThrowsErrors() throws IOException {
         Collections.singletonList(sf), false, true, false, false,
         // 0 is passed as readpoint because this test operates on HStoreFile directly
         0);
-    KeyValueScanner scanner = scanners.get(0);
+    try {
+      KeyValueScanner scanner = scanners.get(0);
 
-    FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
-    assertNotNull(inStream);
+      FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
+      assertNotNull(inStream);
 
-    scanner.seek(KeyValue.LOWESTKEY);
-    // Do at least one successful read
-    assertNotNull(scanner.next());
-    faultyfs.startFaults();
+      scanner.seek(KeyValue.LOWESTKEY);
+      // Do at least one successful read
+      assertNotNull(scanner.next());
+      faultyfs.startFaults();
 
-    try {
-      int scanned = 0;
-      while (scanner.next() != null) {
-        scanned++;
+      try {
+        int scanned = 0;
+        while (scanner.next() != null) {
+          scanned++;
+        }
+        fail("Scanner didn't throw after faults injected");
+      } catch (IOException ioe) {
+        LOG.info("Got expected exception", ioe);
+        assertTrue(ioe.getMessage().contains("Could not iterate"));
+      }
+    } finally {
+      for (StoreFileScanner scanner : scanners) {
+        scanner.close();
       }
-      fail("Scanner didn't throw after faults injected");
-    } catch (IOException ioe) {
-      LOG.info("Got expected exception", ioe);
-      assertTrue(ioe.getMessage().contains("Could not iterate"));
     }
-    scanner.close();
   }
 
   /**
@@ -202,13 +207,13 @@ public void testFullSystemBubblesFSErrors() throws Exception {
       // Load some data
       util.loadTable(table, fam, false);
       util.flush();
-      util.countRows(table);
+      HBaseTestingUtil.countRows(table);
 
       // Kill the DFS cluster
       util.getDFSCluster().shutdownDataNodes();
 
       try {
-        util.countRows(table);
+        HBaseTestingUtil.countRows(table);
         fail("Did not fail to count after removing data");
       } catch (Exception e) {
         LOG.info("Got expected error", e);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index aa7fb53566df..824c195fd0f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -245,19 +245,20 @@ public void testReference() throws IOException {
     refHsf.initReader();
     // Now confirm that I can read from the reference and that it only gets
    // keys from top half of the file.
-    HFileScanner s = refHsf.getReader().getScanner(false, false);
-    Cell kv = null;
-    for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
-      ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
-      kv = KeyValueUtil.createKeyValueFromKey(bb);
-      if (first) {
-        assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), midRow, 0,
-          midRow.length));
-        first = false;
+    try (HFileScanner s = refHsf.getReader().getScanner(false, false, false)) {
+      Cell kv = null;
+      for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
+        ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
+        kv = KeyValueUtil.createKeyValueFromKey(bb);
+        if (first) {
+          assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), midRow, 0,
+            midRow.length));
+          first = false;
+        }
       }
+      assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), finalRow, 0,
+        finalRow.length));
     }
-    assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), finalRow, 0,
-      finalRow.length));
   }
 
   @Test
@@ -333,11 +334,12 @@ public void testHFileLink() throws IOException {
     hsf.initReader();
 
     // Now confirm that I can read from the link
-    int count = 1;
-    HFileScanner s = hsf.getReader().getScanner(false, false);
-    s.seekTo();
-    while (s.next()) {
-      count++;
+    int count = 0;
+    try (StoreFileScanner scanner = hsf.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+      scanner.seek(KeyValue.LOWESTKEY);
+      while (scanner.next() != null) {
+        count++;
+      }
     }
     assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
   }
@@ -395,26 +397,25 @@ public void testReferenceToHFileLink() throws IOException {
     hsfA.initReader();
 
     // Now confirm that I can read from the ref to link
-    int count = 1;
-    HFileScanner s = hsfA.getReader().getScanner(false, false);
-    s.seekTo();
-    while (s.next()) {
-      count++;
+    int count = 0;
+    try (StoreFileScanner scanner = hsfA.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+      scanner.seek(KeyValue.LOWESTKEY);
+      while (scanner.next() != null) {
+        count++;
+      }
+      assertTrue(count > 0); // read some rows here
     }
-    assertTrue(count > 0); // read some rows here
 
     // Try to open store file from link
     HStoreFile hsfB = new HStoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE, true);
     hsfB.initReader();
 
     // Now confirm that I can read from the ref to link
-    HFileScanner sB = hsfB.getReader().getScanner(false, false);
-    sB.seekTo();
-
-    // count++ as seekTo() will advance the scanner
-    count++;
-    while (sB.next()) {
-      count++;
+    try (StoreFileScanner scanner = hsfB.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+      scanner.seek(KeyValue.LOWESTKEY);
+      while (scanner.next() != null) {
+        count++;
+      }
     }
 
     // read the rest of the rows
@@ -454,39 +455,41 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f
      // Now test reading from the top.
      boolean first = true;
      ByteBuffer key = null;
-      HFileScanner topScanner = top.getScanner(false, false);
-      while (
-        (!topScanner.isSeeked() && topScanner.seekTo())
-          || (topScanner.isSeeked() && topScanner.next())
-      ) {
-        key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
-
-        if (
-          (PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(),
-            key.arrayOffset(), key.limit())) > 0
+      try (HFileScanner topScanner = top.getScanner(false, false, false)) {
+        while (
+          (!topScanner.isSeeked() && topScanner.seekTo())
+            || (topScanner.isSeeked() && topScanner.next())
         ) {
-          fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
-        }
-        if (first) {
-          first = false;
-          LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
+          key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
+
+          if (
+            (PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(),
+              key.arrayOffset(), key.limit())) > 0
+          ) {
+            fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
+          }
+          if (first) {
+            first = false;
+            LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
+          }
         }
       }
      LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

      first = true;
-      HFileScanner bottomScanner = bottom.getScanner(false, false);
-      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
-        previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
-        key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
-        if (first) {
-          first = false;
-          LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
+      try (HFileScanner bottomScanner = bottom.getScanner(false, false, false)) {
+        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
+          previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
+          key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
+          if (first) {
+            first = false;
+            LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
+          }
+          assertTrue(key.compareTo(bbMidkeyBytes) < 0);
+        }
+        if (previous != null) {
+          LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
-        assertTrue(key.compareTo(bbMidkeyBytes) < 0);
-      }
-      if (previous != null) {
-        LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
      }
      // Remove references.
      regionFs.cleanupDaughterRegion(topHri);
@@ -507,29 +510,31 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f
      top = topF.getReader();
      // Now read from the top.
      first = true;
-      topScanner = top.getScanner(false, false);
-      KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
-      while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
-        key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
-        keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
-        assertTrue(PrivateCellUtil.compare(topScanner.getReader().getComparator(), keyOnlyKV,
-          badmidkey, 0, badmidkey.length) >= 0);
-        if (first) {
-          first = false;
-          KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
-          LOG.info("First top when key < bottom: " + keyKV);
-          String tmp =
-            Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
-          for (int i = 0; i < tmp.length(); i++) {
-            assertTrue(tmp.charAt(i) == 'a');
+      try (HFileScanner topScanner = top.getScanner(false, false, false)) {
+        KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
+        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
+          key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
+          keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
+          assertTrue(PrivateCellUtil.compare(topScanner.getReader().getComparator(), keyOnlyKV,
+            badmidkey, 0, badmidkey.length) >= 0);
+          if (first) {
+            first = false;
+            KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
+            LOG.info("First top when key < bottom: " + keyKV);
+            String tmp =
+              Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
+            for (int i = 0; i < tmp.length(); i++) {
+              assertTrue(tmp.charAt(i) == 'a');
+            }
          }
        }
-      }
-      KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
-      LOG.info("Last top when key < bottom: " + keyKV);
-      String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
-      for (int i = 0; i < tmp.length(); i++) {
-        assertTrue(tmp.charAt(i) == 'z');
+        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
+        LOG.info("Last top when key < bottom: " + keyKV);
+        String tmp =
+          Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
+        for (int i = 0; i < tmp.length(); i++) {
+          assertTrue(tmp.charAt(i) == 'z');
+        }
      }
      // Remove references.
      regionFs.cleanupDaughterRegion(topHri);
@@ -545,25 +550,28 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f
      bottomF.initReader();
      bottom = bottomF.getReader();
      first = true;
-      bottomScanner = bottom.getScanner(false, false);
-      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
-        key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
-        if (first) {
-          first = false;
-          keyKV = KeyValueUtil.createKeyValueFromKey(key);
-          LOG.info("First bottom when key > top: " + keyKV);
-          tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
-          for (int i = 0; i < tmp.length(); i++) {
-            assertTrue(tmp.charAt(i) == 'a');
+      try (HFileScanner bottomScanner = bottom.getScanner(false, false, false)) {
+        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
+          key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
+          if (first) {
+            first = false;
+            KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
+            LOG.info("First bottom when key > top: " + keyKV);
+            String tmp =
+              Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
+            for (int i = 0; i < tmp.length(); i++) {
+              assertTrue(tmp.charAt(i) == 'a');
+            }
          }
        }
-      }
-      keyKV = KeyValueUtil.createKeyValueFromKey(key);
-      LOG.info("Last bottom when key > top: " + keyKV);
-      for (int i = 0; i < tmp.length(); i++) {
-        assertTrue(
-          Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()).charAt(i)
-            == 'z');
+        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
+        LOG.info("Last bottom when key > top: " + keyKV);
+        String tmp =
+          Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
+        for (int i = 0; i < tmp.length(); i++) {
+          assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength())
+            .charAt(i) == 'z');
+        }
      }
    } finally {
      if (top != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 4c96dc221abf..9ec1ed809316 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTestConst;
 import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Delete;
@@ -51,7 +52,6 @@
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
@@ -331,16 +331,21 @@ private void verifyCounts(int countRow1, int countRow2) throws Exception {
     int count1 = 0;
     int count2 = 0;
     for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
-      HFileScanner scanner = f.getReader().getScanner(false, false);
-      scanner.seekTo();
-      do {
-        byte[] row = CellUtil.cloneRow(scanner.getCell());
-        if (Bytes.equals(row, STARTROW)) {
-          count1++;
-        } else if (Bytes.equals(row, secondRowBytes)) {
-          count2++;
+      try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+        scanner.seek(KeyValue.LOWESTKEY);
+        for (Cell cell;;) {
+          cell = scanner.next();
+          if (cell == null) {
+            break;
+          }
+          byte[] row = CellUtil.cloneRow(cell);
+          if (Bytes.equals(row, STARTROW)) {
+            count1++;
+          } else if (Bytes.equals(row, secondRowBytes)) {
+            count2++;
+          }
         }
-      } while (scanner.next());
+      }
     }
     assertEquals(countRow1, count1);
     assertEquals(countRow2, count2);
@@ -349,13 +354,12 @@ private void verifyCounts(int countRow1, int countRow2) throws Exception {
   private int count() throws IOException {
     int count = 0;
     for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
-      HFileScanner scanner = f.getReader().getScanner(false, false);
-      if (!scanner.seekTo()) {
-        continue;
+      try (StoreFileScanner scanner = f.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+        scanner.seek(KeyValue.LOWESTKEY);
+        while (scanner.next() != null) {
+          count++;
+        }
       }
-      do {
-        count++;
-      } while (scanner.next());
     }
     return count;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index f7c65b02d8ba..68c6b6434c4f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TestMetaTableAccessor;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -43,7 +44,6 @@
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -501,16 +501,19 @@ public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception {
         // Our file does not exist anymore. was moved by the compaction above.
         LOG.debug(Boolean.toString(getRS().getFileSystem().exists(sf.getPath())));
         Assert.assertFalse(getRS().getFileSystem().exists(sf.getPath()));
-
-        HFileScanner scanner = sf.getReader().getScanner(false, false);
-        scanner.seekTo();
-        do {
-          keys++;
-
-          Cell cell = scanner.getCell();
-          sum += Integer
-            .parseInt(Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
-        } while (scanner.next());
+        sf.initReader();
+        try (StoreFileScanner scanner = sf.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
+          scanner.seek(KeyValue.LOWESTKEY);
+          for (Cell cell;;) {
+            cell = scanner.next();
+            if (cell == null) {
+              break;
+            }
+            keys++;
+            sum += Integer.parseInt(
+              Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
+          }
+        }
       }
       Assert.assertEquals(3000, keys);
       Assert.assertEquals(4498500, sum);
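
Note on migrating callers: this patch removes the deprecated two-argument StoreFileReader.getScanner overload and narrows the three-argument overload to protected, so code outside the regionserver internals now iterates a store file through StoreFileScanner, as the test changes above do. The following is a minimal sketch of the replacement idiom, assuming an HStoreFile variable named storeFile; that name and the surrounding setup are illustrative, not part of the patch itself.

    // Sketch of the pattern this patch applies throughout the tests.
    // Assumes storeFile is an org.apache.hadoop.hbase.regionserver.HStoreFile.
    storeFile.initReader(); // open the reader first, as TestCompaction#count does above
    try (StoreFileScanner scanner =
      storeFile.getPreadScanner(false, Long.MAX_VALUE, 0, false)) {
      scanner.seek(KeyValue.LOWESTKEY); // position at the first cell of the file
      for (Cell cell; (cell = scanner.next()) != null;) {
        // process each cell; next() returns null once the file is exhausted
      }
    }

The try-with-resources block is what replaces the old do/while loop over HFileScanner.next(), which had no close step; that is also why several tests above gained explicit close handling for their scanners.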