diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java
index b343592c4151..da3d97547645 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestTooLargeLog.java
@@ -36,9 +36,11 @@
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ServerType;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -61,27 +63,42 @@ public class TestTooLargeLog {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
+    // Slow log needs to be enabled initially to spin up the SlowLogQueueService
     TEST_UTIL.getConfiguration().setBoolean(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, true);
-    TEST_UTIL.getConfiguration().setInt("hbase.ipc.warn.response.size", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.ipc.warn.response.size",
+      HConstants.DEFAULT_BLOCKSIZE / 2);
     TEST_UTIL.startMiniCluster(1);
     ADMIN = TEST_UTIL.getAdmin();
   }
 
+  @AfterClass
+  public static void afterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
   /**
    * Tests that we can trigger based on blocks scanned, and also that we properly pass the block
    * bytes scanned value through to the client.
    */
   @Test
-  public void testLogLargeBlockBytesScanned() throws IOException, InterruptedException {
+  public void testLogLargeBlockBytesScanned() throws IOException {
+    // Turn off slow log buffer for initial loadTable, because we were seeing core dump
+    // issues coming from that slow log entry. We will re-enable below.
+    HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    regionServer.getConfiguration().setBoolean(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, false);
+    regionServer.updateConfiguration();
+
     byte[] family = Bytes.toBytes("0");
     Table table = TEST_UTIL.createTable(TableName.valueOf("testLogLargeBlockBytesScanned"), family);
     TEST_UTIL.loadTable(table, family);
     TEST_UTIL.flush(table.getName());
 
-    Set<ServerName> server =
-      Collections.singleton(TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName());
+    Set<ServerName> server = Collections.singleton(regionServer.getServerName());
     Admin admin = TEST_UTIL.getAdmin();
-    admin.clearSlowLogResponses(server);
+
+    // Turn on slow log so we capture large scan below
+    regionServer.getConfiguration().setBoolean(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, true);
+    regionServer.updateConfiguration();
 
     Scan scan = new Scan();
     scan.setCaching(1);
@@ -90,13 +107,12 @@ public void testLogLargeBlockBytesScanned() throws IOException, InterruptedExcep
       scanner.next();
     }
 
-    List<LogEntry> entries =
-      admin.getLogEntries(server, "LARGE_LOG", ServerType.REGION_SERVER, 1, Collections.emptyMap());
+    List<LogEntry> entries = admin.getLogEntries(server, "LARGE_LOG", ServerType.REGION_SERVER, 100,
+      Collections.emptyMap());
     assertEquals(1, entries.size());
     OnlineLogRecord record = (OnlineLogRecord) entries.get(0);
-    System.out.println(record.toJsonPrettyPrint());
     assertTrue("expected " + record.getBlockBytesScanned() + " to be >= 100",
       record.getBlockBytesScanned() >= 100);