From 34b5d64ae0d78420841d1297fe4bbf28ee0d1baf Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Fri, 28 Apr 2023 09:58:05 +0800
Subject: [PATCH] HBASE-27818 Split TestReplicationDroppedTables (#5206)

Signed-off-by: Wellington Chevreuil
(cherry picked from commit 1ced254b8d1208f543f936d477ff751b06e2ab73)
---
 ... => ReplicationDroppedTablesTestBase.java} | 132 ++----------------
 .../TestEditsBehindDroppedTableTiming.java    | 121 ++++++++++++++++
 .../TestEditsDroppedWithDroppedTable.java     |  41 ++++++
 .../TestEditsDroppedWithDroppedTableNS.java   |  49 +++++++
 .../TestEditsStuckBehindDroppedTable.java     |  41 ++++++
 5 files changed, 264 insertions(+), 120 deletions(-)
 rename hbase-server/src/test/java/org/apache/hadoop/hbase/replication/{TestReplicationDroppedTables.java => ReplicationDroppedTablesTestBase.java} (61%)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java
similarity index 61%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java
index 7df9a8e6bf7a..98053ff24489 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationDroppedTablesTestBase.java
@@ -21,9 +21,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -37,29 +35,23 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Category({ ReplicationTests.class, LargeTests.class })
-public class TestReplicationDroppedTables extends TestReplicationBase {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestReplicationDroppedTables.class);
+/**
+ * Base class for testing replication for dropped tables.
+ */
+public class ReplicationDroppedTablesTestBase extends TestReplicationBase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationDroppedTables.class);
-  private static final int ROWS_COUNT = 1000;
+  private static final Logger LOG = LoggerFactory.getLogger(ReplicationDroppedTablesTestBase.class);
+  protected static final int ROWS_COUNT = 1000;
 
   @Before
+  @Override
   public void setUpBase() throws Exception {
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
@@ -103,38 +95,12 @@ public void setUpBase() throws Exception {
     CONF1.setInt(RpcServer.MAX_REQUEST_SIZE, 10 * 1024);
   }
 
-  @Test
-  public void testEditsStuckBehindDroppedTable() throws Exception {
-    // Sanity check Make sure by default edits for dropped tables stall the replication queue, even
-    // when the table(s) in question have been deleted on both ends.
-    testEditsBehindDroppedTable(false, "test_dropped");
-  }
-
-  @Test
-  public void testEditsDroppedWithDroppedTable() throws Exception {
-    // Make sure by default edits for dropped tables are themselves dropped when the
-    // table(s) in question have been deleted on both ends.
-    testEditsBehindDroppedTable(true, "test_dropped");
-  }
-
-  @Test
-  public void testEditsDroppedWithDroppedTableNS() throws Exception {
-    // also try with a namespace
-    UTIL1.getAdmin().createNamespace(NamespaceDescriptor.create("NS").build());
-    UTIL2.getAdmin().createNamespace(NamespaceDescriptor.create("NS").build());
-    try {
-      testEditsBehindDroppedTable(true, "NS:test_dropped");
-    } finally {
-      UTIL1.getAdmin().deleteNamespace("NS");
-      UTIL2.getAdmin().deleteNamespace("NS");
-    }
-  }
-
-  private byte[] generateRowKey(int id) {
+  protected final byte[] generateRowKey(int id) {
     return Bytes.toBytes(String.format("NormalPut%03d", id));
   }
 
-  private void testEditsBehindDroppedTable(boolean allowProceeding, String tName) throws Exception {
+  protected final void testEditsBehindDroppedTable(boolean allowProceeding, String tName)
+    throws Exception {
     CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, allowProceeding);
     CONF1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
 
@@ -205,80 +171,6 @@ private void testEditsBehindDroppedTable(boolean allowProceeding, String tName)
     CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
   }
 
-  @Test
-  public void testEditsBehindDroppedTableTiming() throws Exception {
-    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
-    CONF1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
-
-    // make sure we have a single region server only, so that all
-    // edits for all tables go there
-    restartSourceCluster(1);
-
-    TableName tablename = TableName.valueOf("testdroppedtimed");
-    byte[] familyName = Bytes.toBytes("fam");
-    byte[] row = Bytes.toBytes("row");
-
-    TableDescriptor table =
-      TableDescriptorBuilder.newBuilder(tablename).setColumnFamily(ColumnFamilyDescriptorBuilder
-        .newBuilder(familyName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
-
-    Connection connection1 = ConnectionFactory.createConnection(CONF1);
-    Connection connection2 = ConnectionFactory.createConnection(CONF2);
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.createTable(table);
-    }
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.createTable(table);
-    }
-    UTIL1.waitUntilAllRegionsAssigned(tablename);
-    UTIL2.waitUntilAllRegionsAssigned(tablename);
-
-    // now suspend replication
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.disableReplicationPeer(PEER_ID2);
-    }
-
-    // put some data (lead with 0 so the edit gets sorted before the other table's edits
-    // in the replication batch) write a bunch of edits, making sure we fill a batch
-    try (Table droppedTable = connection1.getTable(tablename)) {
-      byte[] rowKey = Bytes.toBytes(0 + " put on table to be dropped");
-      Put put = new Put(rowKey);
-      put.addColumn(familyName, row, row);
-      droppedTable.put(put);
-    }
-
-    try (Table table1 = connection1.getTable(tableName)) {
-      for (int i = 0; i < ROWS_COUNT; i++) {
-        Put put = new Put(generateRowKey(i)).addColumn(famName, row, row);
-        table1.put(put);
-      }
-    }
-
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.disableTable(tablename);
-      admin2.deleteTable(tablename);
-    }
-
-    // edit should still be stuck
-    try (Admin admin1 = connection1.getAdmin()) {
-      // enable the replication peer.
-      admin1.enableReplicationPeer(PEER_ID2);
-      // the source table still exists, replication should be stalled
-      verifyReplicationStuck();
-
-      admin1.disableTable(tablename);
-      // still stuck, source table still exists
-      verifyReplicationStuck();
-
-      admin1.deleteTable(tablename);
-      // now the source table is gone, replication should proceed, the
-      // offending edits be dropped
-      verifyReplicationProceeded();
-    }
-    // just to be safe
-    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
-  }
-
   private boolean peerHasAllNormalRows() throws IOException {
     try (ResultScanner scanner = htable2.getScanner(new Scan())) {
       Result[] results = scanner.next(ROWS_COUNT);
@@ -292,7 +184,7 @@ private boolean peerHasAllNormalRows() throws IOException {
     }
   }
 
-  private void verifyReplicationProceeded() throws Exception {
+  protected final void verifyReplicationProceeded() throws Exception {
     for (int i = 0; i < NB_RETRIES; i++) {
       if (i == NB_RETRIES - 1) {
         fail("Waited too much time for put replication");
@@ -306,7 +198,7 @@ private void verifyReplicationProceeded() throws Exception {
     }
   }
 
-  private void verifyReplicationStuck() throws Exception {
+  protected final void verifyReplicationStuck() throws Exception {
     for (int i = 0; i < NB_RETRIES; i++) {
       if (peerHasAllNormalRows()) {
         fail("Edit should have been stuck behind dropped tables");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java
new file mode 100644
index 000000000000..3ab2f1d60800
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsBehindDroppedTableTiming.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestEditsBehindDroppedTableTiming extends ReplicationDroppedTablesTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestEditsBehindDroppedTableTiming.class);
+
+  @Override
+  public void setUpBase() throws Exception {
+    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
+    CONF1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
+    super.setUpBase();
+    // make sure we have a single region server only, so that all
+    // edits for all tables go there
+    restartSourceCluster(1);
+  }
+
+  @Test
+  public void testEditsBehindDroppedTableTiming() throws Exception {
+    TableName tablename = TableName.valueOf("testdroppedtimed");
+    byte[] familyName = Bytes.toBytes("fam");
+    byte[] row = Bytes.toBytes("row");
+
+    TableDescriptor table =
+      TableDescriptorBuilder.newBuilder(tablename).setColumnFamily(ColumnFamilyDescriptorBuilder
+        .newBuilder(familyName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
+
+    Connection connection1 = ConnectionFactory.createConnection(CONF1);
+    Connection connection2 = ConnectionFactory.createConnection(CONF2);
+    try (Admin admin1 = connection1.getAdmin()) {
+      admin1.createTable(table);
+    }
+    try (Admin admin2 = connection2.getAdmin()) {
+      admin2.createTable(table);
+    }
+    UTIL1.waitUntilAllRegionsAssigned(tablename);
+    UTIL2.waitUntilAllRegionsAssigned(tablename);
+
+    // now suspend replication
+    try (Admin admin1 = connection1.getAdmin()) {
+      admin1.disableReplicationPeer(PEER_ID2);
+    }
+
+    // put some data (lead with 0 so the edit gets sorted before the other table's edits
+    // in the replication batch) write a bunch of edits, making sure we fill a batch
+    try (Table droppedTable = connection1.getTable(tablename)) {
+      byte[] rowKey = Bytes.toBytes(0 + " put on table to be dropped");
+      Put put = new Put(rowKey);
+      put.addColumn(familyName, row, row);
+      droppedTable.put(put);
+    }
+
+    try (Table table1 = connection1.getTable(tableName)) {
+      for (int i = 0; i < ROWS_COUNT; i++) {
+        Put put = new Put(generateRowKey(i)).addColumn(famName, row, row);
+        table1.put(put);
+      }
+    }
+
+    try (Admin admin2 = connection2.getAdmin()) {
+      admin2.disableTable(tablename);
+      admin2.deleteTable(tablename);
+    }
+
+    // edit should still be stuck
+    try (Admin admin1 = connection1.getAdmin()) {
+      // enable the replication peer.
+      admin1.enableReplicationPeer(PEER_ID2);
+      // the source table still exists, replication should be stalled
+      verifyReplicationStuck();
+
+      admin1.disableTable(tablename);
+      // still stuck, source table still exists
+      verifyReplicationStuck();
+
+      admin1.deleteTable(tablename);
+      // now the source table is gone, replication should proceed, the
+      // offending edits be dropped
+      verifyReplicationProceeded();
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java
new file mode 100644
index 000000000000..d7702b291195
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTable.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestEditsDroppedWithDroppedTable extends ReplicationDroppedTablesTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestEditsDroppedWithDroppedTable.class);
+
+  @Test
+  public void testEditsDroppedWithDroppedTable() throws Exception {
+    // Make sure by default edits for dropped tables are themselves dropped when the
+    // table(s) in question have been deleted on both ends.
+    testEditsBehindDroppedTable(true, "test_dropped");
+  }
+
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java
new file mode 100644
index 000000000000..b6a463d76842
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsDroppedWithDroppedTableNS.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestEditsDroppedWithDroppedTableNS extends ReplicationDroppedTablesTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestEditsDroppedWithDroppedTableNS.class);
+
+  @Before
+  @Override
+  public void setUpBase() throws Exception {
+    // also try with a namespace
+    UTIL1.getAdmin().createNamespace(NamespaceDescriptor.create("NS").build());
+    UTIL2.getAdmin().createNamespace(NamespaceDescriptor.create("NS").build());
+    super.setUpBase();
+  }
+
+  @Test
+  public void testEditsDroppedWithDroppedTableNS() throws Exception {
+    testEditsBehindDroppedTable(true, "NS:test_dropped");
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java
new file mode 100644
index 000000000000..543a130af5b2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestEditsStuckBehindDroppedTable.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestEditsStuckBehindDroppedTable extends ReplicationDroppedTablesTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestEditsStuckBehindDroppedTable.class);
+
+  @Test
+  public void testEditsStuckBehindDroppedTable() throws Exception {
+    // Sanity check Make sure by default edits for dropped tables stall the replication queue, even
+    // when the table(s) in question have been deleted on both ends.
+    testEditsBehindDroppedTable(false, "test_dropped");
+  }
+
+}