diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index b50f4baec6ba..c7923164f3f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -104,23 +104,18 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s
           // Call coprocessors
           preDelete(env);
 
-          setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
-          break;
-        case DELETE_TABLE_REMOVE_FROM_META:
-          LOG.debug("Deleting regions from META for {}", this);
-          DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
           setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT);
           break;
         case DELETE_TABLE_CLEAR_FS_LAYOUT:
           LOG.debug("Deleting regions from filesystem for {}", this);
           DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
-          setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
-          regions = null;
+          setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
           break;
-        case DELETE_TABLE_UPDATE_DESC_CACHE:
-          LOG.debug("Deleting descriptor for {}", this);
-          DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+        case DELETE_TABLE_REMOVE_FROM_META:
+          LOG.debug("Deleting regions from META for {}", this);
+          DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
           setNextState(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS);
+          regions = null;
           break;
         case DELETE_TABLE_UNASSIGN_REGIONS:
           LOG.debug("Deleting assignment state for {}", this);
@@ -393,6 +388,8 @@ protected static void deleteFromMeta(final MasterProcedureEnv env, final TableNa
     if (fnm != null) {
       fnm.deleteFavoredNodesForRegions(regions);
     }
+
+    deleteTableDescriptorCache(env, tableName);
   }
 
   protected static void deleteAssignmentState(final MasterProcedureEnv env,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 52da607ef835..7381e123d1f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -98,13 +98,10 @@ protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState
           // Call coprocessors
           preTruncate(env);
 
-          setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
-          break;
-        case TRUNCATE_TABLE_REMOVE_FROM_META:
-          tableDescriptor = env.getMasterServices().getTableDescriptors()
-            .get(tableName);
-          DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
-          DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+          // We need to cache the table descriptor in the initial stage, so that it is saved within
+          // the procedure state and can be recovered if the procedure crashes between
+          // TRUNCATE_TABLE_REMOVE_FROM_META and TRUNCATE_TABLE_CREATE_FS_LAYOUT
+          tableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
           setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);
           break;
         case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
@@ -121,6 +118,13 @@ protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState
           } else {
            regions = recreateRegionInfo(regions);
           }
+          setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
+          break;
+        case TRUNCATE_TABLE_REMOVE_FROM_META:
+          List<RegionInfo> originalRegions = env.getAssignmentManager()
+            .getRegionStates().getRegionsOfTable(getTableName());
+          DeleteTableProcedure.deleteFromMeta(env, getTableName(), originalRegions);
+          DeleteTableProcedure.deleteAssignmentState(env, getTableName());
           setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
           break;
         case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
index 88159662bf15..fa83a9328554 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -19,9 +19,12 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.fs.FileSystem;
@@ -133,6 +136,21 @@ public void testSimpleDeleteWithSplits() throws Exception {
     testSimpleDelete(tableName, splitKeys);
   }
 
+  @Test
+  public void testDeleteFromMeta() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+      getMasterProcedureExecutor(), tableName, null, "f1", "f2");
+    List<RegionInfo> regionsList = new ArrayList<>();
+    UTIL.getAdmin().disableTable(tableName);
+    MasterProcedureEnv procedureEnv = getMasterProcedureExecutor().getEnvironment();
+    assertNotNull("Table should be on TableDescriptors cache.",
+      procedureEnv.getMasterServices().getTableDescriptors().get(tableName));
+    DeleteTableProcedure.deleteFromMeta(procedureEnv, tableName, regionsList);
+    assertNull("Table shouldn't be on TableDescriptors anymore.",
+      procedureEnv.getMasterServices().getTableDescriptors().get(tableName));
+  }
+
   private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception {
     RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
       getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");