Commit

HBASE-22806 Recreating a deleted column family brings back the deleted cells (apache#530)

Signed-off-by: stack <[email protected]>
Pankaj authored and Guanghao Zhang committed Sep 9, 2019
1 parent 8a7b4a3 commit 16f6d75
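For context, here is a minimal sketch (not part of the commit) of the scenario the subject line describes, written against the public HBase 2.x client API. Everything in it is illustrative: the class, table, family, row and qualifier names are made up, and it assumes a cluster that does not yet carry this fix. A column family is dropped while it still holds unflushed memstore data and is then re-created; without the fix, the supposedly deleted cells can come back.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical reproduction of HBASE-22806; all names are illustrative.
public class RecreateDeletedFamilyRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("repro");
    byte[] cf1 = Bytes.toBytes("cf1");
    byte[] cf2 = Bytes.toBytes("cf2");
    byte[] row = Bytes.toBytes("r1");
    byte[] qual = Bytes.toBytes("q");

    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin();
        Table table = conn.getTable(tn)) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build());

      // Write to cf2 but do NOT flush, so the cell lives only in the memstore.
      table.put(new Put(row).addColumn(cf2, qual, Bytes.toBytes("v1")));

      // Drop cf2, then re-create it.
      admin.deleteColumnFamily(tn, cf2);
      admin.addColumnFamily(tn, ColumnFamilyDescriptorBuilder.of(cf2));

      // Without the fix, the lingering memstore data can surface again in the
      // re-created family; with the fix, the family comes back empty.
      Result r = table.get(new Get(row).addColumn(cf2, qual));
      System.out.println("cf2 cell present after re-create: " + !r.isEmpty());
    }
  }
}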
Showing 2 changed files with 65 additions and 8 deletions.
@@ -120,14 +120,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
         break;
       case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
         updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
-        if (deleteColumnFamilyInModify) {
-          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
-        } else {
-          setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
-        }
-        break;
-      case MODIFY_TABLE_DELETE_FS_LAYOUT:
-        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
         setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
         break;
       case MODIFY_TABLE_POST_OPERATION:
@@ -138,6 +130,14 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
         if (env.getAssignmentManager().isTableEnabled(getTableName())) {
           addChildProcedure(new ReopenTableRegionsProcedure(getTableName()));
         }
+        if (deleteColumnFamilyInModify) {
+          setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
+        } else {
+          setNextState(ModifyTableState.MODIFY_TABLE_SYNC_SCHEMA_TO_PEER);
+        }
+        break;
+      case MODIFY_TABLE_DELETE_FS_LAYOUT:
+        deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
         setNextState(ModifyTableState.MODIFY_TABLE_SYNC_SCHEMA_TO_PEER);
         break;
       case MODIFY_TABLE_SYNC_SCHEMA_TO_PEER:
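Read together, the two hunks reorder the procedure: before the change, MODIFY_TABLE_DELETE_FS_LAYOUT (deleteFromFs, which removes a dropped family's directory) ran immediately after MODIFY_TABLE_REMOVE_REPLICA_COLUMN, and therefore before the regions were reopened with the new table descriptor, so data still sitting in the memstore for the removed family could be written back out afterwards and bring the "deleted" cells with it, which is the symptom in the subject line. After the change, deleteFromFs runs only once the region-reopen step (the case that schedules ReopenTableRegionsProcedure) has completed. Below is a simplified sketch of the new ordering on the delete-family path, as one reading of these hunks, not the full procedure:

// Simplified state ordering on the delete-column-family path after this patch
// (derived from the hunks above; the real executeFromState has more states):
//
//   MODIFY_TABLE_REMOVE_REPLICA_COLUMN
//     -> MODIFY_TABLE_POST_OPERATION
//     -> ...
//     -> <region-reopen state: schedules ReopenTableRegionsProcedure>
//          // regions are reopened with the new descriptor before any FS cleanup
//     -> MODIFY_TABLE_DELETE_FS_LAYOUT    // deleteFromFs(...) runs only now
//     -> MODIFY_TABLE_SYNC_SCHEMA_TO_PEER
//
// When no family is removed, the reopen state goes straight to
// MODIFY_TABLE_SYNC_SCHEMA_TO_PEER and MODIFY_TABLE_DELETE_FS_LAYOUT is skipped.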
@@ -45,6 +45,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -103,6 +104,7 @@
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.TableDescriptorChecker;
@@ -6911,4 +6913,59 @@ public void testModifyTableWithZeroRegionReplicas() throws Exception {
 
     TEST_UTIL.getAdmin().modifyTable(newDesc);
   }
+
+  @Test(timeout = 60000)
+  public void testModifyTableWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, true);
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteCFWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, false);
+  }
+
+  /**
+   * Create table and validate online schema modification
+   * @param tableName Table name
+   * @param modifyTable Modify table if true otherwise delete column family
+   * @throws IOException in case of failures
+   */
+  private void createTableAndValidateTableSchemaModification(TableName tableName,
+      boolean modifyTable) throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    // Create table with two Cfs
+    byte[] cf1 = Bytes.toBytes("cf1");
+    byte[] cf2 = Bytes.toBytes("cf2");
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build();
+    admin.createTable(tableDesc);
+
+    Table t = TEST_UTIL.getConnection().getTable(tableName);
+    // Insert few records and flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+    admin.flush(tableName);
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName);
+    List<Path> regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir);
+    assertTrue(regionDirs.size() == 1);
+    List<Path> familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue(familyDirs.size() == 2);
+
+    // Insert record but dont flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+
+    if (modifyTable) {
+      tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build();
+      admin.modifyTable(tableDesc);
+    } else {
+      admin.deleteColumnFamily(tableName, cf2);
+    }
+    // After table modification or delete family there should be only one CF in FS
+    familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue("CF dir count should be 1, but was " + familyDirs.size(), familyDirs.size() == 1);
+  }
 }
