diff --git a/dbms/src/TiDB/Schema/SchemaBuilder.cpp b/dbms/src/TiDB/Schema/SchemaBuilder.cpp index e5fccd1ebfe..42c015d9e52 100644 --- a/dbms/src/TiDB/Schema/SchemaBuilder.cpp +++ b/dbms/src/TiDB/Schema/SchemaBuilder.cpp @@ -391,7 +391,6 @@ void SchemaBuilder::applySetTiFlashReplica(DatabaseID databa auto & tmt_context = context.getTMTContext(); if (table_info->replica_info.count == 0) { - // Replicat number is to 0, mark the table as tombstone in TiFlash auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); if (unlikely(storage == nullptr)) { @@ -402,7 +401,14 @@ return; } - applyDropTable(database_id, table_id, "SetTiFlashReplica-0"); + // We cannot mark the table as safe to be physically dropped from the TiFlash instances when + // the number of TiFlash replicas is set to 0. + // There could be a concurrency issue that causes data loss. Check the following link for details: + // https://github.com/pingcap/tiflash/issues/9438#issuecomment-2360370761 + // applyDropTable(database_id, table_id, "SetTiFlashReplica-0"); + + // For now, only update the replica number to 0 instead + updateTiFlashReplicaNumOnStorage(database_id, table_id, storage, table_info); return; } diff --git a/tests/fullstack-test2/ddl/alter_table_tiflash_replica.test b/tests/fullstack-test2/ddl/alter_table_tiflash_replica.test index e7adfe248e2..1952b7bf954 100644 --- a/tests/fullstack-test2/ddl/alter_table_tiflash_replica.test +++ b/tests/fullstack-test2/ddl/alter_table_tiflash_replica.test @@ -82,13 +82,11 @@ mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t; | x | +----+ | 1 | -+----+ | 8 | -+----+ | 50 | +----+ >> DBGInvoke get_partition_tables_tiflash_replica_count("test", "t") ┌─get_partition_tables_tiflash_replica_count(test, t)─┐ │ 1/1/1/ │ -└─────────────────────────────────────────────────────┘ \ No newline at end of file +└─────────────────────────────────────────────────────┘