diff --git a/dbms/src/Common/FailPoint.cpp b/dbms/src/Common/FailPoint.cpp
index 95db5d4c414..43e3599a534 100644
--- a/dbms/src/Common/FailPoint.cpp
+++ b/dbms/src/Common/FailPoint.cpp
@@ -76,6 +76,7 @@ namespace DB
     M(skip_check_segment_update) \
     M(force_set_page_file_write_errno) \
     M(force_split_io_size_4k) \
+    M(force_set_num_regions_for_table) \
     M(minimum_block_size_for_cross_join) \
     M(random_exception_after_dt_write_done) \
     M(random_slow_page_storage_write) \
diff --git a/dbms/src/Debug/MockRaftStoreProxy.cpp b/dbms/src/Debug/MockRaftStoreProxy.cpp
index 21de8fcffca..c56119b643b 100644
--- a/dbms/src/Debug/MockRaftStoreProxy.cpp
+++ b/dbms/src/Debug/MockRaftStoreProxy.cpp
@@ -471,8 +471,8 @@ void MockRaftStoreProxy::unsafeInvokeForTest(std::function<void(MockRaftStoreProxy &)> && cb)
 void MockRaftStoreProxy::bootstrapWithRegion(
     KVStore & kvs,
     TMTContext & tmt,
-    UInt64 region_id,
-    std::optional<std::pair<std::string, std::string>> maybe_range)
+    RegionID region_id,
+    std::optional<std::pair<std::string, std::string>> maybe_range) NO_THREAD_SAFETY_ANALYSIS
 {
     {
         auto _ = genLockGuard();
diff --git a/dbms/src/Debug/MockRaftStoreProxy.h b/dbms/src/Debug/MockRaftStoreProxy.h
index 0ac1efd7179..de72995074a 100644
--- a/dbms/src/Debug/MockRaftStoreProxy.h
+++ b/dbms/src/Debug/MockRaftStoreProxy.h
@@ -171,7 +171,7 @@ struct MockRaftStoreProxy : MutexLockWrap
     void bootstrapWithRegion(
         KVStore & kvs,
         TMTContext & tmt,
-        UInt64 region_id,
+        RegionID region_id,
         std::optional<std::pair<std::string, std::string>> maybe_range);
 
     /// Bootstrap a table.
diff --git a/dbms/src/Debug/MockTiDB.cpp b/dbms/src/Debug/MockTiDB.cpp
index 8ee67aedf2c..155cd836e6e 100644
--- a/dbms/src/Debug/MockTiDB.cpp
+++ b/dbms/src/Debug/MockTiDB.cpp
@@ -68,7 +68,7 @@ MockTiDB::MockTiDB()
     databases["default"] = 0;
 }
 
-TablePtr MockTiDB::dropTableInternal(
+TablePtr MockTiDB::dropTableByNameImpl(
     Context & context,
     const String & database_name,
     const String & table_name,
@@ -79,10 +79,41 @@ TablePtr MockTiDB::dropTableInternal(
     if (it_by_name == tables_by_name.end())
         return nullptr;
 
+    auto table = it_by_name->second;
+    dropTableInternal(context, table, drop_regions);
+
+    tables_by_name.erase(it_by_name);
+    return table;
+}
+
+TablePtr MockTiDB::dropTableByIdImpl(Context & context, const TableID table_id, bool drop_regions)
+{
+    auto iter = tables_by_id.find(table_id);
+    if (iter == tables_by_id.end())
+        return nullptr;
+
+    auto table = iter->second;
+    dropTableInternal(context, table, drop_regions);
+
+    // erase from `tables_by_name`
+    for (auto iter_by_name = tables_by_name.begin(); iter_by_name != tables_by_name.end(); /* empty */)
+    {
+        if (table != iter_by_name->second)
+        {
+            ++iter_by_name;
+            continue;
+        }
+        LOG_INFO(Logger::get(), "removing table from MockTiDB, name={} table_id={}", iter_by_name->first, table_id);
+        iter_by_name = tables_by_name.erase(iter_by_name);
+    }
+    return table;
+}
+
+TablePtr MockTiDB::dropTableInternal(Context & context, const TablePtr & table, bool drop_regions)
+{
     auto & kvstore = context.getTMTContext().getKVStore();
     auto & region_table = context.getTMTContext().getRegionTable();
 
-    auto table = it_by_name->second;
     if (table->isPartitionTable())
     {
         for (const auto & partition : table->table_info.partition.definitions)
@@ -98,8 +129,6 @@ TablePtr MockTiDB::dropTableInternal(
     }
     tables_by_id.erase(table->id());
-    tables_by_name.erase(it_by_name);
-
     if (drop_regions)
     {
         for (auto & e : region_table.getRegionsByTable(NullspaceID, table->id()))
@@ -121,7 +150,7 @@ void MockTiDB::dropDB(Context & context, const String & database_name, bool drop_regions)
     });
 
     for (const auto & table_name : table_names)
-        dropTableInternal(context, database_name, table_name, drop_regions);
+        dropTableByNameImpl(context, database_name, table_name, drop_regions);
 
     version++;
 
@@ -141,7 +170,25 @@ void MockTiDB::dropTable(Context & context, const String & database_name, const
 {
     std::lock_guard lock(tables_mutex);
 
-    auto table = dropTableInternal(context, database_name, table_name, drop_regions);
+    auto table = dropTableByNameImpl(context, database_name, table_name, drop_regions);
     if (!table)
         return;
 
+    version++;
+
+    SchemaDiff diff;
+    diff.type = SchemaActionType::DropTable;
+    diff.schema_id = table->database_id;
+    diff.table_id = table->id();
+    diff.version = version;
+    version_diff[version] = diff;
+}
+
+void MockTiDB::dropTableById(Context & context, const TableID & table_id, bool drop_regions)
+{
+    std::lock_guard lock(tables_mutex);
+
+    auto table = dropTableByIdImpl(context, table_id, drop_regions);
     if (!table)
         return;
diff --git a/dbms/src/Debug/MockTiDB.h b/dbms/src/Debug/MockTiDB.h
index ca5c95ff5d4..8282a0b78e4 100644
--- a/dbms/src/Debug/MockTiDB.h
+++ b/dbms/src/Debug/MockTiDB.h
@@ -15,6 +15,7 @@
 #pragma once
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -113,6 +114,7 @@ class MockTiDB : public ext::Singleton<MockTiDB>
     void dropPartition(const String & database_name, const String & table_name, TableID partition_id);
 
     void dropTable(Context & context, const String & database_name, const String & table_name, bool drop_regions);
+    void dropTableById(Context & context, const TableID & table_id, bool drop_regions);
 
     void dropDB(Context & context, const String & database_name, bool drop_regions);
 
@@ -169,11 +171,13 @@ class MockTiDB : public ext::Singleton<MockTiDB>
         const String & partition_name,
         Timestamp tso,
         bool is_add_part);
 
-    TablePtr dropTableInternal(
+    TablePtr dropTableByNameImpl(
         Context & context,
         const String & database_name,
         const String & table_name,
         bool drop_regions);
+    TablePtr dropTableByIdImpl(Context & context, TableID table_id, bool drop_regions);
+    TablePtr dropTableInternal(Context & context, const TablePtr & table, bool drop_regions);
 
     TablePtr getTableByNameInternal(const String & database_name, const String & table_name);
     TablePtr getTableByID(TableID table_id);
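The MockTiDB change above splits the old dropTableInternal into a by-name lookup (dropTableByNameImpl), a by-id lookup (dropTableByIdImpl), and a shared core that takes the resolved TablePtr, so tests can now drop a table knowing only its table_id. A minimal usage sketch (the `ctx` and `table_id` names are assumed here, taken from the tests later in this patch):

    // Drop the table by id but keep its regions attached; the next schema
    // sync will then observe the DropTable diff emitted by MockTiDB.
    MockTiDB::instance().dropTableById(ctx, table_id, /*drop_regions=*/false);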
diff --git a/dbms/src/Debug/dbgFuncSchema.cpp b/dbms/src/Debug/dbgFuncSchema.cpp
index a55d3bf7d55..7e91582893f 100644
--- a/dbms/src/Debug/dbgFuncSchema.cpp
+++ b/dbms/src/Debug/dbgFuncSchema.cpp
@@ -176,16 +176,20 @@ void dbgFuncRefreshMappedTableSchema(Context & context, const ASTs & args, DBGInvoker::Printer)
 // Trigger gc on all databases / tables.
 // Usage:
-//   ./storage-client.sh "DBGInvoke gc_schemas([gc_safe_point])"
+//   ./storage-client.sh "DBGInvoke gc_schemas([gc_safe_point, ignore_remain_regions])"
 void dbgFuncGcSchemas(Context & context, const ASTs & args, DBGInvoker::Printer output)
 {
     auto & service = context.getSchemaSyncService();
     Timestamp gc_safe_point = 0;
+    bool ignore_remain_regions = false;
     if (args.empty())
         gc_safe_point = PDClientHelper::getGCSafePointWithRetry(context.getTMTContext().getPDClient(), NullspaceID);
-    else
+    if (!args.empty())
         gc_safe_point = safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value);
-    service->gc(gc_safe_point, NullspaceID);
+    if (args.size() >= 2)
+        ignore_remain_regions = safeGet<String>(typeid_cast<const ASTLiteral &>(*args[1]).value) == "true";
+    // Note: this is only called in tests, where we need to ignore the remaining regions
+    service->gcImpl(gc_safe_point, NullspaceID, ignore_remain_regions);
 
     output("schemas gc done");
 }
diff --git a/dbms/src/Debug/dbgFuncSchema.h b/dbms/src/Debug/dbgFuncSchema.h
index 9409f9b4513..89e44e26fe4 100644
--- a/dbms/src/Debug/dbgFuncSchema.h
+++ b/dbms/src/Debug/dbgFuncSchema.h
@@ -44,7 +44,7 @@ void dbgFuncRefreshMappedTableSchema(Context & context, const ASTs & args, DBGInvokerPrinter)
 // Trigger gc on all databases / tables.
 // Usage:
-//   ./storage-client.sh "DBGInvoke gc_schemas([gc_safe_point])"
+//   ./storage-client.sh "DBGInvoke gc_schemas([gc_safe_point, ignore_remain_regions])"
 void dbgFuncGcSchemas(Context & context, const ASTs & args, DBGInvoker::Printer output);
 
 // Reset schemas.
diff --git a/dbms/src/Storages/KVStore/Decode/PartitionStreams.cpp b/dbms/src/Storages/KVStore/Decode/PartitionStreams.cpp
index 7f99a71d76a..f415cb7a69b 100644
--- a/dbms/src/Storages/KVStore/Decode/PartitionStreams.cpp
+++ b/dbms/src/Storages/KVStore/Decode/PartitionStreams.cpp
@@ -33,6 +33,7 @@
 #include 
 #include 
 #include 
+#include 
 
 namespace DB
 {
@@ -71,6 +72,10 @@ static DM::WriteResult writeRegionDataToStorage(
     auto storage = tmt.getStorages().get(keyspace_id, table_id);
     if (storage == nullptr)
     {
+        // - force_decode == false and the storage does not exist: let the upper level sync the schema and retry.
+        // - force_decode == true and the storage does not exist: the RaftLog or Snapshot may come
+        //   after the schema has been GC-ed past the GC safepoint, so TiFlash knows nothing about
+        //   the schema. We can only throw away those committed rows.
         return force_decode;
     }
 
@@ -212,7 +217,6 @@ static DM::WriteResult writeRegionDataToStorage(
         if (!atomic_read_write(true))
         {
             // Failure won't be tolerated this time.
-            // TODO: Enrich exception message.
             throw Exception(
                 ErrorCodes::LOGICAL_ERROR,
                 "Write region failed! region_id={} keyspace={} table_id={}",
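The force_decode contract documented above is a two-pass protocol: callers first attempt a strict decode, and only retry in lossy mode after a schema sync. A condensed sketch of the calling side (the function signature and surrounding names here are illustrative, not the exact call sites in this patch):

    // Pass 1: strict decode. `false` means the storage/schema is missing.
    if (!writeRegionDataToStorage(context, region, data_list, /*force_decode=*/false))
    {
        // Sync the schema, then retry in lossy mode: rows whose table is still
        // unknown (dropped and GC-ed past the safepoint) are silently discarded.
        tmt.getSchemaSyncerManager()->syncSchemas(context, keyspace_id);
        writeRegionDataToStorage(context, region, data_list, /*force_decode=*/true);
    }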
diff --git a/dbms/src/Storages/KVStore/Decode/RegionTable.cpp b/dbms/src/Storages/KVStore/Decode/RegionTable.cpp
index 31e1d71e6dc..c6669e4cc09 100644
--- a/dbms/src/Storages/KVStore/Decode/RegionTable.cpp
+++ b/dbms/src/Storages/KVStore/Decode/RegionTable.cpp
@@ -24,9 +24,13 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
+#include 
+
+#include 
 
 namespace DB
 {
@@ -37,6 +41,10 @@ extern const int UNKNOWN_TABLE;
 extern const int ILLFORMAT_RAFT_ROW;
 extern const int TABLE_IS_DROPPED;
 } // namespace ErrorCodes
+namespace FailPoints
+{
+extern const char force_set_num_regions_for_table[];
+} // namespace FailPoints
 
 RegionTable::Table & RegionTable::getOrCreateTable(const KeyspaceID keyspace_id, const TableID table_id)
 {
@@ -218,8 +226,8 @@ void RegionTable::removeRegion(const RegionID region_id, bool remove_data, const
         {
             tables.erase(ks_table_id);
         }
-        LOG_INFO(log, "remove region in RegionTable done, region_id={}", region_id);
     }
+    LOG_INFO(log, "remove region in RegionTable done, region_id={}", region_id);
 
     // Sometimes we don't need to remove data, e.g. when removing a region after region merge.
     if (remove_data)
@@ -318,6 +326,31 @@ void RegionTable::handleInternalRegionsByTable(
     }
 }
 
+std::vector<RegionID> RegionTable::getRegionIdsByTable(KeyspaceID keyspace_id, TableID table_id) const
+{
+    fiu_do_on(FailPoints::force_set_num_regions_for_table, {
+        if (auto v = FailPointHelper::getFailPointVal(FailPoints::force_set_num_regions_for_table); v)
+        {
+            auto num_regions = std::any_cast<std::vector<RegionID>>(v.value());
+            return num_regions;
+        }
+    });
+
+    std::lock_guard lock(mutex);
+    if (auto iter = tables.find(KeyspaceTableID{keyspace_id, table_id}); //
+        unlikely(iter != tables.end()))
+    {
+        std::vector<RegionID> ret_regions;
+        ret_regions.reserve(iter->second.regions.size());
+        for (const auto & r : iter->second.regions)
+        {
+            ret_regions.emplace_back(r.first);
+        }
+        return ret_regions;
+    }
+    return {};
+}
+
 std::vector<std::pair<RegionID, RegionPtr>> RegionTable::getRegionsByTable(
     const KeyspaceID keyspace_id,
     const TableID table_id) const
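getRegionIdsByTable is what the schema GC consults before physically dropping a storage instance, and the new failpoint lets a test pretend that regions still reference the table. This is exercised verbatim later in this patch (gtest_schema_sync.cpp):

    // Pretend regions 1001/1002/1003 still belong to the table, so the GC
    // must skip the physical drop unless ignore_remain_regions is set.
    FailPointHelper::enableFailPoint(
        FailPoints::force_set_num_regions_for_table,
        std::vector<RegionID>{1001, 1002, 1003});
    SCOPE_EXIT({ FailPointHelper::disableFailPoint(FailPoints::force_set_num_regions_for_table); });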
diff --git a/dbms/src/Storages/KVStore/Decode/RegionTable.h b/dbms/src/Storages/KVStore/Decode/RegionTable.h
index a3e476b3230..e46a03a09af 100644
--- a/dbms/src/Storages/KVStore/Decode/RegionTable.h
+++ b/dbms/src/Storages/KVStore/Decode/RegionTable.h
@@ -101,9 +101,6 @@ class RegionTable : private boost::noncopyable
         InternalRegions regions;
     };
 
-    using TableMap = std::unordered_map<KeyspaceTableID, Table, boost::hash<KeyspaceTableID>>;
-    using RegionInfoMap = std::unordered_map<RegionID, KeyspaceTableID>;
-
     explicit RegionTable(Context & context_);
     void restore();
 
@@ -126,6 +123,8 @@ class RegionTable : private boost::noncopyable
         KeyspaceID keyspace_id,
         TableID table_id,
         std::function<void(const InternalRegions &)> && callback) const;
+
+    std::vector<RegionID> getRegionIdsByTable(KeyspaceID keyspace_id, TableID table_id) const;
     std::vector<std::pair<RegionID, RegionPtr>> getRegionsByTable(KeyspaceID keyspace_id, TableID table_id) const;
 
     /// Write the data of the given region into the table with the given table ID, fill the data list for outer to remove.
@@ -188,7 +187,10 @@ class RegionTable : private boost::noncopyable
     InternalRegion & doGetInternalRegion(KeyspaceTableID ks_table_id, RegionID region_id);
 
 private:
+    using TableMap = std::unordered_map<KeyspaceTableID, Table, boost::hash<KeyspaceTableID>>;
     TableMap tables;
+
+    using RegionInfoMap = std::unordered_map<RegionID, KeyspaceTableID>;
     RegionInfoMap regions;
 
     SafeTsMap safe_ts_map;
diff --git a/dbms/src/Storages/KVStore/tests/gtest_kvstore.cpp b/dbms/src/Storages/KVStore/tests/gtest_kvstore.cpp
index c293f7ca0d7..04898ae2200 100644
--- a/dbms/src/Storages/KVStore/tests/gtest_kvstore.cpp
+++ b/dbms/src/Storages/KVStore/tests/gtest_kvstore.cpp
@@ -13,13 +13,35 @@
 // limitations under the License.
 
 #include 
-#include 
+#include 
+#include 
 
-namespace DB
+namespace DB::tests
 {
-namespace tests
-{
-TEST_F(RegionKVStoreTest, PersistenceV1)
+class RegionKVStoreOldTest : public KVStoreTestBase
+{
+public:
+    void testRaftMerge(Context & ctx, KVStore & kvs, TMTContext & tmt);
+    static void testRaftMergeRollback(KVStore & kvs, TMTContext & tmt);
+
+    RegionKVStoreOldTest()
+    {
+        log = DB::Logger::get("RegionKVStoreOldTest");
+        test_path = TiFlashTestEnv::getTemporaryPath("/region_kvs_old_test");
+    }
+};
+
+TEST_F(RegionKVStoreOldTest, PersistenceV1)
 try
 {
     auto ctx = TiFlashTestEnv::getGlobalContext();
@@ -255,7 +277,6 @@ void RegionKVStoreTest::testRaftMergeRollback(KVStore & kvs, TMTContext & tmt)
 
     auto source_region = kvs.getRegion(region_id);
     auto target_region = kvs.getRegion(1);
-
     auto && [request, response] = MockRaftStoreProxy::composePrepareMerge(
         target_region->cloneMetaRegion(),
         source_region->appliedIndex());
     kvs.handleAdminRaftCmd(std::move(request), std::move(response), region_id, 31, 6, tmt);
@@ -302,9 +323,10 @@ void RegionKVStoreTest::testRaftMergeRollback(KVStore & kvs, TMTContext & tmt)
 
 static void testRaftSplit(KVStore & kvs, TMTContext & tmt, std::unique_ptr<MockRaftStoreProxy> & proxy_instance)
 {
+    const TableID table_id = 1;
     {
         auto region = kvs.getRegion(1);
-        auto table_id = 1;
+        // row with handle_id == 3
         region->insert(
             "lock",
             RecordKVFormat::genKey(table_id, 3),
@@ -314,7 +336,7 @@ static void testRaftSplit(KVStore & kvs, TMTContext & tmt, std::unique_ptr<MockRaftStoreProxy> & proxy_instance)
-        auto table_id = 1;
+        // row with handle_id == 8
         region->insert(
             "lock",
             RecordKVFormat::genKey(table_id, 8),
@@ -327,16 +349,19 @@ static void testRaftSplit(KVStore & kvs, TMTContext & tmt, std::unique_ptr<MockRaftStoreProxy> & proxy_instance)
         ASSERT_EQ(region->dataInfo(), "[write 2 lock 2 default 2 ]");
     }
 
+    // Split region
     RegionID region_id = 1;
     RegionID region_id2 = 7;
     auto source_region = kvs.getRegion(region_id);
    auto old_epoch = source_region->mutMeta().getMetaRegion().region_epoch();
     const auto & ori_source_range = source_region->getRange()->comparableKeys();
-    RegionRangeKeys::RegionRange new_source_range
-        = RegionRangeKeys::makeComparableKeys(RecordKVFormat::genKey(1, 5), RecordKVFormat::genKey(1, 10));
-    RegionRangeKeys::RegionRange new_target_range
-        = RegionRangeKeys::makeComparableKeys(RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5));
+    RegionRangeKeys::RegionRange new_source_range = RegionRangeKeys::makeComparableKeys( //
+        RecordKVFormat::genKey(table_id, 5),
+        RecordKVFormat::genKey(table_id, 10));
+    RegionRangeKeys::RegionRange new_target_range = RegionRangeKeys::makeComparableKeys( //
+        RecordKVFormat::genKey(table_id, 0),
+        RecordKVFormat::genKey(table_id, 5));
     auto && [request, response] = MockRaftStoreProxy::composeBatchSplit(
         {region_id, region_id2},
         regionRangeToEncodeKeys(new_source_range, new_target_range),
@@ -344,18 +369,18 @@ static void testRaftSplit(KVStore & kvs, TMTContext & tmt, std::unique_ptr<MockRaftStoreProxy> & proxy_instance)
-    ASSERT_EQ(kvs.getRegion(1)->dataInfo(), "[write 1 lock 1 default 1 ]");
-    ASSERT_EQ(kvs.getRegion(7)->dataInfo(), "[lock 1 ]");
+    ASSERT_EQ(kvs.getRegion(region_id)->dataInfo(), "[write 1 lock 1 default 1 ]");
+    ASSERT_EQ(kvs.getRegion(region_id2)->dataInfo(), "[lock 1 ]");
     }
     // Rollback 1 to before split
     // 7 is persisted
@@ -411,16 +436,29 @@ static void testRaftSplit(KVStore & kvs, TMTContext & tmt, std::unique_ptr<MockRaftStoreProxy> & proxy_instance)
-void RegionKVStoreTest::testRaftMerge(KVStore & kvs, TMTContext & tmt)
+void RegionKVStoreOldTest::testRaftMerge(Context & ctx, KVStore & kvs, TMTContext & tmt)
 {
+    const RegionID source_region_id = 7;
+    const RegionID target_region_id = 1;
+    const TableID table_id = 1;
+    proxy_instance->debugAddRegions(
+        kvs,
+        ctx.getTMTContext(),
+        {target_region_id, source_region_id},
+        {{RecordKVFormat::genKey(table_id, 0), RecordKVFormat::genKey(table_id, 5)},
+         {RecordKVFormat::genKey(table_id, 5), RecordKVFormat::genKey(table_id, 10)}});
+
     {
-        auto region_id = 7;
-        kvs.getRegion(1)->clearAllData();
-        kvs.getRegion(region_id)->clearAllData();
+        kvs.getRegion(target_region_id)->clearAllData();
+        kvs.getRegion(source_region_id)->clearAllData();
 
         {
-            auto region = kvs.getRegion(1);
-            auto table_id = 1;
+            // Region 1 with handle_id == 6
+            auto region = kvs.getRegion(target_region_id);
             region->insert(
                 "lock",
                 RecordKVFormat::genKey(table_id, 6),
@@ -433,8 +471,8 @@ void RegionKVStoreTest::testRaftMerge(KVStore & kvs, TMTContext & tmt)
             ASSERT_EQ(region->dataInfo(), "[write 1 lock 1 default 1 ]");
         }
         {
-            auto region = kvs.getRegion(region_id);
-            auto table_id = 1;
+            // Region 7 with handle_id == 2
+            auto region = kvs.getRegion(source_region_id);
             region->insert(
                 "lock",
                 RecordKVFormat::genKey(table_id, 2),
@@ -448,10 +486,10 @@ void RegionKVStoreTest::testRaftMerge(KVStore & kvs, TMTContext & tmt)
         }
     }
 
+    // Prepare merge for region 7
     {
-        auto region_id = 7;
-        auto source_region = kvs.getRegion(region_id);
-        auto target_region = kvs.getRegion(1);
+        auto source_region = kvs.getRegion(source_region_id);
+        auto target_region = kvs.getRegion(target_region_id);
         auto && [request, response] = MockRaftStoreProxy::composePrepareMerge(
             target_region->cloneMetaRegion(),
             source_region->appliedIndex());
         kvs.handleAdminRaftCmd(std::move(request), std::move(response), source_region->id(), 35, 6, tmt);
@@ -459,9 +497,8 @@ void RegionKVStoreTest::testRaftMerge(KVStore & kvs, TMTContext & tmt)
 
     {
-        auto source_id = 7, target_id = 1;
-        auto source_region = kvs.getRegion(source_id);
-
+        /// Mock that the source region is Applying, reject
+        auto source_region = kvs.getRegion(source_region_id);
         auto && [request, response] = MockRaftStoreProxy::composeCommitMerge(
             source_region->cloneMetaRegion(),
             source_region->appliedIndex());
         source_region->setStateApplying();
 
         const auto & source_region_meta_delegate = source_region->meta.makeRaftCommandDelegate();
         try
         {
-            kvs.getRegion(target_id)->meta.makeRaftCommandDelegate().checkBeforeCommitMerge(
-                request,
-                source_region_meta_delegate);
+            kvs.getRegion(target_region_id)
+                ->meta.makeRaftCommandDelegate()
+                .checkBeforeCommitMerge(request, source_region_meta_delegate);
             ASSERT_TRUE(false);
         }
         catch (Exception & e)
         {
             ASSERT_EQ(e.message(), "checkBeforeCommitMerge: unexpected state Applying of source 1");
         }
+    }
+
+    {
+        /// Mock that the source region is Normal but its meta does not exist, reject
+        auto source_region = kvs.getRegion(source_region_id);
+        auto && [request, response]
+            = MockRaftStoreProxy::composeCommitMerge(source_region->cloneMetaRegion(), source_region->appliedIndex());
         source_region->setPeerState(raft_serverpb::PeerState::Normal);
-        {
-            request.mutable_commit_merge()->mutable_source()->mutable_start_key()->clear();
-        }
+        request.mutable_commit_merge()->mutable_source()->mutable_start_key()->clear();
+
         const auto & source_region_meta_delegate = source_region->meta.makeRaftCommandDelegate();
         try
         {
-            kvs.getRegion(target_id)->meta.makeRaftCommandDelegate().checkBeforeCommitMerge(
-                request,
-                source_region_meta_delegate);
+            kvs.getRegion(target_region_id)
+                ->meta.makeRaftCommandDelegate()
+                .checkBeforeCommitMerge(request, source_region_meta_delegate);
             ASSERT_TRUE(false);
         }
         catch (Exception & e)
         {
@@ -496,61 +539,74 @@ void RegionKVStoreTest::testRaftMerge(KVStore & kvs, TMTContext & tmt)
 
     {
-        auto source_id = 7, target_id = 1;
-        auto source_region = kvs.getRegion(source_id);
+        /// Commit merge for merging region 7 -> region 1
+        auto source_region = kvs.getRegion(source_region_id);
         auto && [request, response]
             = MockRaftStoreProxy::composeCommitMerge(source_region->cloneMetaRegion(), source_region->appliedIndex());
+
+        // before commit merge
         {
-            auto mmp = kvs.getRegionsByRangeOverlap(RegionRangeKeys::makeComparableKeys(TiKVKey(""), TiKVKey("")));
-            ASSERT_TRUE(mmp.count(target_id) != 0);
-            ASSERT_EQ(mmp.size(), 2);
+            auto region_map
+                = kvs.getRegionsByRangeOverlap(RegionRangeKeys::makeComparableKeys(TiKVKey(""), TiKVKey("")));
+            ASSERT_TRUE(region_map.contains(target_region_id));
+            ASSERT_EQ(region_map.size(), 2);
         }
         kvs.handleAdminRaftCmd(
             raft_cmdpb::AdminRequest(request),
             raft_cmdpb::AdminResponse(response),
-            target_id,
+            target_region_id,
             36,
             6,
             tmt);
 
-        ASSERT_EQ(kvs.getRegion(source_id), nullptr);
+        // checks after commit merge
+        ASSERT_EQ(kvs.getRegion(source_region_id), nullptr);
         {
-            auto mmp = kvs.getRegionsByRangeOverlap(
-                RegionRangeKeys::makeComparableKeys(RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5)));
-            ASSERT_TRUE(mmp.count(1) != 0);
+            auto mmp = kvs.getRegionsByRangeOverlap(RegionRangeKeys::makeComparableKeys(
+                RecordKVFormat::genKey(table_id, 0),
+                RecordKVFormat::genKey(table_id, 5)));
+            ASSERT_TRUE(mmp.contains(target_region_id));
             ASSERT_EQ(mmp.size(), 1);
         }
+        ASSERT_EQ(kvs.getRegion(target_region_id)->dataInfo(), "[lock 2 ]");
+
+        // Add region 7 back and merge again
         {
             // add 7 back
             auto task_lock = kvs.genTaskLock();
             auto lock = kvs.genRegionMgrWriteLock(task_lock);
-            auto region
-                = makeRegion(7, RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5), kvs.getProxyHelper());
-            lock.regions.emplace(7, region);
+            auto region = makeRegion(
+                source_region_id,
+                RecordKVFormat::genKey(table_id, 0),
+                RecordKVFormat::genKey(table_id, 5),
+                kvs.getProxyHelper());
+            lock.regions.emplace(source_region_id, region);
             lock.index.add(region);
         }
         {
-            auto mmp = kvs.getRegionsByRangeOverlap(
-                RegionRangeKeys::makeComparableKeys(RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5)));
-            ASSERT_TRUE(mmp.count(7) != 0);
-            ASSERT_TRUE(mmp.count(1) != 0);
+            auto mmp = kvs.getRegionsByRangeOverlap(RegionRangeKeys::makeComparableKeys(
+                RecordKVFormat::genKey(table_id, 0),
+                RecordKVFormat::genKey(table_id, 5)));
+            ASSERT_TRUE(mmp.contains(source_region_id));
+            ASSERT_TRUE(mmp.contains(target_region_id));
             ASSERT_EQ(mmp.size(), 2);
         }
         kvs.handleAdminRaftCmd(
             raft_cmdpb::AdminRequest(request),
             raft_cmdpb::AdminResponse(response),
-            target_id,
+            target_region_id,
             36,
             6,
             tmt);
 
         {
-            auto mmp = kvs.getRegionsByRangeOverlap(
-                RegionRangeKeys::makeComparableKeys(RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5)));
-            ASSERT_TRUE(mmp.count(1) != 0);
+            auto mmp = kvs.getRegionsByRangeOverlap(RegionRangeKeys::makeComparableKeys(
+                RecordKVFormat::genKey(table_id, 0),
+                RecordKVFormat::genKey(table_id, 5)));
+            ASSERT_TRUE(mmp.contains(target_region_id));
             ASSERT_EQ(mmp.size(), 1);
         }
-        ASSERT_EQ(kvs.getRegion(1)->dataInfo(), "[lock 2 ]");
+        ASSERT_EQ(kvs.getRegion(target_region_id)->dataInfo(), "[lock 2 ]");
     }
 }
 
@@ -891,7 +947,11 @@ TEST_F(RegionKVStoreTest, AdminSplit)
     }
 }
 
-TEST_F(RegionKVStoreTest, AdminMerge)
+TEST_F(RegionKVStoreOldTest, AdminMergeRollback)
 {
     createDefaultRegions();
     auto ctx = TiFlashTestEnv::getGlobalContext();
@@ -902,13 +962,25 @@ TEST_F(RegionKVStoreTest, AdminMerge)
         {1, 7},
         {{RecordKVFormat::genKey(1, 0), RecordKVFormat::genKey(1, 5)},
          {RecordKVFormat::genKey(1, 5), RecordKVFormat::genKey(1, 10)}});
-    {
-        testRaftMergeRollback(kvs, ctx.getTMTContext());
-        testRaftMerge(kvs, ctx.getTMTContext());
-    }
+    testRaftMergeRollback(kvs, ctx.getTMTContext());
 }
 
+TEST_F(RegionKVStoreOldTest, AdminMerge)
+try
+{
+    auto ctx = TiFlashTestEnv::getGlobalContext();
+    KVStore & kvs = getKVS();
+    testRaftMerge(ctx, kvs, ctx.getTMTContext());
+}
+CATCH
 
 TEST_F(RegionKVStoreTest, AdminChangePeer)
 {
diff --git a/dbms/src/Storages/KVStore/tests/gtest_new_kvstore.cpp b/dbms/src/Storages/KVStore/tests/gtest_new_kvstore.cpp
index 568ebcbef9d..da140387dd5 100644
--- a/dbms/src/Storages/KVStore/tests/gtest_new_kvstore.cpp
+++ b/dbms/src/Storages/KVStore/tests/gtest_new_kvstore.cpp
@@ -12,15 +12,159 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include 
-#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
 
 #include "kvstore_helper.h"
 
-namespace DB
+namespace DB::tests
 {
-namespace tests
-{
+
+TEST_F(RegionKVStoreTest, RegionStruct)
+try
+{
+    auto & ctx = TiFlashTestEnv::getGlobalContext();
+    initStorages();
+    MockRaftStoreProxy::FailCond cond;
+    KVStore & kvs = getKVS();
+    auto table_id = proxy_instance->bootstrapTable(ctx, kvs, ctx.getTMTContext());
+    auto start = RecordKVFormat::genKey(table_id, 0);
+    auto end = RecordKVFormat::genKey(table_id, 100);
+    auto str_key = RecordKVFormat::genKey(table_id, 1, 111);
+    auto [str_val_write, str_val_default] = proxy_instance->generateTiKVKeyValue(111, 999);
+    auto str_lock_value
+        = RecordKVFormat::encodeLockCfValue(RecordKVFormat::CFModifyFlag::PutFlag, "PK", 111, 999).toString();
+    proxy_instance->bootstrapWithRegion(kvs, ctx.getTMTContext(), 1, std::nullopt);
+    {
+        RegionID region_id = 1;
+        auto kvr1 = kvs.getRegion(region_id);
+        ASSERT_NE(kvr1, nullptr);
+        auto [index, term] = proxy_instance->rawWrite(
+            region_id,
+            {str_key, str_key},
+            {str_lock_value, str_val_default},
+            {WriteCmdType::Put, WriteCmdType::Put},
+            {ColumnFamilyType::Lock, ColumnFamilyType::Default});
+        UNUSED(term);
+        proxy_instance->doApply(kvs, ctx.getTMTContext(), cond, region_id, index);
+        ASSERT_EQ(kvr1->getLockByKey(str_key)->dataSize(), str_lock_value.size());
+        ASSERT_EQ(kvr1->getLockByKey(RecordKVFormat::genKey(table_id, 1, 112)), nullptr);
+    }
+}
+CATCH
+
+
+TEST_F(RegionKVStoreTest, MemoryTracker)
+try
+{
+    auto & ctx = TiFlashTestEnv::getGlobalContext();
+    initStorages();
+    KVStore & kvs = getKVS();
+    auto table_id = proxy_instance->bootstrapTable(ctx, kvs, ctx.getTMTContext());
+    auto start = RecordKVFormat::genKey(table_id, 0);
+    auto end = RecordKVFormat::genKey(table_id, 100);
+    auto str_key = RecordKVFormat::genKey(table_id, 1, 111);
+    auto [str_val_write, str_val_default] = proxy_instance->generateTiKVKeyValue(111, 999);
+    MockRaftStoreProxy::FailCond cond;
+    proxy_instance->debugAddRegions(
+        kvs,
+        ctx.getTMTContext(),
+        {1, 2},
+        {{RecordKVFormat::genKey(table_id, 0), RecordKVFormat::genKey(table_id, 10)},
+         {RecordKVFormat::genKey(table_id, 11), RecordKVFormat::genKey(table_id, 20)}});
+
+    {
+        auto region_id = 1;
+        auto kvr1 = kvs.getRegion(region_id);
+        auto [index, term]
+            = proxy_instance
+                  ->rawWrite(region_id, {str_key}, {str_val_default}, {WriteCmdType::Put}, {ColumnFamilyType::Default});
+        UNUSED(term);
+        proxy_instance->doApply(kvs, ctx.getTMTContext(), cond, region_id, index);
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+    }
+
+    {
+        root_of_kvstore_mem_trackers->reset();
+        RegionPtr region = tests::makeRegion(700, start, end, proxy_helper.get());
+        region->insert("default", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_default));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+        region->remove("default", TiKVKey::copyFrom(str_key));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    }
+    ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    {
+        root_of_kvstore_mem_trackers->reset();
+        RegionPtr region = tests::makeRegion(701, start, end, proxy_helper.get());
+        region->insert("default", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_default));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+    }
+    ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    {
+        root_of_kvstore_mem_trackers->reset();
+        RegionPtr region = tests::makeRegion(702, start, end, proxy_helper.get());
+        region->insert("default", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_default));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+        tryPersistRegion(kvs, 1);
+        reloadKVSFromDisk();
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+    }
+    ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    {
+        root_of_kvstore_mem_trackers->reset();
+        RegionPtr region = tests::makeRegion(800, start, end, proxy_helper.get());
+        region->insert("default", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_default));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), str_key.dataSize() + str_val_default.size());
+        region->insert("write", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_write));
+        std::optional<RegionDataReadInfoList> data_list_read = ReadRegionCommitCache(region, true);
+        ASSERT_TRUE(data_list_read);
+        ASSERT_EQ(1, data_list_read->size());
+        RemoveRegionCommitCache(region, *data_list_read);
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    }
+    ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+    {
+        root_of_kvstore_mem_trackers->reset();
+        RegionPtr region = tests::makeRegion(900, start, end, proxy_helper.get());
+        region->insert("default", TiKVKey::copyFrom(str_key), TiKVValue::copyFrom(str_val_default));
+        auto str_key2 = RecordKVFormat::genKey(table_id, 20, 111);
+        auto [str_val_write2, str_val_default2] = proxy_instance->generateTiKVKeyValue(111, 999);
+        region->insert("default", TiKVKey::copyFrom(str_key2), TiKVValue::copyFrom(str_val_default2));
+        auto expected = str_key.dataSize() + str_val_default.size() + str_key2.dataSize() + str_val_default2.size();
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), expected);
+        auto new_region = splitRegion(
+            region,
+            RegionMeta(
+                createPeer(901, true),
+                createRegionInfo(902, RecordKVFormat::genKey(table_id, 50), end),
+                initialApplyState()));
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), expected);
+        region->mergeDataFrom(*new_region);
+        ASSERT_EQ(root_of_kvstore_mem_trackers->get(), expected);
+    }
+    ASSERT_EQ(root_of_kvstore_mem_trackers->get(), 0);
+}
+CATCH
+
 TEST_F(RegionKVStoreTest, KVStoreFailRecovery)
 try
 {
@@ -1133,6 +1277,59 @@ try
 }
 CATCH
 
-} // namespace tests
-} // namespace DB
+void RegionKVStoreTest::dropTable(Context & ctx, TableID table_id)
+{
+    MockTiDB::instance().dropTableById(ctx, table_id, /*drop_regions*/ false);
+    auto & tmt = ctx.getTMTContext();
+    auto schema_syncer = tmt.getSchemaSyncerManager();
+    schema_syncer->syncSchemas(ctx, NullspaceID);
+    auto sync_service = std::make_shared<SchemaSyncService>(ctx);
+    sync_service->gcImpl(std::numeric_limits<Timestamp>::max(), NullspaceID, /*ignore_remain_regions*/ true);
+    sync_service->shutdown();
+}
+
+TEST_F(RegionKVStoreTest, KVStoreApplyWriteToNonExistStorage)
+try
+{
+    auto ctx = TiFlashTestEnv::getGlobalContext();
+    proxy_instance->cluster_ver = RaftstoreVer::V2;
+    RegionID region_id = 2;
+    initStorages();
+    KVStore & kvs = getKVS();
+    TableID table_id = proxy_instance->bootstrapTable(ctx, kvs, ctx.getTMTContext());
+    auto start = RecordKVFormat::genKey(table_id, 0);
+    auto end = RecordKVFormat::genKey(table_id, 100);
+    proxy_instance
+        ->bootstrapWithRegion(kvs, ctx.getTMTContext(), region_id, std::make_pair(start.toString(), end.toString()));
+
+    {
+        auto str_key = RecordKVFormat::genKey(table_id, 1, 111);
+        auto [str_val_write, str_val_default] = proxy_instance->generateTiKVKeyValue(111, 999);
+        auto str_lock_value
+            = RecordKVFormat::encodeLockCfValue(RecordKVFormat::CFModifyFlag::PutFlag, "PK", 111, 999).toString();
+        auto kvr1 = kvs.getRegion(region_id);
+        ASSERT_NE(kvr1, nullptr);
+        auto [index, term] = proxy_instance->rawWrite(
+            region_id,
+            {str_key, str_key, str_key},
+            {str_lock_value, str_val_default, str_val_write},
+            {WriteCmdType::Put, WriteCmdType::Put, WriteCmdType::Put},
+            {ColumnFamilyType::Lock, ColumnFamilyType::Default, ColumnFamilyType::Write});
+        UNUSED(term);
+
+        dropTable(ctx, table_id);
+
+        // No exception is thrown; the rows are just thrown away
+        MockRaftStoreProxy::FailCond cond;
+        proxy_instance->doApply(kvs, ctx.getTMTContext(), cond, region_id, index);
+    }
+}
+CATCH
+
+} // namespace DB::tests
diff --git a/dbms/src/Storages/KVStore/tests/region_kvstore_test.h b/dbms/src/Storages/KVStore/tests/region_kvstore_test.h
new file mode 100644
index 00000000000..84a12a2acca
--- /dev/null
+++ b/dbms/src/Storages/KVStore/tests/region_kvstore_test.h
@@ -0,0 +1,86 @@
+// Copyright 2023 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include 
+#include 
+
+namespace DB::tests
+{
+
+class RegionKVStoreTest : public KVStoreTestBase
+{
+public:
+    RegionKVStoreTest()
+    {
+        log = DB::Logger::get("RegionKVStoreTest");
+        test_path = TiFlashTestEnv::getTemporaryPath("/region_kvs_test");
+    }
+
+    static RegionPtr splitRegion(const RegionPtr & region, RegionMeta && meta)
+    {
+        return region->splitInto(std::move(meta));
+    }
+
+    static void dropTable(Context & ctx, TableID table_id);
+};
+
+inline void validateSSTGeneration(
+    KVStore & kvs,
+    std::unique_ptr<MockRaftStoreProxy> & proxy_instance,
+    UInt64 region_id,
+    MockSSTGenerator & cf_data,
+    ColumnFamilyType cf,
+    int sst_size,
+    int key_count)
+{
+    auto kvr1 = kvs.getRegion(region_id);
+    auto r1 = proxy_instance->getRegion(region_id);
+    auto proxy_helper = proxy_instance->generateProxyHelper();
+    auto ssts = cf_data.ssts();
+    ASSERT_EQ(ssts.size(), sst_size);
+    auto make_inner_func = [](const TiFlashRaftProxyHelper * proxy_helper,
+                              SSTView snap,
+                              SSTReader::RegionRangeFilter range,
+                              size_t split_id) -> std::unique_ptr<MonoSSTReader> {
+        auto parsed_kind = MockSSTGenerator::parseSSTViewKind(buffToStrView(snap.path));
+        auto reader = std::make_unique<MonoSSTReader>(proxy_helper, snap, range, split_id);
+        assert(reader->sst_format_kind() == parsed_kind);
+        return reader;
+    };
+    MultiSSTReader<MonoSSTReader, SSTView> reader{
+        proxy_helper.get(),
+        cf,
+        make_inner_func,
+        ssts,
+        Logger::get(),
+        kvr1->getRange(),
+        DM::SSTScanSoftLimit::HEAD_OR_ONLY_SPLIT};
+
+    size_t counter = 0;
+    while (reader.remained())
+    {
+        // `remained()` is deliberately called repeatedly here.
+        reader.remained();
+        reader.remained();
+        counter++;
+        auto v = std::string(reader.valueView().data);
+        ASSERT_EQ(v, "v" + std::to_string(counter));
+        reader.next();
+    }
+    ASSERT_EQ(counter, key_count);
+}
+
+} // namespace DB::tests
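The core of the fix lands in SchemaSyncService below: before physically dropping a tombstoned table, the GC now asks RegionTable whether any region still maps to it. Condensed from the hunk that follows:

    // Inside the per-table GC loop of SchemaSyncService::gcImpl:
    auto remain_regions = region_table.getRegionIdsByTable(keyspace_id, table_info.id);
    if (!remain_regions.empty() && !ignore_remain_regions)
        continue; // keep the storage instance; retry in a later GC round
    // otherwise fall through and physically drop the storage instance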
diff --git a/dbms/src/TiDB/Schema/SchemaSyncService.cpp b/dbms/src/TiDB/Schema/SchemaSyncService.cpp
index 22eecbc6ce0..2c6035b2640 100644
--- a/dbms/src/TiDB/Schema/SchemaSyncService.cpp
+++ b/dbms/src/TiDB/Schema/SchemaSyncService.cpp
@@ -203,6 +203,11 @@ void SchemaSyncService::updateLastGcSafepoint(KeyspaceID keyspace_id, Timestamp
 }
 
 bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id)
+{
+    return gcImpl(gc_safepoint, keyspace_id, /*ignore_remain_regions*/ false);
+}
+
+bool SchemaSyncService::gcImpl(Timestamp gc_safepoint, KeyspaceID keyspace_id, bool ignore_remain_regions)
 {
     const std::optional<Timestamp> last_gc_safepoint = lastGcSafePoint(keyspace_id);
     // for a newly deployed cluster, there is an interval in which gc_safepoint returns 0, skip it
@@ -287,6 +292,37 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id)
                 *database_id,
                 table_info.id);
         }();
+
+        auto & region_table = tmt_context.getRegionTable();
+        if (auto remain_regions = region_table.getRegionIdsByTable(keyspace_id, table_info.id); //
+            !remain_regions.empty())
+        {
+            if (likely(!ignore_remain_regions))
+            {
+                LOG_WARNING(
+                    keyspace_log,
+                    "Physically drop table is skipped, regions are not totally removed from TiFlash,"
+                    " remain_region_ids={} table_tombstone={} safepoint={} {}",
+                    remain_regions,
+                    storage->getTombstone(),
+                    gc_safepoint,
+                    canonical_name);
+                continue;
+            }
+            else
+            {
+                LOG_WARNING(
+                    keyspace_log,
+                    "Physically drop table is executed while regions are not totally removed from TiFlash,"
+                    " remain_region_ids={} ignore_remain_regions={} table_tombstone={} safepoint={} {}",
+                    remain_regions,
+                    ignore_remain_regions,
+                    storage->getTombstone(),
+                    gc_safepoint,
+                    canonical_name);
+            }
+        }
+
         LOG_INFO(
             keyspace_log,
             "Physically drop table begin, table_tombstone={} safepoint={} {}",
diff --git a/dbms/src/TiDB/Schema/SchemaSyncService.h b/dbms/src/TiDB/Schema/SchemaSyncService.h
index 88d540885d5..bd72c904051 100644
--- a/dbms/src/TiDB/Schema/SchemaSyncService.h
+++ b/dbms/src/TiDB/Schema/SchemaSyncService.h
@@ -25,6 +25,10 @@
 
 namespace DB
 {
+namespace tests
+{
+class RegionKVStoreTest;
+} // namespace tests
 class Logger;
 using LoggerPtr = std::shared_ptr<Logger>;
 
@@ -54,11 +58,13 @@ class SchemaSyncService
     std::optional<Timestamp> lastGcSafePoint(KeyspaceID keyspace_id) const;
     void updateLastGcSafepoint(KeyspaceID keyspace_id, Timestamp gc_safepoint);
 
+    bool gcImpl(Timestamp gc_safepoint, KeyspaceID keyspace_id, bool ignore_remain_regions);
+
 private:
     Context & context;
 
     friend void dbgFuncGcSchemas(Context &, const ASTs &, DBGInvokerPrinter);
+    friend class tests::RegionKVStoreTest;
 
     struct KeyspaceGCContext
     {
diff --git a/dbms/src/TiDB/Schema/tests/gtest_schema_sync.cpp b/dbms/src/TiDB/Schema/tests/gtest_schema_sync.cpp
index e6414084c43..c5dc2648849 100644
--- a/dbms/src/TiDB/Schema/tests/gtest_schema_sync.cpp
+++ b/dbms/src/TiDB/Schema/tests/gtest_schema_sync.cpp
@@ -33,12 +33,16 @@
 #include 
 #include 
 
+#include 
+#include 
+
 namespace DB
 {
 namespace FailPoints
 {
 extern const char exception_before_rename_table_old_meta_removed[];
 extern const char force_context_path[];
+extern const char force_set_num_regions_for_table[];
 } // namespace FailPoints
 namespace tests
 {
@@ -290,8 +294,7 @@ try
         refreshTableSchema(table_id);
     }
 
-    global_ctx.initializeSchemaSyncService();
-    auto sync_service = global_ctx.getSchemaSyncService();
+    auto sync_service = std::make_shared<SchemaSyncService>(global_ctx);
     // run gc with safepoint == 0, will be skipped
     ASSERT_FALSE(sync_service->gc(0, NullspaceID));
     ASSERT_TRUE(sync_service->gc(10000000, NullspaceID));
@@ -308,6 +311,62 @@ try
 }
 CATCH
 
+TEST_F(SchemaSyncTest, PhysicalDropTableMeetsUnRemovedRegions)
+try
+{
+    auto pd_client = global_ctx.getTMTContext().getPDClient();
+
+    const String db_name = "mock_db";
+    MockTiDB::instance().newDataBase(db_name);
+
+    auto cols = ColumnsDescription({
+        {"col1", typeFromString("String")},
+        {"col2", typeFromString("Int64")},
+    });
+    // table_name, cols, pk_name
+    std::vector<std::tuple<String, ColumnsDescription, String>> tables{
+        {"t1", cols, ""},
+    };
+    auto table_ids = MockTiDB::instance().newTables(db_name, tables, pd_client->getTS(), "dt");
+
+    refreshSchema();
+    for (auto table_id : table_ids)
+    {
+        refreshTableSchema(table_id);
+    }
+
+    mustGetSyncedTableByName(db_name, "t1");
+
+    MockTiDB::instance().dropTable(global_ctx, db_name, "t1", true);
+
+    refreshSchema();
+    for (auto table_id : table_ids)
+    {
+        refreshTableSchema(table_id);
+    }
+
+    // prevent the storage instance from being physically removed
+    FailPointHelper::enableFailPoint(
+        FailPoints::force_set_num_regions_for_table,
+        std::vector<RegionID>{1001, 1002, 1003});
+    SCOPE_EXIT({ FailPointHelper::disableFailPoint(FailPoints::force_set_num_regions_for_table); });
+
+    auto sync_service = std::make_shared<SchemaSyncService>(global_ctx);
+    ASSERT_TRUE(sync_service->gc(std::numeric_limits<Timestamp>::max(), NullspaceID));
+
+    size_t num_remain_tables = 0;
+    for (auto table_id : table_ids)
+    {
+        auto storage = global_ctx.getTMTContext().getStorages().get(NullspaceID, table_id);
+        ASSERT_TRUE(storage->isTombstone());
+        ++num_remain_tables;
+    }
+    ASSERT_EQ(num_remain_tables, 1);
+
+    sync_service->shutdown();
+}
+CATCH
+
 TEST_F(SchemaSyncTest, RenamePartitionTable)
 try
 {
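With the second argument wired through dbgFuncGcSchemas, the full-stack tests below can force a physical drop even when regions remain; the two-argument form used throughout is:

    >> DBGInvoke __gc_schemas(18446744073709551615, 'true')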
diff --git a/tests/fullstack-test2/ddl/alter_drop_table.test b/tests/fullstack-test2/ddl/alter_drop_table.test
index 5fd6329f000..afb2659e68d 100644
--- a/tests/fullstack-test2/ddl/alter_drop_table.test
+++ b/tests/fullstack-test2/ddl/alter_drop_table.test
@@ -19,7 +19,7 @@
 
 # Clean the tombstone table in the testing env
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 
 ## create table and drop without tiflash replica
 mysql> drop table if exists test.t1;
diff --git a/tests/fullstack-test2/ddl/flashback/flashback_database.test b/tests/fullstack-test2/ddl/flashback/flashback_database.test
index 257e28be866..3094e415813 100644
--- a/tests/fullstack-test2/ddl/flashback/flashback_database.test
+++ b/tests/fullstack-test2/ddl/flashback/flashback_database.test
@@ -54,8 +54,8 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t
 
 # ensure the flashbacked table and database are not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
-=> DBGInvoke __refresh_schemas()
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __refresh_schemas()
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t3 order by a;
 +------+------+
@@ -73,7 +73,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t
 | 53 | efg  | NULL |
 +----+------+------+
 
-=> DBGInvoke __enable_schema_sync_service('false')
+>> DBGInvoke __enable_schema_sync_service('false')
 >> DBGInvoke __init_fail_point()
 
 mysql> drop database if exists d1;
 mysql> drop database if exists d1_new;
 
 ## case 2, non-partition table
 mysql> create database d1;
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
-=> DBGInvoke mapped_database_exists(d1)
+>> DBGInvoke mapped_database_exists(d1)
 ┌─mapped_database_exists(d1)───┐
 │ true                         │
 └──────────────────────────────┘
 
 mysql> create table d1.t1(a int, b int);
 mysql> insert into d1.t1 values(1,1);
 mysql> insert into d1.t1 values(2,2);
 
 mysql> drop database d1;
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
 # make write cmd take effect
 >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd)
 
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t1 order by a;
 | 2    | 2    |
 +------+------+
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 # ensure the flashbacked table and database are not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t1 order by a;
 +------+------+
 | 2    | 2    |
 +------+------+
 
 mysql> drop database if exists d1;
 mysql> drop database if exists d1_new;
 
 ## case 3, partition table
 mysql> create database d1;
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
 mysql> create table d1.t2(id INT NOT NULL,name VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100));
 mysql> insert into d1.t2 values(1, 'abc'),(2, 'cde'),(53, 'efg');
 mysql> alter table d1.t2 add column b int;
 
 mysql> drop database d1;
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
 # make write cmd take effect
 >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd)
@@ -173,10 +173,10 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t
 | 53 | efg  | NULL |
 +----+------+------+
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 # ensure the flashbacked table and database are not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t2 order by id;
 +----+------+------+
@@ -223,7 +223,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t
 
 # ensure the flashbacked table and database are not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from d1_new.t1 order by a;
 +------+
diff --git a/tests/fullstack-test2/ddl/flashback/recover_table.test b/tests/fullstack-test2/ddl/flashback/recover_table.test
index 890f776be02..a88cbd63ffc 100644
--- a/tests/fullstack-test2/ddl/flashback/recover_table.test
+++ b/tests/fullstack-test2/ddl/flashback/recover_table.test
@@ -14,7 +14,7 @@
 
 # Clean the tombstone table in the testing env
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 
 ### Case 1
 ## Test case for applying raft cmd for tombstoned table
@@ -115,8 +115,8 @@
 mysql> insert into test.t_drop values(1, 1);
 
 func> wait_table test t_drop
 
-=> DBGInvoke __enable_schema_sync_service('false')
-=> DBGInvoke __init_fail_point()
+>> DBGInvoke __enable_schema_sync_service('false')
+>> DBGInvoke __init_fail_point()
 
 mysql> alter table test.t_drop add column c int;
 
@@ -127,7 +127,7 @@
 mysql> insert into test.t_drop values(1,2,3);
 
 mysql> drop table test.t_drop;
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
 # make write cmd take effect
 >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd)
@@ -151,14 +151,14 @@ mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t_dr
 
 mysql> drop table test.t_drop;
 
-=> DBGInvoke __refresh_schemas()
+>> DBGInvoke __refresh_schemas()
 
 >> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop'
 ┌─tidb_database─┬─tidb_name─┐
 │ test          │ t_drop    │
 └───────────────┴───────────┘
 
-=> DBGInvoke __enable_schema_sync_service('true')
-=> DBGInvoke __gc_schemas(9223372036854775807)
+>> DBGInvoke __enable_schema_sync_service('true')
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 
 # check the table is physically dropped
 >> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop'
diff --git a/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test b/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test
index 6f740ace25c..215a95472e7 100644
--- a/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test
+++ b/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test
@@ -126,7 +126,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2
 
 # ensure the swapped-out table is not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id;
 +-----+-------+-------+------+
@@ -178,7 +178,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2
 +----+-------+-------+------+
 
 # ensure the swapped-out table is not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id;
 +-----+-------+-------+------+
diff --git a/tests/fullstack-test2/ddl/partitions/reorganize_partition.test b/tests/fullstack-test2/ddl/partitions/reorganize_partition.test
index 47482e1c30a..b9b68664853 100644
--- a/tests/fullstack-test2/ddl/partitions/reorganize_partition.test
+++ b/tests/fullstack-test2/ddl/partitions/reorganize_partition.test
@@ -174,7 +174,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1
 
 # ensure the partitions are not marked as tombstone
 >> DBGInvoke __enable_schema_sync_service('true')
->> DBGInvoke __gc_schemas(18446744073709551615)
+>> DBGInvoke __gc_schemas(18446744073709551615, 'true')
 >> DBGInvoke __enable_schema_sync_service('false')
 
 mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1 order by id;
 +----+------+------+