diff --git a/server/replication/replication_mode.go b/server/replication/replication_mode.go index f1e8b5a9c8a..87205a0c149 100644 --- a/server/replication/replication_mode.go +++ b/server/replication/replication_mode.go @@ -30,7 +30,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" - "github.com/tikv/pd/pkg/slice" + "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -63,7 +63,7 @@ type FileReplicater interface { // DrStatusFile is the file name that stores the dr status. const DrStatusFile = "DR_STATE" -const persistFileTimeout = time.Second * 10 +const persistFileTimeout = time.Second * 3 // ModeManager is used to control how raft logs are synchronized between // different tikv nodes. @@ -71,11 +71,11 @@ type ModeManager struct { initTime time.Time syncutil.RWMutex - config config.ReplicationModeConfig - storage endpoint.ReplicationStatusStorage - cluster schedule.Cluster - fileReplicater FileReplicater - replicatedMembers []uint64 + config config.ReplicationModeConfig + storage endpoint.ReplicationStatusStorage + cluster schedule.Cluster + fileReplicater FileReplicater + replicateState sync.Map drAutoSync drAutoSyncStatus // intermediate states of the recovery process @@ -241,7 +241,6 @@ func (m *ModeManager) drSwitchToAsyncWait(availableStores []uint64) error { return err } dr := drAutoSyncStatus{State: drStateAsyncWait, StateID: id, AvailableStores: availableStores} - m.drPersistStatusWithLock(dr) if err := m.storage.SaveReplicationStatus(modeDRAutoSync, dr); err != nil { log.Warn("failed to switch to async state", zap.String("replicate-mode", modeDRAutoSync), errs.ZapError(err)) return err @@ -264,7 +263,6 @@ func (m *ModeManager) drSwitchToAsyncWithLock(availableStores []uint64) error { return err } dr := drAutoSyncStatus{State: drStateAsync, StateID: id, AvailableStores: availableStores} - m.drPersistStatusWithLock(dr) if err := m.storage.SaveReplicationStatus(modeDRAutoSync, dr); err != nil { log.Warn("failed to switch to async state", zap.String("replicate-mode", modeDRAutoSync), errs.ZapError(err)) return err @@ -288,7 +286,6 @@ func (m *ModeManager) drSwitchToSyncRecoverWithLock() error { } now := time.Now() dr := drAutoSyncStatus{State: drStateSyncRecover, StateID: id, RecoverStartTime: &now} - m.drPersistStatusWithLock(dr) if err = m.storage.SaveReplicationStatus(modeDRAutoSync, dr); err != nil { log.Warn("failed to switch to sync_recover state", zap.String("replicate-mode", modeDRAutoSync), errs.ZapError(err)) return err @@ -308,7 +305,6 @@ func (m *ModeManager) drSwitchToSync() error { return err } dr := drAutoSyncStatus{State: drStateSync, StateID: id} - m.drPersistStatusWithLock(dr) if err := m.storage.SaveReplicationStatus(modeDRAutoSync, dr); err != nil { log.Warn("failed to switch to sync state", zap.String("replicate-mode", modeDRAutoSync), errs.ZapError(err)) return err @@ -318,50 +314,6 @@ func (m *ModeManager) drSwitchToSync() error { return nil } -func (m *ModeManager) drPersistStatusWithLock(status drAutoSyncStatus) { - ctx, cancel := context.WithTimeout(context.Background(), persistFileTimeout) - defer cancel() - - members, err := m.fileReplicater.GetMembers() - if err != nil { - log.Warn("failed to get members", zap.String("replicate-mode", modeDRAutoSync)) - return - } - - data, _ := json.Marshal(status) - - m.replicatedMembers = m.replicatedMembers[:0] - for _, member := range members { 
- if err := m.fileReplicater.ReplicateFileToMember(ctx, member, DrStatusFile, data); err != nil { - log.Warn("failed to switch state", zap.String("replicate-mode", modeDRAutoSync), zap.String("new-state", status.State), errs.ZapError(err)) - // Throw away the error to make it possible to switch to async when - // primary and dr DC are disconnected. This will result in the - // inability to accurately determine whether data is fully - // synchronized when using dr DC to disaster recovery. - // Since the member will not be in `replicatedMembers` list, PD will - // try to replicate state file later. - } else { - m.replicatedMembers = append(m.replicatedMembers, member.GetMemberId()) - } - } -} - -func (m *ModeManager) drCheckNeedPersistStatus(members []*pdpb.Member) bool { - m.RLock() - defer m.RUnlock() - return slice.AnyOf(members, func(i int) bool { // if there is any member in the new list - return slice.NoneOf(m.replicatedMembers, func(j int) bool { // not replicated - return m.replicatedMembers[j] == members[i].GetMemberId() - }) - }) -} - -func (m *ModeManager) drPersistStatus() { - m.Lock() - defer m.Unlock() - m.drPersistStatusWithLock(drAutoSyncStatus{State: m.drAutoSync.State, StateID: m.drAutoSync.StateID}) -} - func (m *ModeManager) drGetState() string { m.RLock() defer m.RUnlock() @@ -369,8 +321,9 @@ func (m *ModeManager) drGetState() string { } const ( - idleTimeout = time.Minute - tickInterval = 500 * time.Millisecond + idleTimeout = time.Minute + tickInterval = 500 * time.Millisecond + replicateStateInterval = time.Second * 5 ) // Run starts the background job. @@ -381,47 +334,103 @@ func (m *ModeManager) Run(ctx context.Context) { case <-ctx.Done(): return } - for { - select { - case <-time.After(tickInterval): - case <-ctx.Done(): - return + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + for { + select { + case <-time.After(tickInterval): + case <-ctx.Done(): + return + } + m.tickUpdateState() + } + }() + + go func() { + defer wg.Done() + for { + select { + case <-time.After(replicateStateInterval): + case <-ctx.Done(): + return + } + m.tickReplicateStatus() } - m.tickDR() + }() + + wg.Wait() +} + +func minimalUpVoters(rule *placement.Rule, upStores, downStores []*core.StoreInfo) int { + if rule.Role == placement.Learner { + return 0 + } + var up, down int + for _, s := range upStores { + if placement.MatchLabelConstraints(s, rule.LabelConstraints) { + up++ + } + } + for _, s := range downStores { + if placement.MatchLabelConstraints(s, rule.LabelConstraints) { + down++ + } + } + minimalUp := rule.Count - down + if minimalUp < 0 { + minimalUp = 0 + } + if minimalUp > up { + minimalUp = up } + return minimalUp } -func (m *ModeManager) tickDR() { +func (m *ModeManager) tickUpdateState() { if m.getModeName() != modeDRAutoSync { return } drTickCounter.Inc() - totalPrimaryPeers, totalDrPeers := m.config.DRAutoSync.PrimaryReplicas, m.config.DRAutoSync.DRReplicas - stores := m.checkStoreStatus() + stores, storeIDs := m.checkStoreStatus() - // canSync is true when every region has at least 1 replica in each DC. - canSync := len(stores[primaryDown]) < totalPrimaryPeers && len(stores[drDown]) < totalDrPeers && - len(stores[primaryUp]) > 0 && len(stores[drUp]) > 0 + var primaryHasVoter, drHasVoter bool + var totalVoter, totalUpVoter int + for _, r := range m.cluster.GetRuleManager().GetAllRules() { + if len(r.StartKey) > 0 || len(r.EndKey) > 0 { + // All rules should be global rules. If not, skip it. 
+ continue + } + if r.Role != placement.Learner { + totalVoter += r.Count + } + minimalUpPrimary := minimalUpVoters(r, stores[primaryUp], stores[primaryDown]) + minimalUpDr := minimalUpVoters(r, stores[drUp], stores[drDown]) + primaryHasVoter = primaryHasVoter || minimalUpPrimary > 0 + drHasVoter = drHasVoter || minimalUpDr > 0 + upVoters := minimalUpPrimary + minimalUpDr + if upVoters > r.Count { + upVoters = r.Count + } + totalUpVoter += upVoters + } + // canSync is true when every region has at least 1 voter replica in each DC. // hasMajority is true when every region has majority peer online. - var upPeers int - if len(stores[primaryDown]) < totalPrimaryPeers { - upPeers += totalPrimaryPeers - len(stores[primaryDown]) - } - if len(stores[drDown]) < totalDrPeers { - upPeers += totalDrPeers - len(stores[drDown]) - } - hasMajority := upPeers*2 > totalPrimaryPeers+totalDrPeers + canSync := primaryHasVoter && drHasVoter + hasMajority := totalUpVoter*2 > totalVoter log.Debug("replication store status", - zap.Uint64s("up-primary", stores[primaryUp]), - zap.Uint64s("up-dr", stores[drUp]), - zap.Uint64s("down-primary", stores[primaryDown]), - zap.Uint64s("down-dr", stores[drDown]), + zap.Uint64s("up-primary", storeIDs[primaryUp]), + zap.Uint64s("up-dr", storeIDs[drUp]), + zap.Uint64s("down-primary", storeIDs[primaryDown]), + zap.Uint64s("down-dr", storeIDs[drDown]), zap.Bool("can-sync", canSync), - zap.Int("up-peers", upPeers), zap.Bool("has-majority", hasMajority), ) @@ -447,31 +456,31 @@ func (m *ModeManager) tickDR() { case drStateSync: // If hasMajority is false, the cluster is always unavailable. Switch to async won't help. if !canSync && hasMajority { - m.drSwitchToAsyncWait(stores[primaryUp]) + m.drSwitchToAsyncWait(storeIDs[primaryUp]) } case drStateAsyncWait: if canSync { m.drSwitchToSync() break } - if oldAvailableStores := m.drGetAvailableStores(); !reflect.DeepEqual(oldAvailableStores, stores[primaryUp]) { - m.drSwitchToAsyncWait(stores[primaryUp]) + if oldAvailableStores := m.drGetAvailableStores(); !reflect.DeepEqual(oldAvailableStores, storeIDs[primaryUp]) { + m.drSwitchToAsyncWait(storeIDs[primaryUp]) break } - if m.drCheckStoreStateUpdated(stores[primaryUp]) { - m.drSwitchToAsync(stores[primaryUp]) + if m.drCheckStoreStateUpdated(storeIDs[primaryUp]) { + m.drSwitchToAsync(storeIDs[primaryUp]) } case drStateAsync: if canSync { m.drSwitchToSyncRecover() break } - if !reflect.DeepEqual(m.drGetAvailableStores(), stores[primaryUp]) && m.drCheckStoreStateUpdated(stores[primaryUp]) { - m.drSwitchToAsync(stores[primaryUp]) + if !reflect.DeepEqual(m.drGetAvailableStores(), storeIDs[primaryUp]) && m.drCheckStoreStateUpdated(storeIDs[primaryUp]) { + m.drSwitchToAsync(storeIDs[primaryUp]) } case drStateSyncRecover: if !canSync && hasMajority { - m.drSwitchToAsync(stores[primaryUp]) + m.drSwitchToAsync(storeIDs[primaryUp]) } else { m.updateProgress() progress := m.estimateProgress() @@ -484,8 +493,42 @@ func (m *ModeManager) tickDR() { } } } +} + +func (m *ModeManager) tickReplicateStatus() { + if m.getModeName() != modeDRAutoSync { + return + } - m.checkReplicateFile() + m.RLock() + state := drAutoSyncStatus{ + State: m.drAutoSync.State, + StateID: m.drAutoSync.StateID, + AvailableStores: m.drAutoSync.AvailableStores, + RecoverStartTime: m.drAutoSync.RecoverStartTime, + } + m.RUnlock() + + data, _ := json.Marshal(state) + + members, err := m.fileReplicater.GetMembers() + if err != nil { + log.Warn("failed to get members", zap.String("replicate-mode", modeDRAutoSync)) + return + } + for _, 
member := range members { + stateID, ok := m.replicateState.Load(member.GetMemberId()) + if !ok || stateID.(uint64) != state.StateID { + ctx, cancel := context.WithTimeout(context.Background(), persistFileTimeout) + err := m.fileReplicater.ReplicateFileToMember(ctx, member, DrStatusFile, data) + if err != nil { + log.Warn("failed to switch state", zap.String("replicate-mode", modeDRAutoSync), zap.String("new-state", state.State), errs.ZapError(err)) + } else { + m.replicateState.Store(member.GetMemberId(), state.StateID) + } + cancel() + } + } } const ( @@ -496,39 +539,40 @@ const ( storeStatusTypeCount ) -func (m *ModeManager) checkStoreStatus() [][]uint64 { +func (m *ModeManager) checkStoreStatus() ([][]*core.StoreInfo, [][]uint64) { m.RLock() defer m.RUnlock() - stores := make([][]uint64, storeStatusTypeCount) + stores, storeIDs := make([][]*core.StoreInfo, storeStatusTypeCount), make([][]uint64, storeStatusTypeCount) for _, s := range m.cluster.GetStores() { if s.IsRemoved() { continue } - // learner peers do not participate in major commit or vote, so it should not count in primary/dr as a normal store. - if s.GetRegionCount() == s.GetLearnerCount() { - continue - } down := s.DownTime() >= m.config.DRAutoSync.WaitStoreTimeout.Duration labelValue := s.GetLabelValue(m.config.DRAutoSync.LabelKey) if labelValue == m.config.DRAutoSync.Primary { if down { - stores[primaryDown] = append(stores[primaryDown], s.GetID()) + stores[primaryDown] = append(stores[primaryDown], s) + storeIDs[primaryDown] = append(storeIDs[primaryDown], s.GetID()) } else { - stores[primaryUp] = append(stores[primaryUp], s.GetID()) + stores[primaryUp] = append(stores[primaryUp], s) + storeIDs[primaryUp] = append(storeIDs[primaryUp], s.GetID()) } } if labelValue == m.config.DRAutoSync.DR { if down { - stores[drDown] = append(stores[drDown], s.GetID()) + stores[drDown] = append(stores[drDown], s) + storeIDs[drDown] = append(storeIDs[drDown], s.GetID()) } else { - stores[drUp] = append(stores[drUp], s.GetID()) + stores[drUp] = append(stores[drUp], s) + storeIDs[drUp] = append(storeIDs[drUp], s.GetID()) } } } for i := range stores { - sort.Slice(stores[i], func(a, b int) bool { return stores[i][a] < stores[i][b] }) + sort.Slice(stores[i], func(a, b int) bool { return stores[i][a].GetID() < stores[i][b].GetID() }) + sort.Slice(storeIDs[i], func(a, b int) bool { return storeIDs[i][a] < storeIDs[i][b] }) } - return stores + return stores, storeIDs } // UpdateStoreDRStatus saves the dr-autosync status of a store. 
@@ -557,17 +601,6 @@ func (m *ModeManager) drCheckStoreStateUpdated(stores []uint64) bool { return true } -func (m *ModeManager) checkReplicateFile() { - members, err := m.fileReplicater.GetMembers() - if err != nil { - log.Warn("failed to get members", zap.String("replicate-mode", modeDRAutoSync)) - return - } - if m.drCheckNeedPersistStatus(members) { - m.drPersistStatus() - } -} - var ( regionScanBatchSize = 1024 regionMinSampleSize = 512 diff --git a/server/replication/replication_mode_test.go b/server/replication/replication_mode_test.go index 09456893eb0..e01fb7a0b9a 100644 --- a/server/replication/replication_mode_test.go +++ b/server/replication/replication_mode_test.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/mock/mockconfig" + "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/server/config" @@ -167,14 +168,17 @@ func TestStateSwitch(t *testing.T) { LabelKey: "zone", Primary: "zone1", DR: "zone2", - PrimaryReplicas: 4, - DRReplicas: 2, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) replicator := newMockReplicator([]uint64{1}) rep, err := NewReplicationModeManager(conf, store, cluster, replicator) re.NoError(err) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "zone", value: "zone1", role: placement.Voter, count: 4}, + {key: "zone", value: "zone2", role: placement.Voter, count: 2}, + }), true) cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1"}) cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1"}) @@ -185,6 +189,7 @@ func TestStateSwitch(t *testing.T) { re.Equal(drStateSync, rep.drGetState()) stateID := rep.drAutoSync.StateID re.NotEqual(uint64(0), stateID) + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, stateID), replicator.lastData[1]) assertStateIDUpdate := func() { re.NotEqual(stateID, rep.drAutoSync.StateID) @@ -198,9 +203,10 @@ func TestStateSwitch(t *testing.T) { } // only one zone, sync -> async_wait -> async - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateAsyncWait, rep.drGetState()) assertStateIDUpdate() + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4]}`, stateID), replicator.lastData[1]) re.False(rep.GetReplicationStatus().GetDrAutoSync().GetPauseRegionSplit()) @@ -209,112 +215,119 @@ func TestStateSwitch(t *testing.T) { re.True(rep.GetReplicationStatus().GetDrAutoSync().GetPauseRegionSplit()) syncStoreStatus(1, 2, 3, 4) - rep.tickDR() + rep.tickUpdateState() assertStateIDUpdate() + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"async","state_id":%d,"available_stores":[1,2,3,4]}`, stateID), replicator.lastData[1]) // add new store in dr zone. 
 	cluster.AddLabelsStore(5, 1, map[string]string{"zone": "zone2"})
 	cluster.AddLabersStoreWithLearnerCount(6, 1, 1, map[string]string{"zone": "zone2"})
 	// async -> sync
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSyncRecover, rep.drGetState())
 	rep.drSwitchToSync()
 	re.Equal(drStateSync, rep.drGetState())
 	assertStateIDUpdate()
 	// sync -> async_wait
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSync, rep.drGetState())
 	setStoreState(cluster, "down", "up", "up", "up", "up", "up")
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSync, rep.drGetState())
 	setStoreState(cluster, "down", "down", "up", "up", "up", "up")
 	setStoreState(cluster, "down", "down", "down", "up", "up", "up")
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSync, rep.drGetState()) // cannot guarantee majority, keep sync.
 	setStoreState(cluster, "up", "up", "up", "up", "up", "down")
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSync, rep.drGetState())
-	// once the voter node down, even learner node up, swith to async state.
-	setStoreState(cluster, "up", "up", "up", "up", "down", "up")
-	rep.tickDR()
+	// once zone2 is down, switch to async state.
+	setStoreState(cluster, "up", "up", "up", "up", "down", "down")
+	rep.tickUpdateState()
 	re.Equal(drStateAsyncWait, rep.drGetState())
 	rep.drSwitchToSync()
 	replicator.errors[2] = errors.New("fail to replicate")
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateAsyncWait, rep.drGetState())
 	assertStateIDUpdate()
 	delete(replicator.errors, 1)
 	// async_wait -> sync
 	setStoreState(cluster, "up", "up", "up", "up", "up", "up")
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateSync, rep.drGetState())
 	re.False(rep.GetReplicationStatus().GetDrAutoSync().GetPauseRegionSplit())
 	// async_wait -> async_wait
-	setStoreState(cluster, "up", "up", "up", "up", "down", "up")
-	rep.tickDR()
+	setStoreState(cluster, "up", "up", "up", "up", "down", "down")
+	rep.tickUpdateState()
 	re.Equal(drStateAsyncWait, rep.drGetState())
 	assertStateIDUpdate()
+	rep.tickReplicateStatus()
 	re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4]}`, stateID), replicator.lastData[1])
-	setStoreState(cluster, "down", "up", "up", "up", "down", "up")
-	rep.tickDR()
+	setStoreState(cluster, "down", "up", "up", "up", "down", "down")
+	rep.tickUpdateState()
 	assertStateIDUpdate()
+	rep.tickReplicateStatus()
 	re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[2,3,4]}`, stateID), replicator.lastData[1])
-	setStoreState(cluster, "up", "down", "up", "up", "down", "up")
-	rep.tickDR()
+	setStoreState(cluster, "up", "down", "up", "up", "down", "down")
+	rep.tickUpdateState()
 	assertStateIDUpdate()
+	rep.tickReplicateStatus()
 	re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,3,4]}`, stateID), replicator.lastData[1])
 	// async_wait -> async
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateAsyncWait, rep.drGetState())
 	syncStoreStatus(1, 3)
-	rep.tickDR()
+	rep.tickUpdateState()
 	re.Equal(drStateAsyncWait, rep.drGetState())
 	syncStoreStatus(4)
-	rep.tickDR()
+	rep.tickUpdateState()
 	assertStateIDUpdate()
+	rep.tickReplicateStatus()
 	re.Equal(fmt.Sprintf(`{"state":"async","state_id":%d,"available_stores":[1,3,4]}`, stateID), replicator.lastData[1])
 	// async -> async
-	setStoreState(cluster, "up", "up", "up", "up", "down", "up")
-	rep.tickDR()
+	setStoreState(cluster, "up", "up", "up", "up", "down", "down")
+	rep.tickUpdateState()
 	// store 2 won't be available before it syncs status.
+ rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"async","state_id":%d,"available_stores":[1,3,4]}`, stateID), replicator.lastData[1]) syncStoreStatus(1, 2, 3, 4) - rep.tickDR() + rep.tickUpdateState() assertStateIDUpdate() + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"async","state_id":%d,"available_stores":[1,2,3,4]}`, stateID), replicator.lastData[1]) // async -> sync_recover setStoreState(cluster, "up", "up", "up", "up", "up", "up") - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) assertStateIDUpdate() rep.drSwitchToAsync([]uint64{1, 2, 3, 4, 5}) setStoreState(cluster, "down", "up", "up", "up", "up", "up") - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) assertStateIDUpdate() // sync_recover -> async - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) - setStoreState(cluster, "up", "up", "up", "up", "down", "up") - rep.tickDR() + setStoreState(cluster, "up", "up", "up", "up", "down", "down") + rep.tickUpdateState() re.Equal(drStateAsync, rep.drGetState()) assertStateIDUpdate() // lost majority, does not switch to async. rep.drSwitchToSyncRecover() assertStateIDUpdate() - setStoreState(cluster, "down", "down", "up", "up", "down", "up") - rep.tickDR() + setStoreState(cluster, "down", "down", "up", "up", "down", "down") + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) // sync_recover -> sync @@ -328,7 +341,7 @@ func TestStateSwitch(t *testing.T) { State: pb.RegionReplicationState_SIMPLE_MAJORITY, })) cluster.PutRegion(region) - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) region = region.Clone(core.SetReplicationStatus(&pb.RegionReplicationStatus{ @@ -336,14 +349,14 @@ func TestStateSwitch(t *testing.T) { StateId: rep.drAutoSync.StateID - 1, // mismatch state id })) cluster.PutRegion(region) - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSyncRecover, rep.drGetState()) region = region.Clone(core.SetReplicationStatus(&pb.RegionReplicationStatus{ State: pb.RegionReplicationState_INTEGRITY_OVER_LABEL, StateId: rep.drAutoSync.StateID, })) cluster.PutRegion(region) - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateSync, rep.drGetState()) assertStateIDUpdate() } @@ -357,37 +370,44 @@ func TestReplicateState(t *testing.T) { LabelKey: "zone", Primary: "zone1", DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "zone", value: "zone1", role: placement.Voter, count: 2}, + {key: "zone", value: "zone2", role: placement.Voter, count: 1}, + }), true) replicator := newMockReplicator([]uint64{1}) rep, err := NewReplicationModeManager(conf, store, cluster, replicator) re.NoError(err) + cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1"}) + cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1"}) stateID := rep.drAutoSync.StateID // replicate after initialized + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, stateID), replicator.lastData[1]) // repliate state to new member replicator.memberIDs = append(replicator.memberIDs, 2, 3) - rep.checkReplicateFile() + rep.tickReplicateStatus() re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, stateID), replicator.lastData[2]) re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, 
stateID), replicator.lastData[3]) // inject error replicator.errors[2] = errors.New("failed to persist") - rep.tickDR() // switch async_wait since there is only one zone + rep.tickUpdateState() // switch async_wait since there is only one zone newStateID := rep.drAutoSync.StateID - re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d}`, newStateID), replicator.lastData[1]) + rep.tickReplicateStatus() + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2]}`, newStateID), replicator.lastData[1]) re.Equal(fmt.Sprintf(`{"state":"sync","state_id":%d}`, stateID), replicator.lastData[2]) - re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d}`, newStateID), replicator.lastData[3]) + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2]}`, newStateID), replicator.lastData[3]) // clear error, replicate to node 2 next time delete(replicator.errors, 2) - rep.checkReplicateFile() - re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d}`, newStateID), replicator.lastData[2]) + rep.tickReplicateStatus() + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2]}`, newStateID), replicator.lastData[2]) } func TestAsynctimeout(t *testing.T) { @@ -399,11 +419,14 @@ func TestAsynctimeout(t *testing.T) { LabelKey: "zone", Primary: "zone1", DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "zone", value: "zone1", role: placement.Voter, count: 2}, + {key: "zone", value: "zone2", role: placement.Voter, count: 1}, + }), true) var replicator mockFileReplicator rep, err := NewReplicationModeManager(conf, store, cluster, &replicator) re.NoError(err) @@ -413,7 +436,7 @@ func TestAsynctimeout(t *testing.T) { cluster.AddLabelsStore(3, 1, map[string]string{"zone": "zone2"}) setStoreState(cluster, "up", "up", "down") - rep.tickDR() + rep.tickUpdateState() re.Equal(drStateAsyncWait, rep.drGetState()) } @@ -442,11 +465,14 @@ func TestRecoverProgress(t *testing.T) { LabelKey: "zone", Primary: "zone1", DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "zone", value: "zone1", role: placement.Voter, count: 2}, + {key: "zone", value: "zone2", role: placement.Voter, count: 1}, + }), true) cluster.AddLabelsStore(1, 1, map[string]string{}) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) @@ -504,11 +530,14 @@ func TestRecoverProgressWithSplitAndMerge(t *testing.T) { LabelKey: "zone", Primary: "zone1", DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "zone", value: "zone1", role: placement.Voter, count: 2}, + {key: "zone", value: "zone2", role: placement.Voter, count: 1}, + }), true) cluster.AddLabelsStore(1, 1, map[string]string{}) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) @@ -560,6 +589,157 @@ func TestRecoverProgressWithSplitAndMerge(t 
*testing.T) { re.Equal(float32(1.0), rep.estimateProgress()) } +func TestComplexPlacementRules(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + store := storage.NewStorageWithMemoryBackend() + conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + }} + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + replicator := newMockReplicator([]uint64{1}) + rep, err := NewReplicationModeManager(conf, store, cluster, replicator) + re.NoError(err) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "logic", value: "logic1", role: placement.Voter, count: 1}, + {key: "logic", value: "logic2", role: placement.Voter, count: 1}, + {key: "logic", value: "logic3", role: placement.Voter, count: 1}, + {key: "logic", value: "logic4", role: placement.Voter, count: 1}, + {key: "logic", value: "logic5", role: placement.Voter, count: 1}, + }), true) + + cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(3, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(4, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(5, 1, map[string]string{"zone": "zone1", "logic": "logic3"}) + cluster.AddLabelsStore(6, 1, map[string]string{"zone": "zone1", "logic": "logic3"}) + cluster.AddLabelsStore(7, 1, map[string]string{"zone": "zone2", "logic": "logic4"}) + cluster.AddLabelsStore(8, 1, map[string]string{"zone": "zone2", "logic": "logic4"}) + cluster.AddLabelsStore(9, 1, map[string]string{"zone": "zone2", "logic": "logic5"}) + cluster.AddLabelsStore(10, 1, map[string]string{"zone": "zone2", "logic": "logic5"}) + + // initial state is sync + re.Equal(drStateSync, rep.drGetState()) + + // down logic3 + logic5, can remain sync + setStoreState(cluster, "up", "up", "up", "up", "down", "down", "up", "up", "down", "down") + rep.tickUpdateState() + re.Equal(drStateSync, rep.drGetState()) + + // down 1 tikv from logic4 + 1 tikv from logic5, cannot sync + setStoreState(cluster, "up", "up", "up", "up", "up", "up", "up", "down", "up", "down") + rep.tickUpdateState() + re.Equal(drStateAsyncWait, rep.drGetState()) + rep.tickReplicateStatus() + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4,5,6]}`, rep.drAutoSync.StateID), replicator.lastData[1]) + + // reset to sync + setStoreState(cluster, "up", "up", "up", "up", "up", "up", "up", "up", "up", "up") + rep.tickUpdateState() + re.Equal(drStateSync, rep.drGetState()) + + // lost majority, down 1 tikv from logic2 + 1 tikv from logic3 + 1tikv from logic5, remain sync state + setStoreState(cluster, "up", "up", "up", "down", "up", "down", "up", "up", "up", "down") + rep.tickUpdateState() + re.Equal(drStateSync, rep.drGetState()) +} + +func TestComplexPlacementRules2(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + store := storage.NewStorageWithMemoryBackend() + conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + WaitStoreTimeout: typeutil.Duration{Duration: 
time.Minute}, + }} + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + replicator := newMockReplicator([]uint64{1}) + rep, err := NewReplicationModeManager(conf, store, cluster, replicator) + re.NoError(err) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "logic", value: "logic1", role: placement.Voter, count: 2}, + {key: "logic", value: "logic2", role: placement.Voter, count: 1}, + {key: "logic", value: "logic3", role: placement.Voter, count: 2}, + }), true) + + cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(3, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(4, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(5, 1, map[string]string{"zone": "zone2", "logic": "logic3"}) + cluster.AddLabelsStore(6, 1, map[string]string{"zone": "zone2", "logic": "logic3"}) + cluster.AddLabelsStore(7, 1, map[string]string{"zone": "zone2", "logic": "logic3"}) + + // initial state is sync + re.Equal(drStateSync, rep.drGetState()) + + // down 1 from logic3, can remain sync + setStoreState(cluster, "up", "up", "up", "up", "up", "down", "up") + rep.tickUpdateState() + re.Equal(drStateSync, rep.drGetState()) + + // down 1 from logic1, 1 from logic2, can remain sync + setStoreState(cluster, "up", "down", "up", "down", "up", "up", "up") + rep.tickUpdateState() + re.Equal(drStateSync, rep.drGetState()) + + // down another from logic3, cannot sync + setStoreState(cluster, "up", "up", "up", "up", "down", "down", "up") + rep.tickUpdateState() + re.Equal(drStateAsyncWait, rep.drGetState()) + rep.tickReplicateStatus() + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4]}`, rep.drAutoSync.StateID), replicator.lastData[1]) +} + +func TestComplexPlacementRules3(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + store := storage.NewStorageWithMemoryBackend() + conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + }} + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) + replicator := newMockReplicator([]uint64{1}) + rep, err := NewReplicationModeManager(conf, store, cluster, replicator) + re.NoError(err) + cluster.GetRuleManager().SetAllGroupBundles( + genPlacementRuleConfig([]ruleConfig{ + {key: "logic", value: "logic1", role: placement.Voter, count: 2}, + {key: "logic", value: "logic2", role: placement.Learner, count: 1}, + {key: "logic", value: "logic3", role: placement.Voter, count: 1}, + }), true) + + cluster.AddLabelsStore(1, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(2, 1, map[string]string{"zone": "zone1", "logic": "logic1"}) + cluster.AddLabelsStore(3, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(4, 1, map[string]string{"zone": "zone1", "logic": "logic2"}) + cluster.AddLabelsStore(5, 1, map[string]string{"zone": "zone2", "logic": "logic3"}) + + // initial state is sync + re.Equal(drStateSync, rep.drGetState()) + + // zone2 down, switch state, available stores should contain logic2 (learner) + setStoreState(cluster, "up", "up", "up", "up", "down") + 
rep.tickUpdateState() + re.Equal(drStateAsyncWait, rep.drGetState()) + rep.tickReplicateStatus() + re.Equal(fmt.Sprintf(`{"state":"async_wait","state_id":%d,"available_stores":[1,2,3,4]}`, rep.drAutoSync.StateID), replicator.lastData[1]) +} + func genRegions(cluster *mockcluster.Cluster, stateID uint64, n int) []*core.RegionInfo { var regions []*core.RegionInfo for i := 1; i <= n; i++ { @@ -579,3 +759,27 @@ func genRegions(cluster *mockcluster.Cluster, stateID uint64, n int) []*core.Reg } return regions } + +type ruleConfig struct { + key string + value string + role placement.PeerRoleType + count int +} + +func genPlacementRuleConfig(rules []ruleConfig) []placement.GroupBundle { + group := placement.GroupBundle{ + ID: "group1", + } + for i, r := range rules { + group.Rules = append(group.Rules, &placement.Rule{ + ID: fmt.Sprintf("rule%d", i), + Role: r.role, + LabelConstraints: []placement.LabelConstraint{ + {Key: r.key, Op: placement.In, Values: []string{r.value}}, + }, + Count: r.count, + }) + } + return []placement.GroupBundle{group} +}
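
Note on the availability arithmetic introduced by this patch: for each voter placement rule, minimalUpVoters treats count-down as the number of replicas guaranteed to still be up (a down store can hide at most one replica of that rule), clamped to the [0, up] range, and tickUpdateState keeps majority while 2*totalUpVoter > totalVoter. The sketch below is a minimal, self-contained illustration of that counting only; the rule and store types are simplified stand-ins for pkg/schedule/placement and pkg/core, and the per-DC split (canSync additionally requires at least one guaranteed-up voter in both the primary and dr zones) is omitted for brevity.

package main

import "fmt"

// Simplified stand-ins for placement.Rule and core.StoreInfo, used only for
// this illustration.
type rule struct {
	role  string // "voter" or "learner"
	label string // label value selected by the rule's constraint, e.g. "logic3"
	count int    // number of replicas the rule places
}

type store struct {
	label string
	up    bool
}

// minimalUpVoters mirrors the counting in the patch: a voter rule can lose at
// most `down` replicas to down stores, so at least count-down voters are still
// up, clamped to [0, up]. Learner rules contribute no voters.
func minimalUpVoters(r rule, stores []store) int {
	if r.role == "learner" {
		return 0
	}
	var up, down int
	for _, s := range stores {
		if s.label != r.label {
			continue
		}
		if s.up {
			up++
		} else {
			down++
		}
	}
	minimal := r.count - down
	if minimal < 0 {
		minimal = 0
	}
	if minimal > up {
		minimal = up
	}
	return minimal
}

func main() {
	// Rule layout borrowed from TestComplexPlacementRules2: 2 voters on
	// logic1, 1 on logic2, 2 on logic3.
	rules := []rule{
		{role: "voter", label: "logic1", count: 2},
		{role: "voter", label: "logic2", count: 1},
		{role: "voter", label: "logic3", count: 2},
	}
	// One of the three logic3 stores is down.
	stores := []store{
		{"logic1", true}, {"logic1", true},
		{"logic2", true}, {"logic2", true},
		{"logic3", true}, {"logic3", false}, {"logic3", true},
	}
	totalVoter, totalUpVoter := 0, 0
	for _, r := range rules {
		totalVoter += r.count
		totalUpVoter += minimalUpVoters(r, stores)
	}
	// 5 voters in total, at least 2+1+1 = 4 guaranteed up: majority holds.
	fmt.Printf("total=%d up>=%d majority=%v\n", totalVoter, totalUpVoter, totalUpVoter*2 > totalVoter)
}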