From aa9311717725184073525065e0cdba59aebc774b Mon Sep 17 00:00:00 2001 From: lucklove Date: Thu, 20 May 2021 12:25:16 +0800 Subject: [PATCH] Don't check labels when user enables placement rules From https://docs.pingcap.com/tidb/stable/configure-placement-rules#scenario-4-add-two-follower-replicas-for-a-table-in-the-beijing-node-with-high-performance-disks: > After enabling Placement Rules, the previously configured max-replicas and location-labels no longer take effect. To adjust the replica policy, use the interface related to Placement Rules. Fix https://github.com/pingcap/tiup/issues/1371 --- pkg/cluster/api/pdapi.go | 8 ++++---- pkg/cluster/manager/display.go | 9 ++++++--- pkg/cluster/manager/scale_out.go | 8 +++++--- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/cluster/api/pdapi.go b/pkg/cluster/api/pdapi.go index d4ea9a555a..d0e2ca5308 100644 --- a/pkg/cluster/api/pdapi.go +++ b/pkg/cluster/api/pdapi.go @@ -692,18 +692,18 @@ func (pc *PDClient) GetReplicateConfig() ([]byte, error) { } // GetLocationLabels gets the replication.location-labels config from pd server -func (pc *PDClient) GetLocationLabels() ([]string, error) { +func (pc *PDClient) GetLocationLabels() ([]string, bool, error) { config, err := pc.GetReplicateConfig() if err != nil { - return nil, err + return nil, false, err } rc := PDReplicationConfig{} if err := json.Unmarshal(config, &rc); err != nil { - return nil, perrs.Annotatef(err, "unmarshal replication config: %s", string(config)) + return nil, false, perrs.Annotatef(err, "unmarshal replication config: %s", string(config)) } - return rc.LocationLabels, nil + return rc.LocationLabels, rc.EnablePlacementRules, nil } // GetTiKVLabels implements TiKVLabelProvider diff --git a/pkg/cluster/manager/display.go b/pkg/cluster/manager/display.go index 3d6dd35b95..068f36bc28 100644 --- a/pkg/cluster/manager/display.go +++ b/pkg/cluster/manager/display.go @@ -203,10 +203,13 @@ func (m *Manager) Display(name string, opt 
operator.Options) error { if t, ok := topo.(*spec.Specification); ok { // Check if TiKV's label set correctly pdClient := api.NewPDClient(masterActive, 10*time.Second, tlsCfg) - if lbs, err := pdClient.GetLocationLabels(); err != nil { + + if lbs, placementRule, err := pdClient.GetLocationLabels(); err != nil { log.Debugf("get location labels from pd failed: %v", err) - } else if err := spec.CheckTiKVLabels(lbs, pdClient); err != nil { - color.Yellow("\nWARN: there is something wrong with TiKV labels, which may cause data losing:\n%v", err) + } else if !placementRule { + if err := spec.CheckTiKVLabels(lbs, pdClient); err != nil { + color.Yellow("\nWARN: there is something wrong with TiKV labels, which may cause data losing:\n%v", err) + } } // Check if there is some instance in tombstone state diff --git a/pkg/cluster/manager/scale_out.go b/pkg/cluster/manager/scale_out.go index e117100588..ea8740b702 100644 --- a/pkg/cluster/manager/scale_out.go +++ b/pkg/cluster/manager/scale_out.go @@ -97,12 +97,14 @@ func (m *Manager) ScaleOut( return err } pdClient := api.NewPDClient(pdList, 10*time.Second, tlsCfg) - lbs, err := pdClient.GetLocationLabels() + lbs, placementRule, err := pdClient.GetLocationLabels() if err != nil { return err } - if err := spec.CheckTiKVLabels(lbs, mergedTopo.(*spec.Specification)); err != nil { - return perrs.Errorf("check TiKV label failed, please fix that before continue:\n%s", err) + if !placementRule { + if err := spec.CheckTiKVLabels(lbs, mergedTopo.(*spec.Specification)); err != nil { + return perrs.Errorf("check TiKV label failed, please fix that before continue:\n%s", err) + } } }