diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go b/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go new file mode 100644 index 0000000000..1d06cb0bab --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/defaulting/dmcluster_test.go @@ -0,0 +1,59 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaulting + +import ( + "testing" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" +) + +func TestSetDMSpecDefault(t *testing.T) { + g := NewGomegaWithT(t) + + dc := newDMCluster() + SetDMClusterDefault(dc) + g.Expect(dc.Spec.Master.Config).Should(BeNil()) + + dc = newDMCluster() + rpcTimeoutStr := "40s" + dc.Spec.Master.Config = &v1alpha1.MasterConfig{ + RPCTimeoutStr: &rpcTimeoutStr, + } + SetDMClusterDefault(dc) + g.Expect(*dc.Spec.Master.Config.RPCTimeoutStr).Should(Equal(rpcTimeoutStr)) + + dc = newDMCluster() + dc.Spec.Version = "v2.0.0-rc.2" + keepAliveTTL := int64(15) + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + KeepAliveTTL: &keepAliveTTL, + } + SetDMClusterDefault(dc) + g.Expect(*dc.Spec.Worker.Config.KeepAliveTTL).Should(Equal(keepAliveTTL)) + g.Expect(*dc.Spec.Master.MaxFailoverCount).Should(Equal(int32(3))) + g.Expect(dc.Spec.Master.BaseImage).Should(Equal(defaultMasterImage)) + g.Expect(*dc.Spec.Worker.MaxFailoverCount).Should(Equal(int32(3))) + g.Expect(dc.Spec.Worker.BaseImage).Should(Equal(defaultWorkerImage)) +} + +func newDMCluster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + } +} diff --git a/pkg/apis/pingcap/v1alpha1/dm_config_test.go b/pkg/apis/pingcap/v1alpha1/dm_config_test.go new file mode 100644 index 0000000000..86a0bb1fed --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/dm_config_test.go @@ -0,0 +1,98 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/BurntSushi/toml" + . 
"github.com/onsi/gomega" + "k8s.io/utils/pointer" +) + +func TestDMMasterConfig(t *testing.T) { + g := NewGomegaWithT(t) + c := &MasterConfig{ + RPCTimeoutStr: pointer.StringPtr("40s"), + RPCRateLimit: pointer.Float64Ptr(15), + DMSecurityConfig: DMSecurityConfig{ + SSLCA: pointer.StringPtr("/var/lib/dm-master-tls/ca.crt"), + SSLCert: pointer.StringPtr("/var/lib/dm-master-tls/tls.crt"), + SSLKey: pointer.StringPtr("/var/lib/dm-master-tls/tls.key"), + }, + } + jsonStr, err := json.Marshal(c) + g.Expect(err).To(Succeed()) + g.Expect(jsonStr).To(ContainSubstring("rpc-rate-limit")) + g.Expect(jsonStr).To(ContainSubstring("40s")) + g.Expect(jsonStr).NotTo(ContainSubstring("rpc-rate-burst"), "Expected empty fields to be omitted") + var jsonUnmarshaled MasterConfig + err = json.Unmarshal(jsonStr, &jsonUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&jsonUnmarshaled).To(Equal(c)) + + buff := new(bytes.Buffer) + encoder := toml.NewEncoder(buff) + err = encoder.Encode(c) + g.Expect(err).To(Succeed()) + tStr := buff.String() + g.Expect(tStr).To((Equal(`rpc-timeout = "40s" +rpc-rate-limit = 15.0 +ssl-ca = "/var/lib/dm-master-tls/ca.crt" +ssl-cert = "/var/lib/dm-master-tls/tls.crt" +ssl-key = "/var/lib/dm-master-tls/tls.key" +`))) + + var tUnmarshaled MasterConfig + err = toml.Unmarshal([]byte(tStr), &tUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&tUnmarshaled).To(Equal(c)) +} + +func TestDMWorkerConfig(t *testing.T) { + g := NewGomegaWithT(t) + c := &WorkerConfig{ + KeepAliveTTL: pointer.Int64Ptr(15), + DMSecurityConfig: DMSecurityConfig{ + SSLCA: pointer.StringPtr("/var/lib/dm-worker-tls/ca.crt"), + SSLCert: pointer.StringPtr("/var/lib/dm-worker-tls/tls.crt"), + SSLKey: pointer.StringPtr("/var/lib/dm-worker-tls/tls.key"), + }, + } + jsonStr, err := json.Marshal(c) + g.Expect(err).To(Succeed()) + g.Expect(jsonStr).NotTo(ContainSubstring("log-file"), "Expected empty fields to be omitted") + var jsonUnmarshaled WorkerConfig + err = json.Unmarshal(jsonStr, &jsonUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&jsonUnmarshaled).To(Equal(c)) + + buff := new(bytes.Buffer) + encoder := toml.NewEncoder(buff) + err = encoder.Encode(c) + g.Expect(err).To(Succeed()) + tStr := buff.String() + g.Expect(tStr).To((Equal(`keepalive-ttl = 15 +ssl-ca = "/var/lib/dm-worker-tls/ca.crt" +ssl-cert = "/var/lib/dm-worker-tls/tls.crt" +ssl-key = "/var/lib/dm-worker-tls/tls.key" +`))) + + var tUnmarshaled WorkerConfig + err = toml.Unmarshal([]byte(tStr), &tUnmarshaled) + g.Expect(err).To(Succeed()) + g.Expect(&tUnmarshaled).To(Equal(c)) +} diff --git a/pkg/apis/pingcap/v1alpha1/dmcluster_test.go b/pkg/apis/pingcap/v1alpha1/dmcluster_test.go new file mode 100644 index 0000000000..570f8dc0df --- /dev/null +++ b/pkg/apis/pingcap/v1alpha1/dmcluster_test.go @@ -0,0 +1,314 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "testing" + + . 
"github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" +) + +func TestDMMasterIsAvailable(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*DMCluster) + expectFn func(*GomegaWithT, bool) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + test.update(dc) + test.expectFn(g, dc.MasterIsAvailable()) + } + tests := []testcase{ + { + name: "dm-master members count is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + } + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master members count is 2, but health count is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: false}, + } + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master members count is 3, health count is 3, but ready replicas is 1", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 1} + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeFalse()) + }, + }, + { + name: "dm-master is available", + update: func(dc *DMCluster) { + dc.Status.Master.Members = map[string]MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + expectFn: func(g *GomegaWithT, b bool) { + g.Expect(b).To(BeTrue()) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMComponentAccessor(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + cluster *DMClusterSpec + component *ComponentSpec + expectFn func(*GomegaWithT, ComponentAccessor) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + accessor := buildDMClusterComponentAccessor(test.cluster, test.component) + test.expectFn(g, accessor) + } + affinity := &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + TopologyKey: "rack", + }}, + }, + } + toleration1 := corev1.Toleration{ + Key: "k1", + } + toleration2 := corev1.Toleration{ + Key: "k2", + } + tests := []testcase{ + { + name: "use cluster-level defaults", + cluster: &DMClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: pointer.BoolPtr(true), + Affinity: affinity, + PriorityClassName: pointer.StringPtr("test"), + SchedulerName: "test", + }, + component: &ComponentSpec{}, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullNever)) + g.Expect(a.HostNetwork()).Should(Equal(true)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(*a.PriorityClassName()).Should(Equal("test")) + g.Expect(a.SchedulerName()).Should(Equal("test")) + }, + }, + { + name: "override at 
component-level", + cluster: &DMClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: pointer.BoolPtr(true), + Affinity: nil, + PriorityClassName: pointer.StringPtr("test"), + SchedulerName: "test", + }, + component: &ComponentSpec{ + ImagePullPolicy: func() *corev1.PullPolicy { a := corev1.PullAlways; return &a }(), + HostNetwork: func() *bool { a := false; return &a }(), + Affinity: affinity, + PriorityClassName: pointer.StringPtr("override"), + SchedulerName: pointer.StringPtr("override"), + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullAlways)) + g.Expect(a.HostNetwork()).Should(Equal(false)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(*a.PriorityClassName()).Should(Equal("override")) + g.Expect(a.SchedulerName()).Should(Equal("override")) + }, + }, + { + name: "node selector merge", + cluster: &DMClusterSpec{ + NodeSelector: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + NodeSelector: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.NodeSelector()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &DMClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &DMClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "tolerations merge", + cluster: &DMClusterSpec{ + Tolerations: []corev1.Toleration{toleration1}, + }, + component: &ComponentSpec{ + Tolerations: []corev1.Toleration{toleration2}, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Tolerations()).Should(ConsistOf(toleration2)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestMasterVersion(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*DMCluster) + expectFn func(*GomegaWithT, *DMCluster) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + test.update(dc) + test.expectFn(g, dc) + } + tests := []testcase{ + { + name: "has tag", + update: func(dc *DMCluster) { + dc.Spec.Master.BaseImage = "pingcap/dm:v2.0.0-rc.2" + }, + expectFn: func(g *GomegaWithT, dc *DMCluster) { + g.Expect(dc.MasterVersion()).To(Equal("v2.0.0-rc.2")) + }, + }, + { + name: "don't have tag", + update: func(dc *DMCluster) { + dc.Spec.Master.BaseImage = "pingcap/pd" + }, + expectFn: func(g *GomegaWithT, dc *DMCluster) { + g.Expect(dc.MasterVersion()).To(Equal("latest")) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func newDMCluster() *DMCluster { + return &DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: DMClusterSpec{ + Master: 
MasterSpec{ + Replicas: 3, + StorageSize: "10G", + }, + Worker: &WorkerSpec{ + Replicas: 3, + StorageSize: "10G", + }, + }, + } +} diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation.go b/pkg/apis/pingcap/v1alpha1/validation/validation.go index 37086176eb..f506996167 100644 --- a/pkg/apis/pingcap/v1alpha1/validation/validation.go +++ b/pkg/apis/pingcap/v1alpha1/validation/validation.go @@ -22,6 +22,7 @@ import ( "reflect" "strings" + "github.com/Masterminds/semver" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/label" corev1 "k8s.io/api/core/v1" @@ -222,6 +223,15 @@ func validatePumpSpec(spec *v1alpha1.PumpSpec, fldPath *field.Path) field.ErrorL func validateDMClusterSpec(spec *v1alpha1.DMClusterSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} + if spec.Version != "" { + clusterVersionLT2, _ := clusterVersionLessThan2(spec.Version) + if clusterVersionLT2 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, "dm cluster version can't set to v1.x.y")) + } + } + if spec.Discovery.Address == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("discovery.address"), "discovery.address must not be empty")) + } allErrs = append(allErrs, validateMasterSpec(&spec.Master, fldPath.Child("master"))...) if spec.Worker != nil { allErrs = append(allErrs, validateWorkerSpec(spec.Worker, fldPath.Child("worker"))...) @@ -232,6 +242,10 @@ func validateDMClusterSpec(spec *v1alpha1.DMClusterSpec, fldPath *field.Path) fi func validateMasterSpec(spec *v1alpha1.MasterSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...) + // make sure that storageSize for dm-master is assigned + if spec.Replicas > 0 && spec.StorageSize == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("storageSize"), "storageSize must not be empty")) + } return allErrs } @@ -510,3 +524,13 @@ func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.Error } return allErrs } + +// clusterVersionLessThan2 makes sure that deployed dm cluster version not to be v1.0.x +func clusterVersionLessThan2(version string) (bool, error) { + v, err := semver.NewVersion(version) + if err != nil { + return false, err + } + + return v.Major() < 2, nil +} diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go index fbb441711f..57f6780d24 100644 --- a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go +++ b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go @@ -189,6 +189,153 @@ func TestValidateAnnotations(t *testing.T) { } } +func TestValidateDMAnnotations(t *testing.T) { + successCases := []struct { + name string + dc v1alpha1.DMCluster + }{ + { + name: "all-fields-valid", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMMasterDeleteSlots: "[1,2]", + label.AnnDMWorkerDeleteSlots: "[1]", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + }, + { + name: "no delete slots", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: 
"v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + }, + // TODO: more cases + } + + for _, v := range successCases { + if errs := validateAnnotations(v.dc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations")); len(errs) != 0 { + t.Errorf("[%s]: unexpected error: %v", v.name, errs) + } + } + + errorCases := []struct { + name string + dc v1alpha1.DMCluster + errs []field.Error + }{ + { + name: "delete slots empty string", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMMasterDeleteSlots: "", + label.AnnDMWorkerDeleteSlots: "", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + errs: []field.Error{ + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-master.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-worker.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + }, + }, + { + name: "delete slots invalid format", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + label.AnnDMWorkerDeleteSlots: "1,2,3", + }, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.1", + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + }, + }, + }, + errs: []field.Error{ + { + Type: field.ErrorTypeInvalid, + Detail: `value of "dm-worker.tidb.pingcap.com/delete-slots" annotation must be a JSON list of int32`, + }, + }, + }, + } + + for _, v := range errorCases { + errs := validateDMAnnotations(v.dc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations")) + if len(errs) != len(v.errs) { + t.Errorf("[%s]: expected %d failures, got %d failures: %v", v.name, len(v.errs), len(errs), errs) + continue + } + for i := range errs { + if errs[i].Type != v.errs[i].Type { + t.Errorf("[%s]: expected error type %q, got %q", v.name, v.errs[i].Type, errs[i].Type) + } + if !strings.Contains(errs[i].Detail, v.errs[i].Detail) { + t.Errorf("[%s]: expected error errs[i].Detail %q, got %q", v.name, v.errs[i].Detail, errs[i].Detail) + } + if len(v.errs[i].Field) > 0 { + if errs[i].Field != v.errs[i].Field { + t.Errorf("[%s]: expected error field %q, got %q", v.name, v.errs[i].Field, errs[i].Field) + } + } + } + } +} + func TestValidateRequestsStorage(t *testing.T) { g := NewGomegaWithT(t) tests := []struct { @@ -300,6 +447,58 @@ func TestValidateTidbMonitor(t *testing.T) { } } +func TestValidateDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + tests := []struct { + name string + version string + discoveryAddr string + masterReplicas int32 + masterStorageSize string + expectedError string + }{ + { + name: "invalid version", + version: "v1.0.6", + discoveryAddr: "http://basic-discovery.demo:10261", + expectedError: "dm cluster version can't set to v1.x.y", + }, + { + name: "empty discovery address", + expectedError: "discovery.address must not be empty", + }, + { + name: "dm-master storageSize not given", + version: 
"v2.0.0-rc.2", + discoveryAddr: "http://basic-discovery.demo:10261", + masterReplicas: 3, + expectedError: "storageSize must not be empty", + }, + { + name: "correct configuration", + version: "nightly", + discoveryAddr: "http://basic-discovery.demo:10261", + masterReplicas: 3, + masterStorageSize: "10Gi", + expectedError: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dc := newDMCluster() + dc.Spec.Version = tt.version + dc.Spec.Discovery.Address = tt.discoveryAddr + dc.Spec.Master.Replicas = tt.masterReplicas + dc.Spec.Master.StorageSize = tt.masterStorageSize + err := ValidateDMCluster(dc) + if tt.expectedError != "" { + g.Expect(len(err)).Should(Equal(1)) + g.Expect(err[0].Detail).To(ContainSubstring(tt.expectedError)) + } + }) + } +} + func newTidbCluster() *v1alpha1.TidbCluster { tc := &v1alpha1.TidbCluster{ Spec: v1alpha1.TidbClusterSpec{ @@ -329,6 +528,19 @@ func newTidbMonitor() *v1alpha1.TidbMonitor { return monitor } +func newDMCluster() *v1alpha1.DMCluster { + dc := &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Discovery: v1alpha1.DMDiscoverySpec{}, + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + } + dc.Name = "test-validate-dm-cluster" + dc.Namespace = "default" + return dc +} + func TestValidateLocalDescendingPath(t *testing.T) { successCases := []string{ "data", diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index e4b709120d..5211c4e636 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -54,6 +54,20 @@ func TestGetOwnerRef(t *testing.T) { g.Expect(*ref.BlockOwnerDeletion).To(BeTrue()) } +func TestGetDMOwnerRef(t *testing.T) { + g := NewGomegaWithT(t) + + dc := newDMCluster() + dc.UID = types.UID("demo-uid") + ref := GetDMOwnerRef(dc) + g.Expect(ref.APIVersion).To(Equal(DMControllerKind.GroupVersion().String())) + g.Expect(ref.Kind).To(Equal(DMControllerKind.Kind)) + g.Expect(ref.Name).To(Equal(dc.GetName())) + g.Expect(ref.UID).To(Equal(types.UID("demo-uid"))) + g.Expect(*ref.Controller).To(BeTrue()) + g.Expect(*ref.BlockOwnerDeletion).To(BeTrue()) +} + func TestGetServiceType(t *testing.T) { g := NewGomegaWithT(t) @@ -184,6 +198,26 @@ func TestDiscoveryMemberName(t *testing.T) { g.Expect(DiscoveryMemberName("demo")).To(Equal("demo-discovery")) } +func TestDMMasterMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMMasterMemberName("demo")).To(Equal("demo-dm-master")) +} + +func TestDMMasterPeerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMMasterPeerMemberName("demo")).To(Equal("demo-dm-master-peer")) +} + +func TestDMWorkerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMWorkerMemberName("demo")).To(Equal("demo-dm-worker")) +} + +func TestDMWorkerPeerMemberName(t *testing.T) { + g := NewGomegaWithT(t) + g.Expect(DMWorkerPeerMemberName("demo")).To(Equal("demo-dm-worker-peer")) +} + func TestAnnProm(t *testing.T) { g := NewGomegaWithT(t) @@ -357,6 +391,24 @@ func newTidbCluster() *v1alpha1.TidbCluster { return tc } +func newDMCluster() *v1alpha1.DMCluster { + retainPVP := corev1.PersistentVolumeReclaimRetain + dc := &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "demo", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + PVReclaimPolicy: &retainPVP, 
+ }, + } + return dc +} + func newService(tc *v1alpha1.TidbCluster, _ string) *corev1.Service { svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go b/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go new file mode 100644 index 0000000000..7a692f6e88 --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_condition_updater_test.go @@ -0,0 +1,151 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + utildmcluster "github.com/pingcap/tidb-operator/pkg/util/dmcluster" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" +) + +func TestDMClusterConditionUpdater_Ready(t *testing.T) { + tests := []struct { + name string + dc *v1alpha1.DMCluster + wantStatus v1.ConditionStatus + wantReason string + wantMessage string + }{ + { + name: "statfulset(s) not up to date", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "1", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionFalse, + wantReason: utildmcluster.StatfulSetNotUpToDate, + wantMessage: "Statefulset(s) are in progress", + }, + { + name: "dm-master(s) not healthy", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 1, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "dm-master-1": { + Health: false, + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionFalse, + wantReason: utildmcluster.MasterUnhealthy, + wantMessage: "dm-master(s) are not healthy", + }, + { + name: "all ready", + dc: &v1alpha1.DMCluster{ + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 1, + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 1, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "dm-master-0": { + Health: true, + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + Worker: v1alpha1.WorkerStatus{ + Members: map[string]v1alpha1.WorkerMember{ + "dm-worker-0": { + Stage: "free", + }, + }, + StatefulSet: &appsv1.StatefulSetStatus{ + CurrentRevision: "2", + UpdateRevision: "2", + }, + }, + }, + }, + wantStatus: v1.ConditionTrue, + wantReason: utildmcluster.Ready, + wantMessage: "DM cluster 
is fully up and running", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + conditionUpdater := &dmClusterConditionUpdater{} + conditionUpdater.Update(tt.dc) + cond := utildmcluster.GetDMClusterCondition(tt.dc.Status, v1alpha1.DMClusterReady) + if diff := cmp.Diff(tt.wantStatus, cond.Status); diff != "" { + t.Errorf("unexpected status (-want, +got): %s", diff) + } + if diff := cmp.Diff(tt.wantReason, cond.Reason); diff != "" { + t.Errorf("unexpected reason (-want, +got): %s", diff) + } + if diff := cmp.Diff(tt.wantMessage, cond.Message); diff != "" { + t.Errorf("unexpected message (-want, +got): %s", diff) + } + }) + } +} diff --git a/pkg/controller/dmcluster/dm_cluster_control.go b/pkg/controller/dmcluster/dm_cluster_control.go index ac155ac4a8..1e0cb52b6b 100644 --- a/pkg/controller/dmcluster/dm_cluster_control.go +++ b/pkg/controller/dmcluster/dm_cluster_control.go @@ -191,3 +191,26 @@ func (dcc *defaultDMClusterControl) updateDMCluster(dc *v1alpha1.DMCluster) erro } return errorutils.NewAggregate(errs) } + +var _ ControlInterface = &defaultDMClusterControl{} + +type FakeDMClusterControlInterface struct { + err error +} + +func NewFakeDMClusterControlInterface() *FakeDMClusterControlInterface { + return &FakeDMClusterControlInterface{} +} + +func (ftcc *FakeDMClusterControlInterface) SetUpdateDCError(err error) { + ftcc.err = err +} + +func (ftcc *FakeDMClusterControlInterface) UpdateDMCluster(_ *v1alpha1.DMCluster) error { + if ftcc.err != nil { + return ftcc.err + } + return nil +} + +var _ ControlInterface = &FakeDMClusterControlInterface{} diff --git a/pkg/controller/dmcluster/dm_cluster_control_test.go b/pkg/controller/dmcluster/dm_cluster_control_test.go new file mode 100644 index 0000000000..18fb931770 --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_control_test.go @@ -0,0 +1,302 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "fmt" + "strings" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" + "github.com/pingcap/tidb-operator/pkg/controller" + mm "github.com/pingcap/tidb-operator/pkg/manager/member" + "github.com/pingcap/tidb-operator/pkg/manager/meta" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" +) + +func TestTidbClusterControlUpdateTidbCluster(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(cluster *v1alpha1.DMCluster) + syncReclaimPolicyErr bool + orphanPodCleanerErr bool + syncMasterMemberManagerErr bool + syncWorkerMemberManagerErr bool + pvcCleanerErr bool + updateDCStatusErr bool + errExpectFn func(*GomegaWithT, error) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMClusterForDMClusterControl() + if test.update != nil { + test.update(dc) + } + control, reclaimPolicyManager, orphanPodCleaner, masterMemberManager, workerMemberManager, pvcCleaner, dcControl := newFakeDMClusterControl() + + if test.syncReclaimPolicyErr { + reclaimPolicyManager.SetSyncError(fmt.Errorf("reclaim policy sync error")) + } + if test.orphanPodCleanerErr { + orphanPodCleaner.SetnOrphanPodCleanerError(fmt.Errorf("clean orphan pod error")) + } + if test.syncMasterMemberManagerErr { + masterMemberManager.SetSyncError(fmt.Errorf("dm-master member manager sync error")) + } + if test.syncWorkerMemberManagerErr { + workerMemberManager.SetSyncError(fmt.Errorf("dm-worker member manager sync error")) + } + if test.pvcCleanerErr { + pvcCleaner.SetPVCCleanerError(fmt.Errorf("clean PVC error")) + } + + if test.updateDCStatusErr { + dcControl.SetUpdateDMClusterError(fmt.Errorf("update dmcluster status error"), 0) + } + + err := control.UpdateDMCluster(dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + } + tests := []testcase{ + { + name: "reclaim policy sync error", + update: nil, + syncReclaimPolicyErr: true, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "reclaim policy sync error")).To(Equal(true)) + }, + }, + { + name: "clean orphan pod error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: true, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "clean orphan pod error")).To(Equal(true)) + }, + }, + { + name: "dm-master member manager sync error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: true, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "dm-master member manager sync error")).To(Equal(true)) + }, + }, + { + name: "dm-worker member manager sync error", + update: nil, + syncReclaimPolicyErr: false, + 
orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: true, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "dm-worker member manager sync error")).To(Equal(true)) + }, + }, + { + name: "clean PVC error", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: true, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "clean PVC error")).To(Equal(true)) + }, + }, + { + name: "dmcluster status is not updated", + update: nil, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "dmcluster status update failed", + update: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.Members = map[string]v1alpha1.MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + cluster.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + cluster.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "dm-worker-0": {Name: "dm-worker-0", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-1": {Name: "dm-worker-1", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-2": {Name: "dm-worker-2", Stage: v1alpha1.DMWorkerStateFree}, + } + cluster.Status.Worker.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + pvcCleanerErr: false, + updateDCStatusErr: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "update dmcluster status error")).To(Equal(true)) + }, + }, + { + name: "normal", + update: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.Members = map[string]v1alpha1.MasterMember{ + "dm-master-0": {Name: "dm-master-0", Health: true}, + "dm-master-1": {Name: "dm-master-1", Health: true}, + "dm-master-2": {Name: "dm-master-2", Health: true}, + } + cluster.Status.Master.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + cluster.Status.Worker.Members = map[string]v1alpha1.WorkerMember{ + "dm-worker-0": {Name: "dm-worker-0", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-1": {Name: "dm-worker-1", Stage: v1alpha1.DMWorkerStateFree}, + "dm-worker-2": {Name: "dm-worker-2", Stage: v1alpha1.DMWorkerStateFree}, + } + cluster.Status.Worker.StatefulSet = &apps.StatefulSetStatus{ReadyReplicas: 3} + }, + syncReclaimPolicyErr: false, + orphanPodCleanerErr: false, + syncMasterMemberManagerErr: false, + syncWorkerMemberManagerErr: false, + updateDCStatusErr: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterStatusEquality(t *testing.T) { + g := NewGomegaWithT(t) + dcStatus := v1alpha1.DMClusterStatus{} + + tcStatusCopy := dcStatus.DeepCopy() + tcStatusCopy.Master = v1alpha1.MasterStatus{} + 
g.Expect(apiequality.Semantic.DeepEqual(&dcStatus, tcStatusCopy)).To(Equal(true)) + + tcStatusCopy = dcStatus.DeepCopy() + tcStatusCopy.Master.Phase = v1alpha1.NormalPhase + g.Expect(apiequality.Semantic.DeepEqual(&dcStatus, tcStatusCopy)).To(Equal(false)) +} + +func newFakeDMClusterControl() ( + ControlInterface, + *meta.FakeReclaimPolicyManager, + *mm.FakeOrphanPodsCleaner, + *mm.FakeMasterMemberManager, + *mm.FakeWorkerMemberManager, + *mm.FakePVCCleaner, + *controller.FakeDMClusterControl) { + cli := fake.NewSimpleClientset() + dcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().DMClusters() + recorder := record.NewFakeRecorder(10) + + dcControl := controller.NewFakeDMClusterControl(dcInformer) + masterMemberManager := mm.NewFakeMasterMemberManager() + workerMemberManager := mm.NewFakeWorkerMemberManager() + reclaimPolicyManager := meta.NewFakeReclaimPolicyManager() + orphanPodCleaner := mm.NewFakeOrphanPodsCleaner() + pvcCleaner := mm.NewFakePVCCleaner() + pvcResizer := mm.NewFakePVCResizer() + control := NewDefaultDMClusterControl( + dcControl, + masterMemberManager, + workerMemberManager, + reclaimPolicyManager, + orphanPodCleaner, + pvcCleaner, + pvcResizer, + &dmClusterConditionUpdater{}, + recorder, + ) + + return control, reclaimPolicyManager, orphanPodCleaner, masterMemberManager, workerMemberManager, pvcCleaner, dcControl +} + +func newDMClusterForDMClusterControl() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + StorageSize: "10Gi", + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + StorageSize: "10Gi", + }, + }, + } +} diff --git a/pkg/controller/dmcluster/dm_cluster_controller.go b/pkg/controller/dmcluster/dm_cluster_controller.go index 907b06ce70..4383d83320 100644 --- a/pkg/controller/dmcluster/dm_cluster_controller.go +++ b/pkg/controller/dmcluster/dm_cluster_controller.go @@ -17,7 +17,6 @@ import ( "fmt" "time" - "github.com/Masterminds/semver" perrors "github.com/pingcap/errors" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -195,7 +194,7 @@ func NewController( setInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: dcc.addStatefulSet, UpdateFunc: func(old, cur interface{}) { - dcc.updateStatefuSet(old, cur) + dcc.updateStatefulSet(old, cur) }, DeleteFunc: dcc.deleteStatefulSet, }) @@ -266,13 +265,6 @@ func (dcc *Controller) sync(key string) error { if err != nil { return err } - clusterVersionLT2, err := clusterVersionLessThan2(dc.MasterVersion()) - if err != nil { - klog.V(4).Infof("cluster version: %s is not semantic versioning compatible", dc.MasterVersion()) - } else if clusterVersionLT2 { - klog.Errorf("dm version %s not supported, only support to deploy dm from v2.0", dc.MasterVersion()) - return nil - } return dcc.syncDMCluster(dc.DeepCopy()) } @@ -313,8 +305,8 @@ func (dcc *Controller) addStatefulSet(obj interface{}) { dcc.enqueueDMCluster(dc) } -// updateStatefuSet adds the dmcluster for the 
current and old statefulsets to the sync queue. -func (dcc *Controller) updateStatefuSet(old, cur interface{}) { +// updateStatefulSet adds the dmcluster for the current and old statefulsets to the sync queue. +func (dcc *Controller) updateStatefulSet(old, cur interface{}) { curSet := cur.(*apps.StatefulSet) oldSet := old.(*apps.StatefulSet) ns := curSet.GetNamespace() @@ -390,12 +382,3 @@ func (dcc *Controller) resolveDMClusterFromSet(namespace string, set *apps.State } return dc } - -func clusterVersionLessThan2(version string) (bool, error) { - v, err := semver.NewVersion(version) - if err != nil { - return true, err - } - - return v.Major() < 2, nil -} diff --git a/pkg/controller/dmcluster/dm_cluster_controller_test.go b/pkg/controller/dmcluster/dm_cluster_controller_test.go new file mode 100644 index 0000000000..966e79e4b3 --- /dev/null +++ b/pkg/controller/dmcluster/dm_cluster_controller_test.go @@ -0,0 +1,344 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmcluster + +import ( + "fmt" + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/scheme" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + controllerfake "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestDMClusterControllerEnqueueDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + dc := newDMCluster() + dcc, _, _ := newFakeDMClusterController() + + dcc.enqueueDMCluster(dc) + g.Expect(dcc.queue.Len()).To(Equal(1)) +} + +func TestDMClusterControllerEnqueueDMClusterFailed(t *testing.T) { + g := NewGomegaWithT(t) + dcc, _, _ := newFakeDMClusterController() + + dcc.enqueueDMCluster(struct{}{}) + g.Expect(dcc.queue.Len()).To(Equal(0)) +} + +func TestDMClusterControllerAddStatefulSet(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modifySet func(*v1alpha1.DMCluster) *apps.StatefulSet + addDMClusterToIndexer bool + expectedLen int + } + + testFn := func(test *testcase, t *testing.T) { + t.Log("test: ", test.name) + + dc := newDMCluster() + set := test.modifySet(dc) + + dcc, dcIndexer, _ := newFakeDMClusterController() + + if test.addDMClusterToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + dcc.addStatefulSet(set) + g.Expect(dcc.queue.Len()).To(Equal(test.expectedLen)) + } + + tests := []testcase{ + { + name: "normal", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return newStatefulSet(dc) + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + name: "have deletionTimestamp", + modifySet: func(dc *v1alpha1.DMCluster) 
*apps.StatefulSet { + set := newStatefulSet(dc) + set.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(30 * time.Second)} + return set + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + name: "without controllerRef", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + set := newStatefulSet(dc) + set.OwnerReferences = nil + return set + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without dmcluster", + modifySet: func(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return newStatefulSet(dc) + }, + addDMClusterToIndexer: false, + expectedLen: 0, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterControllerUpdateStatefulSet(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + updateSet func(*apps.StatefulSet) *apps.StatefulSet + addDMClusterToIndexer bool + expectedLen int + } + + testFn := func(test *testcase, t *testing.T) { + t.Log("test: ", test.name) + + dc := newDMCluster() + set1 := newStatefulSet(dc) + set2 := test.updateSet(set1) + + dcc, dcIndexer, _ := newFakeDMClusterController() + + if test.addDMClusterToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + dcc.updateStatefulSet(set1, set2) + g.Expect(dcc.queue.Len()).To(Equal(test.expectedLen)) + } + + tests := []testcase{ + { + name: "normal", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 1, + }, + { + name: "same resouceVersion", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without controllerRef", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + set2.OwnerReferences = nil + return &set2 + }, + addDMClusterToIndexer: true, + expectedLen: 0, + }, + { + name: "without dmcluster", + updateSet: func(set1 *apps.StatefulSet) *apps.StatefulSet { + set2 := *set1 + set2.ResourceVersion = "1000" + return &set2 + }, + addDMClusterToIndexer: false, + expectedLen: 0, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestDMClusterControllerSync(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + addDcToIndexer bool + errWhenUpdateDMCluster bool + errExpectFn func(*GomegaWithT, error) + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMCluster() + dcc, dcIndexer, dcControl := newFakeDMClusterController() + + if test.addDcToIndexer { + err := dcIndexer.Add(dc) + g.Expect(err).NotTo(HaveOccurred()) + } + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(dc) + g.Expect(err).NotTo(HaveOccurred()) + + if test.errWhenUpdateDMCluster { + dcControl.SetUpdateDCError(fmt.Errorf("update dm cluster failed")) + } + + err = dcc.sync(key) + + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + } + + tests := []testcase{ + { + name: "normal", + addDcToIndexer: true, + errWhenUpdateDMCluster: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "can't found dm cluster", + addDcToIndexer: false, + errWhenUpdateDMCluster: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "update dm cluster failed", + addDcToIndexer: true, + errWhenUpdateDMCluster: true, + errExpectFn: func(g 
*GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "update dm cluster failed")).To(Equal(true)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } + +} + +func alwaysReady() bool { return true } + +func newFakeDMClusterController() (*Controller, cache.Indexer, *FakeDMClusterControlInterface) { + cli := fake.NewSimpleClientset() + kubeCli := kubefake.NewSimpleClientset() + genericCli := controllerfake.NewFakeClientWithScheme(scheme.Scheme) + informerFactory := informers.NewSharedInformerFactory(cli, 0) + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0) + + dcInformer := informerFactory.Pingcap().V1alpha1().DMClusters() + autoFailover := true + dcControl := NewFakeDMClusterControlInterface() + + dcc := NewController( + kubeCli, + cli, + genericCli, + informerFactory, + kubeInformerFactory, + autoFailover, + 5*time.Minute, + 5*time.Minute, + ) + dcc.dcListerSynced = alwaysReady + dcc.setListerSynced = alwaysReady + + dcc.control = dcControl + return dcc, dcInformer.Informer().GetIndexer(), dcControl +} + +func newDMCluster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dm-master", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.MasterConfig{}, + StorageSize: "10Gi", + }, + Worker: &v1alpha1.WorkerSpec{ + Replicas: 3, + BaseImage: "pingcap/dm", + Config: &v1alpha1.WorkerConfig{}, + StorageSize: "10Gi", + }, + }, + } +} + +func newStatefulSet(dc *v1alpha1.DMCluster) *apps.StatefulSet { + return &apps.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefuset", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(dc, controller.DMControllerKind), + }, + ResourceVersion: "1", + }, + Spec: apps.StatefulSetSpec{ + Replicas: &dc.Spec.Master.Replicas, + }, + } +} diff --git a/pkg/controller/dmcluster_control.go b/pkg/controller/dmcluster_control.go index 1901d51d87..01d0b59dd7 100644 --- a/pkg/controller/dmcluster_control.go +++ b/pkg/controller/dmcluster_control.go @@ -18,8 +18,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog" @@ -79,3 +81,35 @@ func (rdc *realDMClusterControl) UpdateDMCluster(dc *v1alpha1.DMCluster, newStat } return updateDC, err } + +// FakeDMClusterControl is a fake DMClusterControlInterface +type FakeDMClusterControl struct { + DcLister listers.DMClusterLister + DcIndexer cache.Indexer + updateDMClusterTracker RequestTracker +} + +// NewFakeDMClusterControl returns a FakeDMClusterControl +func NewFakeDMClusterControl(dcInformer tcinformers.DMClusterInformer) *FakeDMClusterControl { + return 
&FakeDMClusterControl{ + dcInformer.Lister(), + dcInformer.Informer().GetIndexer(), + RequestTracker{}, + } +} + +// SetUpdateDMClusterError sets the error attributes of updateDMClusterTracker +func (ssc *FakeDMClusterControl) SetUpdateDMClusterError(err error, after int) { + ssc.updateDMClusterTracker.SetError(err).SetAfter(after) +} + +// UpdateDMCluster updates the DMCluster +func (ssc *FakeDMClusterControl) UpdateDMCluster(dc *v1alpha1.DMCluster, _ *v1alpha1.DMClusterStatus, _ *v1alpha1.DMClusterStatus) (*v1alpha1.DMCluster, error) { + defer ssc.updateDMClusterTracker.Inc() + if ssc.updateDMClusterTracker.ErrorReady() { + defer ssc.updateDMClusterTracker.Reset() + return dc, ssc.updateDMClusterTracker.GetError() + } + + return dc, ssc.DcIndexer.Update(dc) +} diff --git a/pkg/controller/dmcluster_control_test.go b/pkg/controller/dmcluster_control_test.go new file mode 100644 index 0000000000..6dc08f588a --- /dev/null +++ b/pkg/controller/dmcluster_control_test.go @@ -0,0 +1,66 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" + listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +func TestDMClusterControlUpdateDMCluster(t *testing.T) { + g := NewGomegaWithT(t) + recorder := record.NewFakeRecorder(10) + dc := newDMCluster() + dc.Spec.Master.Replicas = int32(5) + fakeClient := &fake.Clientset{} + control := NewRealDMClusterControl(fakeClient, nil, recorder) + fakeClient.AddReactor("update", "dmclusters", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + return true, update.GetObject(), nil + }) + updateDC, err := control.UpdateDMCluster(dc, &v1alpha1.DMClusterStatus{}, &v1alpha1.DMClusterStatus{}) + g.Expect(err).To(Succeed()) + g.Expect(updateDC.Spec.Master.Replicas).To(Equal(int32(5))) +} + +func TestDMClusterControlUpdateDMClusterConflictSuccess(t *testing.T) { + g := NewGomegaWithT(t) + recorder := record.NewFakeRecorder(10) + dc := newDMCluster() + fakeClient := &fake.Clientset{} + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + dcLister := listers.NewDMClusterLister(indexer) + control := NewRealDMClusterControl(fakeClient, dcLister, recorder) + conflict := false + fakeClient.AddReactor("update", "dmclusters", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + if !conflict { + conflict = true + return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), dc.Name, errors.New("conflict")) + } + return true, update.GetObject(), nil + }) + _, err := control.UpdateDMCluster(dc, 
&v1alpha1.DMClusterStatus{}, &v1alpha1.DMClusterStatus{}) + g.Expect(err).To(Succeed()) +} diff --git a/pkg/controller/service_control.go b/pkg/controller/service_control.go index d8ca9e54eb..bd061a7414 100644 --- a/pkg/controller/service_control.go +++ b/pkg/controller/service_control.go @@ -17,8 +17,6 @@ import ( "fmt" "strings" - tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" - v1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -138,21 +136,17 @@ type FakeServiceControl struct { SvcLister corelisters.ServiceLister SvcIndexer cache.Indexer EpsIndexer cache.Indexer - TcLister v1listers.TidbClusterLister - TcIndexer cache.Indexer createServiceTracker RequestTracker updateServiceTracker RequestTracker deleteStatefulSetTracker RequestTracker } // NewFakeServiceControl returns a FakeServiceControl -func NewFakeServiceControl(svcInformer coreinformers.ServiceInformer, epsInformer coreinformers.EndpointsInformer, tcInformer tcinformers.TidbClusterInformer) *FakeServiceControl { +func NewFakeServiceControl(svcInformer coreinformers.ServiceInformer, epsInformer coreinformers.EndpointsInformer) *FakeServiceControl { return &FakeServiceControl{ svcInformer.Lister(), svcInformer.Informer().GetIndexer(), epsInformer.Informer().GetIndexer(), - tcInformer.Lister(), - tcInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, RequestTracker{}, diff --git a/pkg/controller/stateful_set_control.go b/pkg/controller/stateful_set_control.go index a1fbdd95a2..157d8b0c8e 100644 --- a/pkg/controller/stateful_set_control.go +++ b/pkg/controller/stateful_set_control.go @@ -17,8 +17,6 @@ import ( "fmt" "strings" - tcinformers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1" - v1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -144,8 +142,6 @@ var _ StatefulSetControlInterface = &realStatefulSetControl{} type FakeStatefulSetControl struct { SetLister appslisters.StatefulSetLister SetIndexer cache.Indexer - TcLister v1listers.TidbClusterLister - TcIndexer cache.Indexer createStatefulSetTracker RequestTracker updateStatefulSetTracker RequestTracker deleteStatefulSetTracker RequestTracker @@ -153,12 +149,10 @@ type FakeStatefulSetControl struct { } // NewFakeStatefulSetControl returns a FakeStatefulSetControl -func NewFakeStatefulSetControl(setInformer appsinformers.StatefulSetInformer, tcInformer tcinformers.TidbClusterInformer) *FakeStatefulSetControl { +func NewFakeStatefulSetControl(setInformer appsinformers.StatefulSetInformer) *FakeStatefulSetControl { return &FakeStatefulSetControl{ setInformer.Lister(), setInformer.Informer().GetIndexer(), - tcInformer.Lister(), - tcInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, RequestTracker{}, diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index af8f4a569d..d162481ae2 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -256,7 +256,7 @@ func NewController( setInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: tcc.addStatefulSet, UpdateFunc: func(old, cur interface{}) { - tcc.updateStatefuSet(old, 
cur) + tcc.updateStatefulSet(old, cur) }, DeleteFunc: tcc.deleteStatefulSet, }) @@ -367,8 +367,8 @@ func (tcc *Controller) addStatefulSet(obj interface{}) { tcc.enqueueTidbCluster(tc) } -// updateStatefuSet adds the tidbcluster for the current and old statefulsets to the sync queue. -func (tcc *Controller) updateStatefuSet(old, cur interface{}) { +// updateStatefulSet adds the tidbcluster for the current and old statefulsets to the sync queue. +func (tcc *Controller) updateStatefulSet(old, cur interface{}) { curSet := cur.(*apps.StatefulSet) oldSet := old.(*apps.StatefulSet) ns := curSet.GetNamespace() diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go index 79e13a95aa..68e4397bbd 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go @@ -53,7 +53,7 @@ func TestTidbClusterControllerEnqueueTidbClusterFailed(t *testing.T) { g.Expect(tcc.queue.Len()).To(Equal(0)) } -func TestTidbClusterControllerAddStatefuSet(t *testing.T) { +func TestTidbClusterControllerAddStatefulSet(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { name string @@ -82,7 +82,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "normal", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - return newStatefuSet(tc) + return newStatefulSet(tc) }, addTidbClusterToIndexer: true, expectedLen: 1, @@ -90,7 +90,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "have deletionTimestamp", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - set := newStatefuSet(tc) + set := newStatefulSet(tc) set.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(30 * time.Second)} return set }, @@ -100,7 +100,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "without controllerRef", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - set := newStatefuSet(tc) + set := newStatefulSet(tc) set.OwnerReferences = nil return set }, @@ -110,7 +110,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { { name: "without tidbcluster", modifySet: func(tc *v1alpha1.TidbCluster) *apps.StatefulSet { - return newStatefuSet(tc) + return newStatefulSet(tc) }, addTidbClusterToIndexer: false, expectedLen: 0, @@ -122,7 +122,7 @@ func TestTidbClusterControllerAddStatefuSet(t *testing.T) { } } -func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { +func TestTidbClusterControllerUpdateStatefulSet(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { name string @@ -135,7 +135,7 @@ func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { t.Log("test: ", test.name) tc := newTidbCluster() - set1 := newStatefuSet(tc) + set1 := newStatefulSet(tc) set2 := test.updateSet(set1) tcc, tcIndexer, _ := newFakeTidbClusterController() @@ -144,7 +144,7 @@ func TestTidbClusterControllerUpdateStatefuSet(t *testing.T) { err := tcIndexer.Add(tc) g.Expect(err).NotTo(HaveOccurred()) } - tcc.updateStatefuSet(set1, set2) + tcc.updateStatefulSet(set1, set2) g.Expect(tcc.queue.Len()).To(Equal(test.expectedLen)) } @@ -336,14 +336,14 @@ func newTidbCluster() *v1alpha1.TidbCluster { } } -func newStatefuSet(tc *v1alpha1.TidbCluster) *apps.StatefulSet { +func newStatefulSet(tc *v1alpha1.TidbCluster) *apps.StatefulSet { return &apps.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefuset", + Name: 
"test-statefulset", Namespace: corev1.NamespaceDefault, UID: types.UID("test"), OwnerReferences: []metav1.OwnerReference{ diff --git a/pkg/controller/tidbcluster_control_test.go b/pkg/controller/tidbcluster_control_test.go index f07a985984..8d219fdc1b 100644 --- a/pkg/controller/tidbcluster_control_test.go +++ b/pkg/controller/tidbcluster_control_test.go @@ -16,7 +16,6 @@ package controller import ( "errors" "testing" - "time" . "github.com/onsi/gomega" diff --git a/pkg/discovery/server/server_test.go b/pkg/discovery/server/server_test.go index 8c2f5d5171..dec2e060d8 100644 --- a/pkg/discovery/server/server_test.go +++ b/pkg/discovery/server/server_test.go @@ -49,6 +49,17 @@ var ( PD: &v1alpha1.PDSpec{Replicas: 3}, }, } + dc = &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{Kind: "DMCluster", APIVersion: "v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + ResourceVersion: "1", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{Replicas: 3}, + }, + } ) func TestServer(t *testing.T) { @@ -134,3 +145,85 @@ func TestServer(t *testing.T) { t.Errorf("join expects 2, got %d", join) } } + +func TestDMServer(t *testing.T) { + os.Setenv("MY_POD_NAMESPACE", "default") + cli := fake.NewSimpleClientset() + kubeCli := kubefake.NewSimpleClientset() + fakePDControl := pdapi.NewFakePDControl(kubeCli) + faleMasterControl := dmapi.NewFakeMasterControl(kubeCli) + masterClient := dmapi.NewFakeMasterClient() + s := NewServer(fakePDControl, faleMasterControl, cli, kubeCli) + httpServer := httptest.NewServer(s.(*server).container.ServeMux) + defer httpServer.Close() + + var lock sync.RWMutex + masterMemberInfos := make([]*dmapi.MastersInfo, 0) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + lock.RLock() + defer lock.RUnlock() + if len(masterMemberInfos) <= 0 { + return nil, fmt.Errorf("no members yet") + } + // as masterMemberInfos.Members maybe modified, we must return a copy + ret := append([]*dmapi.MastersInfo{}, masterMemberInfos...) 
+ return ret, nil + }) + cli.PingcapV1alpha1().DMClusters(dc.Namespace).Create(dc) + faleMasterControl.SetMasterClient(dc.Namespace, dc.Name, masterClient) + + var ( + initial int32 + join int32 + ) + + errg, _ := errgroup.WithContext(context.Background()) + + for i := 0; i < 3; i++ { + i := i + errg.Go(func() error { + for { + svc := fmt.Sprintf(`foo-dm-master-%d.foo-dm-master-peer:2380`, i) + url := httpServer.URL + fmt.Sprintf("/new/%s/dm", base64.StdEncoding.EncodeToString([]byte(svc))) + resp, err := http.Get(url) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + time.Sleep(time.Millisecond * 100) + continue + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + lock.Lock() + masterMemberInfos = append(masterMemberInfos, &dmapi.MastersInfo{ + Name: svc, + PeerURLs: []string{ + svc, + }, + }) + lock.Unlock() + if strings.HasPrefix(string(data), "--join=") { + atomic.AddInt32(&join, 1) + } else if strings.HasPrefix(string(data), "--initial-cluster=") { + atomic.AddInt32(&initial, 1) + } + return nil + } + }) + } + + err := errg.Wait() + if err != nil { + t.Errorf("get dm-master info failed: %v", err) + } + + if initial != 1 { + t.Errorf("initial expects 1, got %d", initial) + } + if join != 2 { + t.Errorf("join expects 2, got %d", join) + } +} diff --git a/pkg/dmapi/dmapi.go b/pkg/dmapi/dmapi.go index 983d9fb84b..1ab964e770 100644 --- a/pkg/dmapi/dmapi.go +++ b/pkg/dmapi/dmapi.go @@ -203,13 +203,13 @@ func (mc *masterClient) deleteMember(query string) error { if err != nil { return err } - deleteMemeberResp := &RespHeader{} - err = json.Unmarshal(body, deleteMemeberResp) + deleteMemberResp := &RespHeader{} + err = json.Unmarshal(body, deleteMemberResp) if err != nil { return fmt.Errorf("unable to unmarshal delete member resp: %s, query: %s, err: %s", body, query, err) } - if !deleteMemeberResp.Result { - return fmt.Errorf("unable to delete member, query: %s, err: %s", query, deleteMemeberResp.Msg) + if !deleteMemberResp.Result { + return fmt.Errorf("unable to delete member, query: %s, err: %s", query, deleteMemberResp.Msg) } return nil diff --git a/pkg/dmapi/dmapi_test.go b/pkg/dmapi/dmapi_test.go new file mode 100644 index 0000000000..aaddd3e7d0 --- /dev/null +++ b/pkg/dmapi/dmapi_test.go @@ -0,0 +1,210 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package dmapi + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + . 
"github.com/onsi/gomega" +) + +const ( + ContentTypeJSON string = "application/json" +) + +func getClientServer(h func(http.ResponseWriter, *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(h)) +} + +func TestGetMembers(t *testing.T) { + g := NewGomegaWithT(t) + masters := []*MastersInfo{ + {Name: "dm-master1", MemberID: "1", Alive: false}, + {Name: "dm-master2", MemberID: "2", Alive: true}, + {Name: "dm-master3", MemberID: "3", Alive: true}, + } + masterResp := MastersResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberMaster{ + {MembersMaster{ + Msg: "", + Masters: masters, + }}, + }, + } + masterBytes, err := json.Marshal(masterResp) + g.Expect(err).NotTo(HaveOccurred()) + + workers := []*WorkersInfo{ + {Name: "dm-worker1", Addr: "127.0.0.1:8262", Stage: "free"}, + {Name: "dm-worker2", Addr: "127.0.0.1:8263", Stage: "bound", Source: "mysql-replica-01"}, + {Name: "dm-worker3", Addr: "127.0.0.1:8264", Stage: "offline"}, + } + workerResp := WorkerResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberWorker{ + {MembersWorker{ + Msg: "", + Workers: workers, + }}, + }, + } + workerBytes, err := json.Marshal(workerResp) + g.Expect(err).NotTo(HaveOccurred()) + + leader := MembersLeader{ + Msg: "", + Name: "dm-master2", + Addr: "127.0.0.1:8361", + } + leaderResp := LeaderResp{ + RespHeader: RespHeader{Result: true, Msg: ""}, + ListMemberResp: []*ListMemberLeader{ + {leader}}, + } + leaderBytes, err := json.Marshal(leaderResp) + g.Expect(err).NotTo(HaveOccurred()) + + tcs := []struct { + caseName string + path string + method string + getType string + resp []byte + want interface{} + }{{ + caseName: "GetMasters", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: masterBytes, + want: masters, + getType: "master", + }, { + caseName: "GetWorkers", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: workerBytes, + want: workers, + getType: "worker", + }, { + caseName: "GetLeader", + path: fmt.Sprintf("/%s", membersPrefix), + method: "GET", + resp: leaderBytes, + want: leader, + getType: "leader", + }} + + for _, tc := range tcs { + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal(tc.method), "check method") + g.Expect(request.URL.Path).To(Equal(tc.path), "check url") + g.Expect(request.FormValue(tc.getType)).To(Equal("true"), "check form value") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(tc.resp) + }) + defer svc.Close() + + var ( + result interface{} + err error + ) + masterClient := NewMasterClient(svc.URL, DefaultTimeout, &tls.Config{}, false) + switch tc.getType { + case "master": + result, err = masterClient.GetMasters() + case "worker": + result, err = masterClient.GetWorkers() + case "leader": + result, err = masterClient.GetLeader() + } + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(result).To(Equal(tc.want)) + } +} + +func TestEvictLeader(t *testing.T) { + g := NewGomegaWithT(t) + evictLeaderResp := RespHeader{Result: true, Msg: ""} + evictLeaderBytes, err := json.Marshal(evictLeaderResp) + g.Expect(err).NotTo(HaveOccurred()) + + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal("PUT"), "check method") + g.Expect(request.URL.Path).To(Equal(fmt.Sprintf("/%s/1", leaderPrefix)), "check url") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(evictLeaderBytes) + }) + + masterClient := NewMasterClient(svc.URL, 
DefaultTimeout, &tls.Config{}, false) + err = masterClient.EvictLeader() + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestDeleteMember(t *testing.T) { + g := NewGomegaWithT(t) + deleteMemberResp := RespHeader{Result: true, Msg: ""} + deleteMemberBytes, err := json.Marshal(deleteMemberResp) + g.Expect(err).NotTo(HaveOccurred()) + + tcs := []struct { + caseName string + path string + method string + resp []byte + delType string + name string + }{{ + caseName: "DeleteMaster", + path: fmt.Sprintf("/%s", membersPrefix), + method: "DELETE", + resp: deleteMemberBytes, + delType: "master", + name: "dm-master-1", + }, { + caseName: "DeleteWorker", + path: fmt.Sprintf("/%s", membersPrefix), + method: "DELETE", + resp: deleteMemberBytes, + delType: "worker", + name: "dm-worker-1", + }} + + for _, tc := range tcs { + svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + g.Expect(request.Method).To(Equal(tc.method), "check method") + g.Expect(request.URL.Path).To(Equal(fmt.Sprintf("%s/%s/%s", tc.path, tc.delType, tc.name)), "check url") + + w.Header().Set("Content-Type", ContentTypeJSON) + w.Write(tc.resp) + }) + defer svc.Close() + + masterClient := NewMasterClient(svc.URL, DefaultTimeout, &tls.Config{}, false) + switch tc.delType { + case "master": + err = masterClient.DeleteMaster(tc.name) + case "worker": + err = masterClient.DeleteWorker(tc.name) + } + g.Expect(err).NotTo(HaveOccurred()) + } +} diff --git a/pkg/label/label_test.go b/pkg/label/label_test.go index f79ae5cf8e..5204f17d22 100644 --- a/pkg/label/label_test.go +++ b/pkg/label/label_test.go @@ -28,6 +28,14 @@ func TestLabelNew(t *testing.T) { g.Expect(l[ManagedByLabelKey]).To(Equal("tidb-operator")) } +func TestLabelNewDM(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + g.Expect(l[NameLabelKey]).To(Equal("dm-cluster")) + g.Expect(l[ManagedByLabelKey]).To(Equal("tidb-operator")) +} + func TestLabelInstance(t *testing.T) { g := NewGomegaWithT(t) @@ -76,6 +84,22 @@ func TestLabelTiKV(t *testing.T) { g.Expect(l.IsTiKV()).To(BeTrue()) } +func TestLabelDMMaster(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMMaster() + g.Expect(l.IsDMMaster()).To(BeTrue()) +} + +func TestLabelDMWorker(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMWorker() + g.Expect(l.IsDMWorker()).To(BeTrue()) +} + func TestLabelSelector(t *testing.T) { g := NewGomegaWithT(t) @@ -130,3 +154,21 @@ func TestLabelLabels(t *testing.T) { } g.Expect(ls).To(Equal(m)) } + +func TestDMLabelLabels(t *testing.T) { + g := NewGomegaWithT(t) + + l := NewDM() + l.DMMaster() + l.Instance("demo") + l.Namespace("ns-1") + ls := l.Labels() + m := map[string]string{ + NameLabelKey: "dm-cluster", + ManagedByLabelKey: "tidb-operator", + ComponentLabelKey: "dm-master", + InstanceLabelKey: "demo", + NamespaceLabelKey: "ns-1", + } + g.Expect(ls).To(Equal(m)) +} diff --git a/pkg/manager/member/dm_master_failover.go b/pkg/manager/member/dm_master_failover.go index 4dee6bd15d..9d2c154ea2 100644 --- a/pkg/manager/member/dm_master_failover.go +++ b/pkg/manager/member/dm_master_failover.go @@ -259,3 +259,20 @@ func setDMMemberDeleted(dc *v1alpha1.DMCluster, podName string) { dc.Status.Master.FailureMembers[podName] = failureMember klog.Infof("dm-master failover: set dm-master member: %s/%s deleted", dc.GetName(), podName) } + +type fakeMasterFailover struct{} + +// NewFakeMasterFailover returns a fake Failover +func NewFakeMasterFailover() DMFailover { + return &fakeMasterFailover{} +} + +func (fmf *fakeMasterFailover) Failover(_ 
*v1alpha1.DMCluster) error { + return nil +} + +func (fmf *fakeMasterFailover) Recover(_ *v1alpha1.DMCluster) { +} + +func (fmf *fakeMasterFailover) RemoveUndesiredFailures(_ *v1alpha1.DMCluster) { +} diff --git a/pkg/manager/member/dm_master_member_manager.go b/pkg/manager/member/dm_master_member_manager.go index 6f1793424d..f10976221b 100644 --- a/pkg/manager/member/dm_master_member_manager.go +++ b/pkg/manager/member/dm_master_member_manager.go @@ -236,16 +236,6 @@ func (mmm *masterMemberManager) syncMasterStatefulSetForDMCluster(dc *v1alpha1.D return controller.RequeueErrorf("DMCluster: [%s/%s], waiting for dm-master cluster running", ns, dcName) } - if !dc.Status.Master.Synced { - force := NeedForceUpgrade(dc.Annotations) - if force { - dc.Status.Master.Phase = v1alpha1.UpgradePhase - setUpgradePartition(newMasterSet, 0) - errSTS := updateStatefulSet(mmm.setControl, dc, newMasterSet, oldMasterSet) - return controller.RequeueErrorf("dmcluster: [%s/%s]'s dm-master needs force upgrade, %v", ns, dcName, errSTS) - } - } - // Scaling takes precedence over upgrading because: // - if a dm-master fails in the upgrading, users may want to delete it or add // new replicas @@ -268,6 +258,16 @@ func (mmm *masterMemberManager) syncMasterStatefulSetForDMCluster(dc *v1alpha1.D } } + if !dc.Status.Master.Synced { + force := NeedForceUpgrade(dc.Annotations) + if force { + dc.Status.Master.Phase = v1alpha1.UpgradePhase + setUpgradePartition(newMasterSet, 0) + errSTS := updateStatefulSet(mmm.setControl, dc, newMasterSet, oldMasterSet) + return controller.RequeueErrorf("dmcluster: [%s/%s]'s dm-master needs force upgrade, %v", ns, dcName, errSTS) + } + } + if !templateEqual(newMasterSet, oldMasterSet) || dc.Status.Master.Phase == v1alpha1.UpgradePhase { if err := mmm.masterUpgrader.Upgrade(dc, oldMasterSet, newMasterSet); err != nil { return err @@ -401,9 +401,6 @@ func (mmm *masterMemberManager) syncDMClusterStatus(dc *v1alpha1.DMCluster, set // syncMasterConfigMap syncs the configmap of dm-master func (mmm *masterMemberManager) syncMasterConfigMap(dc *v1alpha1.DMCluster, set *apps.StatefulSet) (*corev1.ConfigMap, error) { - if dc.Spec.Master.Config == nil { - return nil, nil - } newCm, err := getMasterConfigMap(dc) if err != nil { return nil, err @@ -461,6 +458,9 @@ func (mmm *masterMemberManager) getNewMasterServiceForDMCluster(dc *v1alpha1.DMC if svcSpec.ClusterIP != nil { masterSvc.Spec.ClusterIP = *svcSpec.ClusterIP } + if svcSpec.PortName != nil { + masterSvc.Spec.Ports[0].Name = *svcSpec.PortName + } } return masterSvc } @@ -539,6 +539,9 @@ func getNewMasterSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( dcName := dc.Name baseMasterSpec := dc.BaseMasterSpec() instanceName := dc.GetInstanceName() + if cm == nil { + return nil, fmt.Errorf("config map for dm-master is not found, dmcluster %s/%s", dc.Namespace, dc.Name) + } masterConfigMap := cm.Name annMount, annVolume := annotationsMountVolume() @@ -729,10 +732,9 @@ func getNewMasterSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( } func getMasterConfigMap(dc *v1alpha1.DMCluster) (*corev1.ConfigMap, error) { - // For backward compatibility, only sync dm configmap when .master.config is non-nil config := dc.Spec.Master.Config if config == nil { - return nil, nil + config = &v1alpha1.MasterConfig{} } // override CA if tls enabled @@ -820,3 +822,22 @@ func (mmm *masterMemberManager) collectUnjoinedMembers(dc *v1alpha1.DMCluster, s } return nil } + +type FakeMasterMemberManager struct { + err error +} + +func 
NewFakeMasterMemberManager() *FakeMasterMemberManager { + return &FakeMasterMemberManager{} +} + +func (fpmm *FakeMasterMemberManager) SetSyncError(err error) { + fpmm.err = err +} + +func (fpmm *FakeMasterMemberManager) SyncDM(dc *v1alpha1.DMCluster) error { + if fpmm.err != nil { + return fpmm.err + } + return nil +} diff --git a/pkg/manager/member/dm_master_member_manager_test.go b/pkg/manager/member/dm_master_member_manager_test.go new file mode 100644 index 0000000000..0ee394ae72 --- /dev/null +++ b/pkg/manager/member/dm_master_member_manager_test.go @@ -0,0 +1,2083 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + "github.com/pingcap/tidb-operator/pkg/label" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" +) + +func TestMasterMemberManagerSyncCreate(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + prepare func(cluster *v1alpha1.DMCluster) + errWhenCreateStatefulSet bool + errWhenCreateMasterService bool + errWhenCreateMasterPeerService bool + errExpectFn func(*GomegaWithT, error) + masterSvcCreated bool + masterPeerSvcCreated bool + setCreated bool + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + oldSpec := dc.Spec + if test.prepare != nil { + test.prepare(dc) + } + + mmm, fakeSetControl, fakeSvcControl, _, _, _, _ := newFakeMasterMemberManager() + + if test.errWhenCreateStatefulSet { + fakeSetControl.SetCreateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenCreateMasterService { + fakeSvcControl.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenCreateMasterPeerService { + fakeSvcControl.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 1) + } + + err := mmm.SyncDM(dc) + test.errExpectFn(g, err) + g.Expect(dc.Spec).To(Equal(oldSpec)) + + svc1, err := mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + eps1, eperr := mmm.epsLister.Endpoints(ns).Get(controller.DMMasterMemberName(dcName)) + if test.masterSvcCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc1).NotTo(Equal(nil)) + g.Expect(eperr).NotTo(HaveOccurred()) + g.Expect(eps1).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + expectErrIsNotFound(g, eperr) + } + + svc2, err := 
mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + eps2, eperr := mmm.epsLister.Endpoints(ns).Get(controller.DMMasterPeerMemberName(dcName)) + if test.masterPeerSvcCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc2).NotTo(Equal(nil)) + g.Expect(eperr).NotTo(HaveOccurred()) + g.Expect(eps2).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + expectErrIsNotFound(g, eperr) + } + + dc1, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + if test.setCreated { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(dc1).NotTo(Equal(nil)) + } else { + expectErrIsNotFound(g, err) + } + } + + tests := []testcase{ + { + name: "normal", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: false, + errExpectFn: errExpectRequeue, + masterSvcCreated: true, + masterPeerSvcCreated: true, + setCreated: true, + }, + { + name: "error when create statefulset", + prepare: nil, + errWhenCreateStatefulSet: true, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: true, + masterPeerSvcCreated: true, + setCreated: false, + }, + { + name: "error when create dm-master service", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: true, + errWhenCreateMasterPeerService: false, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: false, + masterPeerSvcCreated: false, + setCreated: false, + }, + { + name: "error when create dm-master peer service", + prepare: nil, + errWhenCreateStatefulSet: false, + errWhenCreateMasterService: false, + errWhenCreateMasterPeerService: true, + errExpectFn: func(g *GomegaWithT, err error) { + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "API server failed")).To(BeTrue()) + }, + masterSvcCreated: true, + masterPeerSvcCreated: false, + setCreated: false, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestMasterMemberManagerSyncUpdate(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + errWhenUpdateStatefulSet bool + errWhenUpdateMasterService bool + errWhenGetLeader bool + errWhenGetMasterInfos bool + statusChange func(*apps.StatefulSet) + err bool + expectMasterServiceFn func(*GomegaWithT, *corev1.Service, error) + expectMasterPeerServiceFn func(*GomegaWithT, *corev1.Service, error) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, fakeSvcControl, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + if test.errWhenGetMasterInfos { + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("failed to get master infos of dm-master cluster") + }) + } else { + masterClient.AddReaction(dmapi.GetMastersActionType, func(action 
*dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + } + if test.errWhenGetLeader { + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("failed to get leader info of dm-master cluster") + }) + } else { + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + } + + if test.statusChange == nil { + fakeSetControl.SetStatusChange(func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }) + } else { + fakeSetControl.SetStatusChange(test.statusChange) + } + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.epsLister.Endpoints(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.epsLister.Endpoints(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + if test.errWhenUpdateMasterService { + fakeSvcControl.SetUpdateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errWhenUpdateStatefulSet { + fakeSetControl.SetUpdateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + err = mmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectMasterServiceFn != nil { + svc, err := mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectMasterServiceFn(g, svc, err) + } + if test.expectMasterPeerServiceFn != nil { + svc, err := mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + test.expectMasterPeerServiceFn(g, svc, err) + } + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + + tests := []testcase{ + { + name: "normal", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + masterNodePort := 30160 + dc.Spec.Master.Service = &v1alpha1.MasterServiceSpec{MasterNodePort: &masterNodePort} + dc.Spec.Master.Service.Type = corev1.ServiceTypeNodePort + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:2379", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: false, + expectMasterServiceFn: func(g *GomegaWithT, svc *corev1.Service, err error) { + 
g.Expect(err).NotTo(HaveOccurred()) + g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + }, + expectMasterPeerServiceFn: func(g *GomegaWithT, svc *corev1.Service, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.ScalePhase)) + g.Expect(dc.Status.Master.StatefulSet.ObservedGeneration).To(Equal(int64(1))) + g.Expect(len(dc.Status.Master.Members)).To(Equal(3)) + g.Expect(dc.Status.Master.Members["master1"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master2"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master3"].Health).To(Equal(false)) + }, + }, + { + name: "error when update dm-master service", + modify: func(dc *v1alpha1.DMCluster) { + masterNodePort := 30160 + dc.Spec.Master.Service = &v1alpha1.MasterServiceSpec{MasterNodePort: &masterNodePort} + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: true, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: true, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: nil, + }, + { + name: "error when update statefulset", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:2379"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:2379"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:2379"}, Alive: false}, + }, + errWhenUpdateStatefulSet: true, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: false, + err: true, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + }, + { + name: "error when get dm-master leader", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: true, + errWhenGetMasterInfos: false, + err: false, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Synced).To(BeFalse()) + g.Expect(dc.Status.Master.Members).To(BeNil()) + }, + }, + { + name: "error when sync dm-master infos", + modify: func(dc *v1alpha1.DMCluster) { + dc.Spec.Master.Replicas = 5 + }, + errWhenUpdateStatefulSet: false, + errWhenUpdateMasterService: false, + errWhenGetLeader: false, + errWhenGetMasterInfos: true, + err: false, + expectMasterServiceFn: nil, + expectMasterPeerServiceFn: nil, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + }, + expectDMClusterFn: func(g *GomegaWithT, dc 
*v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Synced).To(BeFalse()) + g.Expect(dc.Status.Master.Members).To(BeNil()) + }, + }, + } + + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterMemberManagerMasterStatefulSetIsUpgrading(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + setUpdate func(*apps.StatefulSet) + hasPod bool + updatePod func(*corev1.Pod) + errExpectFn func(*GomegaWithT, error) + expectUpgrading bool + } + testFn := func(test *testcase, t *testing.T) { + mmm, _, _, _, podIndexer, _, _ := newFakeMasterMemberManager() + dc := newDMClusterForMaster() + dc.Status.Master.StatefulSet = &apps.StatefulSetStatus{ + UpdateRevision: "v3", + } + + set := &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: metav1.NamespaceDefault, + }, + } + if test.setUpdate != nil { + test.setUpdate(set) + } + + if test.hasPod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, dc.GetName(), 0), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(dc.GetInstanceName()).DMMaster().Labels(), + }, + } + if test.updatePod != nil { + test.updatePod(pod) + } + podIndexer.Add(pod) + } + b, err := mmm.masterStatefulSetIsUpgrading(set, dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + if test.expectUpgrading { + g.Expect(b).To(BeTrue()) + } else { + g.Expect(b).NotTo(BeTrue()) + } + } + tests := []testcase{ + { + name: "stateful set is upgrading", + setUpdate: func(set *apps.StatefulSet) { + set.Status.CurrentRevision = "v1" + set.Status.UpdateRevision = "v2" + set.Status.ObservedGeneration = 1000 + }, + hasPod: false, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod don't have revision hash", + setUpdate: nil, + hasPod: true, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: false, + }, + { + name: "pod have revision hash, not equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[apps.ControllerRevisionHashLabelKey] = "v2" + }, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod have revision hash, equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[apps.ControllerRevisionHashLabelKey] = "v3" + }, + errExpectFn: nil, + expectUpgrading: false, + }, + } + + for i := range tests { + t.Logf(tests[i].name) + testFn(&tests[i], t) + } +} + +func TestMasterMemberManagerUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + err bool + statusChange func(*apps.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, _, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return 
test.leaderInfo, nil + }) + + fakeSetControl.SetStatusChange(test.statusChange) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + err = mmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + tests := []testcase{ + { + name: "upgrade successful", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Master.BaseImage = "dm-test-image-2" + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: false, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + g.Expect(len(dc.Status.Master.Members)).To(Equal(3)) + g.Expect(dc.Status.Master.Members["master1"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master2"].Health).To(Equal(true)) + g.Expect(dc.Status.Master.Members["master3"].Health).To(Equal(false)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterMemberManagerSyncMasterSts(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + err bool + statusChange func(*apps.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, fakeSetControl, _, fakeMasterControl, _, _, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + 
}) + + fakeSetControl.SetStatusChange(test.statusChange) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + test.modify(dc) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return nil, fmt.Errorf("cannot get leader") + }) + err = mmm.syncMasterStatefulSetForDMCluster(dc) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc) + } + } + tests := []testcase{ + { + name: "force upgrade", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Master.BaseImage = "dm-test-image-2" + cluster.Spec.Master.Replicas = 1 + cluster.ObjectMeta.Annotations = make(map[string]string) + cluster.ObjectMeta.Annotations["tidb.pingcap.com/force-upgrade"] = "true" + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: true, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + // scale in one pd from 3 -> 2 + g.Expect(*set.Spec.Replicas).To(Equal(int32(2))) + g.Expect(*set.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(0))) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.UpgradePhase)) + }, + }, + { + name: "non force upgrade", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Master.BaseImage = "dm-test-image-2" + cluster.Spec.Master.Replicas = 1 + }, + leaderInfo: dmapi.MembersLeader{ + Name: "master1", + Addr: "http://master1:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "master1", MemberID: "1", ClientURLs: []string{"http://master1:8261"}, Alive: true}, + {Name: "master2", MemberID: "2", ClientURLs: []string{"http://master2:8261"}, Alive: true}, + {Name: "master3", MemberID: "3", ClientURLs: []string{"http://master3:8261"}, Alive: false}, + }, + err: true, + statusChange: func(set *apps.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-master-1" + set.Status.UpdateRevision = "dm-master-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set 
*apps.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image:v2.0.0-rc.2")) + g.Expect(*set.Spec.Replicas).To(Equal(int32(3))) + g.Expect(*set.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(3))) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.Phase).To(Equal(v1alpha1.ScalePhase)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func newFakeMasterMemberManager() (*masterMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *dmapi.FakeMasterControl, cache.Indexer, cache.Indexer, *controller.FakePodControl) { + kubeCli := kubefake.NewSimpleClientset() + setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() + svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() + podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() + epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() + pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims() + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) + podControl := controller.NewFakePodControl(podInformer) + masterControl := dmapi.NewFakeMasterControl(kubeCli) + masterScaler := NewFakeMasterScaler() + autoFailover := true + masterFailover := NewFakeMasterFailover() + masterUpgrader := NewFakeMasterUpgrader() + genericControll := controller.NewFakeGenericControl() + + return &masterMemberManager{ + masterControl, + setControl, + svcControl, + controller.NewTypedControl(genericControll), + setInformer.Lister(), + svcInformer.Lister(), + podInformer.Lister(), + epsInformer.Lister(), + pvcInformer.Lister(), + masterScaler, + masterUpgrader, + autoFailover, + masterFailover, + }, setControl, svcControl, masterControl, podInformer.Informer().GetIndexer(), pvcInformer.Informer().GetIndexer(), podControl +} + +func newDMClusterForMaster() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Discovery: v1alpha1.DMDiscoverySpec{Address: "http://basic-discovery.demo:10261"}, + Master: v1alpha1.MasterSpec{ + BaseImage: "dm-test-image", + StorageSize: "100Gi", + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "dm-test-image", + StorageSize: "100Gi", + Replicas: 3, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + }, + } +} + +func TestGetNewMasterHeadlessServiceForDMCluster(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master-peer", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + 
"app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "peer", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "dm-master-peer", + Port: 8291, + TargetPort: intstr.FromInt(8291), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := getNewMasterHeadlessServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewMasterSetForDMCluster(t *testing.T) { + enable := true + tests := []struct { + name string + dc v1alpha1.DMCluster + wantErr bool + nilCM bool + testSts func(sts *apps.StatefulSet) + }{ + { + name: "dm-master config map is nil", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + wantErr: true, + nilCM: true, + testSts: nil, + }, + { + name: "dm-master network is not host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-master network is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, true, v1.DNSClusterFirstWithHostNet), + }, + { + name: "dm-master network is not host when dm-worker is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-master should respect resources config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + }, + StorageSize: "100Gi", + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + 
g.Expect(sts.Spec.VolumeClaimTemplates[0].Spec.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + })) + nameToContainer := MapContainers(&sts.Spec.Template.Spec) + masterContainer := nameToContainer[v1alpha1.DMMasterMemberType.String()] + g.Expect(masterContainer.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + })) + }, + }, + { + name: "set custom env", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + Env: []corev1.EnvVar{ + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + { + Name: "TZ", + Value: "ignored", + }, + }, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testContainerEnv(t, []corev1.EnvVar{ + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "PEER_SERVICE_NAME", + Value: "dc-dm-master-peer", + }, + { + Name: "SERVICE_NAME", + Value: "dc-dm-master", + }, + { + Name: "SET_NAME", + Value: "dc-dm-master", + }, + { + Name: "TZ", + Value: "UTC", + }, + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + }, + v1alpha1.DMMasterMemberType, + ), + }, + { + name: "dm version nightly, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-nightly", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(hasClusterTLSVol(sts, "dm-master-tls")).To(BeTrue()) + g.Expect(hasClusterVolMount(sts, v1alpha1.DMMasterMemberType)).To(BeTrue()) + }, + }, + { + name: "dmcluster with failureMember nonDeleted", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "test": { + MemberDeleted: false, + }, + }, + }, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(3))) + }, + }, + { + name: "dmcluster with failureMember Deleted", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Worker: &v1alpha1.WorkerSpec{}, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "test": { + MemberDeleted: true, + }, + }, + }, + }, + }, + testSts: func(sts *apps.StatefulSet) { + g := 
NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(4))) + }, + }, + { + name: "dm-master additional containers", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalContainers: []corev1.Container{customSideCarContainers[0]}, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testAdditionalContainers(t, []corev1.Container{customSideCarContainers[0]}), + }, + { + name: "dm-master additional volumes", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalVolumes: []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testAdditionalVolumes(t, []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}), + }, + // TODO add more tests + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var cm *corev1.ConfigMap + if !tt.nilCM { + cm = &corev1.ConfigMap{} + } + sts, err := getNewMasterSetForDMCluster(&tt.dc, cm) + if (err != nil) != tt.wantErr { + t.Fatalf("error %v, wantErr %v", err, tt.wantErr) + } + if tt.testSts != nil { + tt.testSts(sts) + } + }) + } +} + +func TestGetMasterConfigMap(t *testing.T) { + g := NewGomegaWithT(t) + testCases := []struct { + name string + dc v1alpha1.DMCluster + expected *corev1.ConfigMap + }{ + { + name: "dm-master config is nil", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": "", + }, + }, + }, + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Config: &v1alpha1.MasterConfig{ + LogLevel: pointer.StringPtr("debug"), + RPCTimeoutStr: pointer.StringPtr("40s"), + RPCRateLimit: pointer.Float64Ptr(15), + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return 
&b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": `log-level = "debug" +rpc-timeout = "40s" +rpc-rate-limit = 15.0 +`, + }, + }, + }, + { + name: "dm version v2.0.0-rc.2, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-v2", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + BaseImage: "pingcap/dm", + }, + Worker: &v1alpha1.WorkerSpec{}, + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + Version: "v2.0.0-rc.2", + }, + }, + expected: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-v2-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "tls-v2", + "app.kubernetes.io/component": "dm-master", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "tls-v2", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "startup-script": "", + "config-file": `ssl-ca = "/var/lib/dm-master-tls/ca.crt" +ssl-cert = "/var/lib/dm-master-tls/tls.crt" +ssl-key = "/var/lib/dm-master-tls/tls.key" +`, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + cm, err := getMasterConfigMap(&tt.dc) + g.Expect(err).To(Succeed()) + // startup-script is better tested in e2e + tt.expected.Data["startup-script"] = cm.Data["startup-script"] + g.Expect(AddConfigMapDigestSuffix(tt.expected)).To(Succeed()) + if diff := cmp.Diff(*tt.expected, *cm); diff != "" { + t.Errorf("unexpected ConfigMap (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewMasterServiceForDMCluster(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify ClusterIP type, clusterIP", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: 
&v1alpha1.MasterServiceSpec{ServiceSpec: v1alpha1.ServiceSpec{ClusterIP: pointer.StringPtr("172.20.10.1")}}, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify LoadBalancerIP type, LoadBalancerType", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerIP: pointer.StringPtr("172.20.10.1"), + }}, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + LoadBalancerIP: "172.20.10.1", + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify dm-master service NodePort", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + ClusterIP: pointer.StringPtr("172.20.10.1"), + }, + MasterNodePort: intPtr(30020), + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + 
"app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + NodePort: 30020, + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + { + name: "basic and specify dm-master service portname", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Service: &v1alpha1.MasterServiceSpec{ + ServiceSpec: v1alpha1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: pointer.StringPtr("172.20.10.1"), + PortName: pointer.StringPtr("http-dm-master"), + }, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-master", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + "app.kubernetes.io/used-by": "end-user", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.20.10.1", + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "http-dm-master", + Port: 8261, + TargetPort: intstr.FromInt(8261), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-master", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mmm, _, _, _, _, _, _ := newFakeMasterMemberManager() + svc := mmm.getNewMasterServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestMasterMemberManagerSyncMasterStsWhenMasterNotJoinCluster(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) + leaderInfo dmapi.MembersLeader + masterInfos []*dmapi.MastersInfo + dcStatusChange func(cluster *v1alpha1.DMCluster) + err bool + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForMaster() + ns := dc.Namespace + dcName := dc.Name + + mmm, _, _, fakeMasterControl, podIndexer, pvcIndexer, _ := newFakeMasterMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + + 
masterClient.AddReaction(dmapi.GetMastersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.masterInfos, nil + }) + masterClient.AddReaction(dmapi.GetLeaderActionType, func(action *dmapi.Action) (interface{}, error) { + return test.leaderInfo, nil + }) + + err := mmm.SyncDM(dc) + g.Expect(controller.IsRequeueError(err)).To(BeTrue()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.svcLister.Services(ns).Get(controller.DMMasterPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = mmm.setLister.StatefulSets(ns).Get(controller.DMMasterMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + if test.dcStatusChange != nil { + test.dcStatusChange(dc) + } + test.modify(dc, podIndexer, pvcIndexer) + err = mmm.syncMasterStatefulSetForDMCluster(dc) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc) + } + } + tests := []testcase{ + { + name: "add dm-master unjoin cluster member info", + modify: func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) { + for ordinal := 0; ordinal < 3; ordinal++ { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, cluster.GetName(), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + podIndexer.Add(pod) + } + for ordinal := 0; ordinal < 3; ordinal++ { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPVCName(v1alpha1.DMMasterMemberType, controller.DMMasterMemberName(cluster.GetName()), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + pvcIndexer.Add(pvc) + } + + }, + leaderInfo: dmapi.MembersLeader{ + Name: "test-dm-master-0", + Addr: "http://test-dm-master-0:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "test-dm-master-0", MemberID: "1", ClientURLs: []string{"http://test-dm-master-0:8261"}, Alive: false}, + {Name: "test-dm-master-1", MemberID: "2", ClientURLs: []string{"http://test-dm-master-1:8261"}, Alive: false}, + }, + err: false, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.UnjoinedMembers["test-dm-master-2"]).NotTo(BeNil()) + }, + }, + { + name: "clear unjoin cluster member info when the member join the cluster", + dcStatusChange: func(cluster *v1alpha1.DMCluster) { + cluster.Status.Master.UnjoinedMembers = map[string]v1alpha1.UnjoinedMember{ + "test-dm-master-0": { + PodName: "test-dm-master-0", + CreatedAt: metav1.Now(), + }, + } + }, + modify: func(cluster *v1alpha1.DMCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) { + for ordinal := 0; ordinal < 3; ordinal++ { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMMasterMemberType, cluster.GetName(), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + podIndexer.Add(pod) + } + for ordinal := 0; ordinal < 3; ordinal++ { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPVCName(v1alpha1.DMMasterMemberType, 
controller.DMMasterMemberName(cluster.GetName()), int32(ordinal)), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(cluster.GetInstanceName()).DMMaster().Labels(), + }, + } + pvcIndexer.Add(pvc) + } + }, + leaderInfo: dmapi.MembersLeader{ + Name: "test-dm-master-0", + Addr: "http://test-dm-master-0:8261", + }, + masterInfos: []*dmapi.MastersInfo{ + {Name: "test-dm-master-0", MemberID: "1", ClientURLs: []string{"http://test-dm-master-0:8261"}, Alive: false}, + {Name: "test-dm-master-1", MemberID: "2", ClientURLs: []string{"http://test-dm-master-1:8261"}, Alive: false}, + {Name: "test-dm-master-2", MemberID: "3", ClientURLs: []string{"http://test-dm-master-2:8261"}, Alive: false}, + }, + err: false, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(dc.Status.Master.UnjoinedMembers).To(BeEmpty()) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestMasterShouldRecover(t *testing.T) { + pods := []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "failover-dm-master-0", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "failover-dm-master-1", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + } + podsWithFailover := append(pods, &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover-dm-master-2", + Namespace: v1.NamespaceDefault, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }) + tests := []struct { + name string + dc *v1alpha1.DMCluster + pods []*v1.Pod + want bool + }{ + { + name: "should not recover if no failure members", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Status: v1alpha1.DMClusterStatus{}, + }, + pods: pods, + want: false, + }, + { + name: "should not recover if a member is not healthy", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: false, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + }, + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: pods, + want: false, + }, + { + name: "should recover if all members are ready and healthy", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: true, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + }, + FailureMembers: 
map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: pods, + want: true, + }, + { + name: "should recover if all members are ready and healthy (ignore auto-created failover pods)", + dc: &v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failover", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + Replicas: 2, + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Members: map[string]v1alpha1.MasterMember{ + "failover-dm-master-0": { + Name: "failover-dm-master-0", + Health: true, + }, + "failover-dm-master-1": { + Name: "failover-dm-master-1", + Health: true, + }, + "failover-dm-master-2": { + Name: "failover-dm-master-2", + Health: false, + }, + }, + FailureMembers: map[string]v1alpha1.MasterFailureMember{ + "failover-dm-master-0": { + PodName: "failover-dm-master-0", + }, + }, + }, + }, + }, + pods: podsWithFailover, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client := kubefake.NewSimpleClientset() + for _, pod := range tt.pods { + client.CoreV1().Pods(pod.Namespace).Create(pod) + } + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(client, 0) + podLister := kubeInformerFactory.Core().V1().Pods().Lister() + kubeInformerFactory.Start(ctx.Done()) + kubeInformerFactory.WaitForCacheSync(ctx.Done()) + masterMemberManager := &masterMemberManager{podLister: podLister} + got := masterMemberManager.shouldRecover(tt.dc) + if got != tt.want { + t.Fatalf("wants %v, got %v", tt.want, got) + } + }) + } +} + +func intPtr(i int) *int { + return &i +} + +func hasClusterTLSVol(sts *apps.StatefulSet, volName string) bool { + for _, vol := range sts.Spec.Template.Spec.Volumes { + if vol.Name == volName { + return true + } + } + return false +} + +func hasClusterVolMount(sts *apps.StatefulSet, memberType v1alpha1.MemberType) bool { + var vmName string + switch memberType { + case v1alpha1.DMMasterMemberType: + vmName = "dm-master-tls" + case v1alpha1.DMWorkerMemberType: + vmName = "dm-worker-tls" + default: + return false + } + for _, container := range sts.Spec.Template.Spec.Containers { + if container.Name == memberType.String() { + for _, vm := range container.VolumeMounts { + if vm.Name == vmName { + return true + } + } + } + } + return false +} diff --git a/pkg/manager/member/dm_master_scaler.go b/pkg/manager/member/dm_master_scaler.go index a371c88088..db47735bb4 100644 --- a/pkg/manager/member/dm_master_scaler.go +++ b/pkg/manager/member/dm_master_scaler.go @@ -177,3 +177,33 @@ func (msd *masterScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, n func (msd *masterScaler) SyncAutoScalerAnn(meta metav1.Object, oldSet *apps.StatefulSet) error { return nil } + +type fakeMasterScaler struct{} + +// NewFakeMasterScaler returns a fake Scaler +func NewFakeMasterScaler() Scaler { + return &fakeMasterScaler{} +} + +func (fms *fakeMasterScaler) Scale(meta metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + if *newSet.Spec.Replicas > *oldSet.Spec.Replicas { + return fms.ScaleOut(meta, oldSet, newSet) + } else if *newSet.Spec.Replicas < *oldSet.Spec.Replicas { + return fms.ScaleIn(meta, oldSet, newSet) + } + return nil +} + +func (fms *fakeMasterScaler) ScaleOut(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { 
setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas+1, nil) + return nil +} + +func (fms *fakeMasterScaler) ScaleIn(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil) + return nil +} + +func (fms *fakeMasterScaler) SyncAutoScalerAnn(dc metav1.Object, actual *apps.StatefulSet) error { + return nil +} diff --git a/pkg/manager/member/dm_worker_failover.go b/pkg/manager/member/dm_worker_failover.go index 5d4da53c17..a05cb522b5 100644 --- a/pkg/manager/member/dm_worker_failover.go +++ b/pkg/manager/member/dm_worker_failover.go @@ -97,3 +97,20 @@ func (wf *workerFailover) RemoveUndesiredFailures(dc *v1alpha1.DMCluster) { } } } + +type fakeWorkerFailover struct{} + +// NewFakeWorkerFailover returns a fake Failover +func NewFakeWorkerFailover() DMFailover { + return &fakeWorkerFailover{} +} + +func (fwf *fakeWorkerFailover) Failover(_ *v1alpha1.DMCluster) error { + return nil +} + +func (fwf *fakeWorkerFailover) Recover(_ *v1alpha1.DMCluster) { +} + +func (fwf *fakeWorkerFailover) RemoveUndesiredFailures(_ *v1alpha1.DMCluster) { +} diff --git a/pkg/manager/member/dm_worker_member_manager.go b/pkg/manager/member/dm_worker_member_manager.go index 172482dcc9..a928255cc2 100644 --- a/pkg/manager/member/dm_worker_member_manager.go +++ b/pkg/manager/member/dm_worker_member_manager.go @@ -272,7 +272,7 @@ func (wmm *workerMemberManager) syncDMClusterStatus(dc *v1alpha1.DMCluster, set workersInfo, err := dmClient.GetWorkers() if err != nil { - dc.Status.Master.Synced = false + dc.Status.Worker.Synced = false return err } @@ -345,9 +345,6 @@ func (wmm *workerMemberManager) workerStatefulSetIsUpgrading(set *apps.StatefulS // syncWorkerConfigMap syncs the configmap of dm-worker func (wmm *workerMemberManager) syncWorkerConfigMap(dc *v1alpha1.DMCluster, set *apps.StatefulSet) (*corev1.ConfigMap, error) { - if dc.Spec.Worker.Config == nil { - return nil, nil - } newCm, err := getWorkerConfigMap(dc) if err != nil { return nil, err @@ -360,6 +357,9 @@ func getNewWorkerSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( dcName := dc.Name baseWorkerSpec := dc.BaseWorkerSpec() instanceName := dc.GetInstanceName() + if cm == nil { + return nil, fmt.Errorf("config map for dm-worker is not found, dmcluster %s/%s", dc.Namespace, dc.Name) + } workerConfigMap := cm.Name annMount, annVolume := annotationsMountVolume() @@ -542,10 +542,9 @@ func getNewWorkerSetForDMCluster(dc *v1alpha1.DMCluster, cm *corev1.ConfigMap) ( } func getWorkerConfigMap(dc *v1alpha1.DMCluster) (*corev1.ConfigMap, error) { - // For backward compatibility, only sync dm configmap when .worker.config is non-nil config := dc.Spec.Worker.Config if config == nil { - return nil, nil + config = &v1alpha1.WorkerConfig{} } // override CA if tls enabled @@ -597,3 +596,22 @@ func isWorkerPodDesired(dc *v1alpha1.DMCluster, podName string) bool { } return ordinals.Has(ordinal) } + +type FakeWorkerMemberManager struct { + err error +} + +func NewFakeWorkerMemberManager() *FakeWorkerMemberManager { + return &FakeWorkerMemberManager{} +} + +func (ftmm *FakeWorkerMemberManager) SetSyncError(err error) { + ftmm.err = err +} + +func (ftmm *FakeWorkerMemberManager) SyncDM(dc *v1alpha1.DMCluster) error { + if ftmm.err != nil { + return ftmm.err + } + return nil +} diff --git a/pkg/manager/member/dm_worker_member_manager_test.go b/pkg/manager/member/dm_worker_member_manager_test.go new file mode 100644 index 0000000000..4d5892335c --- /dev/null +++ 
b/pkg/manager/member/dm_worker_member_manager_test.go @@ -0,0 +1,1236 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/pingcap/tidb-operator/pkg/label" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/intstr" + + . "github.com/onsi/gomega" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/controller" + "github.com/pingcap/tidb-operator/pkg/dmapi" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubeinformers "k8s.io/client-go/informers" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestWorkerMemberManagerSyncCreate(t *testing.T) { + g := NewGomegaWithT(t) + + type result struct { + sync error + svc *corev1.Service + getSvc error + set *appsv1.StatefulSet + getSet error + cm *corev1.ConfigMap + getCm error + } + + type testcase struct { + name string + prepare func(cluster *v1alpha1.DMCluster) + errOnCreateSet bool + errOnCreateCm bool + errOnCreateSvc bool + expectFn func(*GomegaWithT, *result) + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + if test.prepare != nil { + test.prepare(dc) + } + + wmm, ctls, _, _ := newFakeWorkerMemberManager() + + if test.errOnCreateSet { + ctls.set.SetCreateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnCreateSvc { + ctls.svc.SetCreateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnCreateCm { + ctls.generic.SetCreateOrUpdateError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + syncErr := wmm.SyncDM(dc) + svc, getSvcErr := wmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + set, getStsErr := wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + + cmName := controller.DMWorkerMemberName(dcName) + if dc.Spec.Worker != nil { + cmGen, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + cmName = cmGen.Name + g.Expect(strings.HasPrefix(cmName, controller.DMWorkerMemberName(dcName))).To(BeTrue()) + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: cmName}} + key, err := client.ObjectKeyFromObject(cm) + g.Expect(err).To(Succeed()) + getCmErr := ctls.generic.FakeCli.Get(context.TODO(), key, cm) + result := result{syncErr, svc, getSvcErr, set, getStsErr, cm, getCmErr} + test.expectFn(g, &result) + } + + tests := []*testcase{ + { + name: "basic", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + 
g.Expect(r.getCm).To(Succeed()) + g.Expect(r.getSet).To(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + { + name: "do not sync if dm-worker spec is nil", + prepare: func(dc *v1alpha1.DMCluster) { + dc.Spec.Worker = nil + }, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getSvc).NotTo(Succeed()) + }, + }, + { + name: "error when create dm-worker statefulset", + prepare: nil, + errOnCreateSet: true, + errOnCreateCm: false, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).To(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + { + name: "error when create dm-worker peer service", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: false, + errOnCreateSvc: true, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSvc).NotTo(Succeed()) + }, + }, + { + name: "error when create dm-worker configmap", + prepare: nil, + errOnCreateSet: false, + errOnCreateCm: true, + errOnCreateSvc: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.getSet).NotTo(Succeed()) + g.Expect(r.getCm).NotTo(Succeed()) + g.Expect(r.getSvc).To(Succeed()) + }, + }, + } + + for _, tt := range tests { + testFn(tt, t) + } +} + +func TestWorkerMemberManagerSyncUpdate(t *testing.T) { + g := NewGomegaWithT(t) + + type result struct { + sync error + oldSvc *corev1.Service + svc *corev1.Service + getSvc error + oldSet *appsv1.StatefulSet + set *appsv1.StatefulSet + getSet error + oldCm *corev1.ConfigMap + cm *corev1.ConfigMap + getCm error + } + type testcase struct { + name string + prepare func(*v1alpha1.DMCluster, *workerFakeIndexers) + errOnUpdateSet bool + errOnUpdateCm bool + errOnUpdateSvc bool + expectFn func(*GomegaWithT, *result) + workerInfos []*dmapi.WorkersInfo + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + + mmm, ctls, indexers, fakeMasterControl := newFakeWorkerMemberManager() + + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.workerInfos, nil + }) + + if test.errOnUpdateSet { + ctls.set.SetUpdateStatefulSetError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnUpdateSvc { + ctls.svc.SetUpdateServiceError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + if test.errOnUpdateCm { + ctls.generic.SetCreateOrUpdateError(errors.NewInternalError(fmt.Errorf("API server failed")), 0) + } + + oldCm, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + oldSvc := getNewWorkerHeadlessServiceForDMCluster(dc) + oldSvc.Spec.Ports[0].Port = 8888 + oldSet, err := getNewWorkerSetForDMCluster(dc, oldCm) + g.Expect(err).To(Succeed()) + + g.Expect(indexers.set.Add(oldSet)).To(Succeed()) + g.Expect(indexers.svc.Add(oldSvc)).To(Succeed()) + + g.Expect(ctls.generic.AddObject(oldCm)).To(Succeed()) + + if test.prepare != nil { + test.prepare(dc, indexers) + } + + syncErr := mmm.SyncDM(dc) + svc, getSvcErr := 
mmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + set, getStsErr := mmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + + cmName := controller.DMWorkerMemberName(dcName) + if dc.Spec.Worker != nil { + cmGen, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + cmName = cmGen.Name + g.Expect(strings.HasPrefix(cmName, controller.DMWorkerMemberName(dcName))).To(BeTrue()) + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: cmName}} + key, err := client.ObjectKeyFromObject(cm) + g.Expect(err).To(Succeed()) + getCmErr := ctls.generic.FakeCli.Get(context.TODO(), key, cm) + result := result{syncErr, oldSvc, svc, getSvcErr, oldSet, set, getStsErr, oldCm, cm, getCmErr} + test.expectFn(g, &result) + } + + tests := []*testcase{ + { + name: "basic", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + errOnUpdateSvc: false, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(4))) + }, + workerInfos: nil, + }, + { + name: "error on update configmap", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: true, + errOnUpdateSvc: false, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + { + name: "error on update service", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + errOnUpdateSvc: true, + errOnUpdateSet: false, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).To(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).NotTo(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + { + name: "error on update statefulset", + prepare: func(dc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + dc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + 
dc.Spec.Worker.Replicas = 4 + }, + errOnUpdateCm: false, + errOnUpdateSvc: false, + errOnUpdateSet: true, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).NotTo(Succeed()) + g.Expect(r.svc.Spec.Ports[0].Port).NotTo(Equal(int32(8888))) + g.Expect(r.cm.Data["config-file"]).To(ContainSubstring("keepalive-ttl")) + g.Expect(*r.set.Spec.Replicas).To(Equal(int32(3))) + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + } + + for _, tt := range tests { + testFn(tt, t) + } +} + +func TestWorkerMemberManagerWorkerStatefulSetIsUpgrading(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + setUpdate func(*appsv1.StatefulSet) + hasPod bool + updatePod func(*corev1.Pod) + errExpectFn func(*GomegaWithT, error) + expectUpgrading bool + } + testFn := func(test *testcase, t *testing.T) { + mmm, _, indexers, _ := newFakeWorkerMemberManager() + dc := newDMClusterForWorker() + dc.Status.Worker.StatefulSet = &appsv1.StatefulSetStatus{ + UpdateRevision: "v3", + } + + set := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: metav1.NamespaceDefault, + }, + } + if test.setUpdate != nil { + test.setUpdate(set) + } + + if test.hasPod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ordinalPodName(v1alpha1.DMWorkerMemberType, dc.GetName(), 0), + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{}, + Labels: label.NewDM().Instance(dc.GetInstanceName()).DMWorker().Labels(), + }, + } + if test.updatePod != nil { + test.updatePod(pod) + } + indexers.pod.Add(pod) + } + b, err := mmm.workerStatefulSetIsUpgrading(set, dc) + if test.errExpectFn != nil { + test.errExpectFn(g, err) + } + if test.expectUpgrading { + g.Expect(b).To(BeTrue()) + } else { + g.Expect(b).NotTo(BeTrue()) + } + } + tests := []testcase{ + { + name: "stateful set is upgrading", + setUpdate: func(set *appsv1.StatefulSet) { + set.Status.CurrentRevision = "v1" + set.Status.UpdateRevision = "v2" + set.Status.ObservedGeneration = 1000 + }, + hasPod: false, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod don't have revision hash", + setUpdate: nil, + hasPod: true, + updatePod: nil, + errExpectFn: nil, + expectUpgrading: false, + }, + { + name: "pod have revision hash, not equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[appsv1.ControllerRevisionHashLabelKey] = "v2" + }, + errExpectFn: nil, + expectUpgrading: true, + }, + { + name: "pod have revision hash, equal statefulset's", + setUpdate: nil, + hasPod: true, + updatePod: func(pod *corev1.Pod) { + pod.Labels[appsv1.ControllerRevisionHashLabelKey] = "v3" + }, + errExpectFn: nil, + expectUpgrading: false, + }, + } + + for i := range tests { + t.Logf(tests[i].name) + testFn(&tests[i], t) + } +} + +func TestWorkerMemberManagerUpgrade(t *testing.T) { + g := NewGomegaWithT(t) + type testcase struct { + name string + modify func(cluster *v1alpha1.DMCluster) + workerInfos []*dmapi.WorkersInfo + err bool + statusChange func(*appsv1.StatefulSet) + expectStatefulSetFn func(*GomegaWithT, *appsv1.StatefulSet, error) + expectDMClusterFn func(*GomegaWithT, *v1alpha1.DMCluster) + } + + testFn := func(test *testcase, t *testing.T) { + dc := newDMClusterForWorker() + ns := 
dc.Namespace + dcName := dc.Name + + wmm, ctls, _, fakeMasterControl := newFakeWorkerMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.workerInfos, nil + }) + + ctls.set.SetStatusChange(test.statusChange) + + err := wmm.SyncDM(dc) + g.Expect(err).To(Succeed()) + + _, err = wmm.svcLister.Services(ns).Get(controller.DMWorkerPeerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + _, err = wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + g.Expect(err).NotTo(HaveOccurred()) + + dc1 := dc.DeepCopy() + test.modify(dc1) + + err = wmm.SyncDM(dc1) + if test.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + + if test.expectStatefulSetFn != nil { + set, err := wmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + test.expectStatefulSetFn(g, set, err) + } + if test.expectDMClusterFn != nil { + test.expectDMClusterFn(g, dc1) + } + } + tests := []testcase{ + { + name: "upgrade successful", + modify: func(cluster *v1alpha1.DMCluster) { + cluster.Spec.Worker.BaseImage = "dm-test-image-2" + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateBound, Source: "mysql1"}, + }, + err: false, + statusChange: func(set *appsv1.StatefulSet) { + set.Status.Replicas = *set.Spec.Replicas + set.Status.CurrentRevision = "dm-worker-1" + set.Status.UpdateRevision = "dm-worker-1" + observedGeneration := int64(1) + set.Status.ObservedGeneration = observedGeneration + }, + expectStatefulSetFn: func(g *GomegaWithT, set *appsv1.StatefulSet, err error) { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(set.Spec.Template.Spec.Containers[0].Image).To(Equal("dm-test-image-2:v2.0.0-rc.2")) + }, + expectDMClusterFn: func(g *GomegaWithT, dc *v1alpha1.DMCluster) { + g.Expect(len(dc.Status.Worker.Members)).To(Equal(3)) + g.Expect(dc.Status.Worker.Members["worker1"].Stage).To(Equal(v1alpha1.DMWorkerStateFree)) + g.Expect(dc.Status.Worker.Members["worker2"].Stage).To(Equal(v1alpha1.DMWorkerStateFree)) + g.Expect(dc.Status.Worker.Members["worker3"].Stage).To(Equal(v1alpha1.DMWorkerStateBound)) + }, + }, + } + for i := range tests { + t.Logf("begin: %s", tests[i].name) + testFn(&tests[i], t) + t.Logf("end: %s", tests[i].name) + } +} + +func TestWorkerSyncConfigUpdate(t *testing.T) { + g := NewGomegaWithT(t) + + type result struct { + sync error + oldSet *appsv1.StatefulSet + set *appsv1.StatefulSet + getSet error + oldCm *corev1.ConfigMap + cms []corev1.ConfigMap + listCm error + } + type testcase struct { + name string + prepare func(*v1alpha1.DMCluster, *workerFakeIndexers) + expectFn func(*GomegaWithT, *result) + workerInfos []*dmapi.WorkersInfo + } + + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + dc := newDMClusterForWorker() + ns := dc.Namespace + dcName := dc.Name + + mmm, controls, indexers, fakeMasterControl := newFakeWorkerMemberManager() + masterClient := controller.NewFakeMasterClient(fakeMasterControl, dc) + masterClient.AddReaction(dmapi.GetWorkersActionType, func(action *dmapi.Action) (interface{}, error) { + return test.workerInfos, nil + }) + + oldCm, err := getWorkerConfigMap(dc) + g.Expect(err).To(Succeed()) + 
oldSvc := getNewWorkerHeadlessServiceForDMCluster(dc) + oldSvc.Spec.Ports[0].Port = 8888 + oldSet, err := getNewWorkerSetForDMCluster(dc, oldCm) + g.Expect(err).To(Succeed()) + + g.Expect(indexers.set.Add(oldSet)).To(Succeed()) + g.Expect(indexers.svc.Add(oldSvc)).To(Succeed()) + g.Expect(controls.generic.AddObject(oldCm)).To(Succeed()) + + if test.prepare != nil { + test.prepare(dc, indexers) + } + + syncErr := mmm.SyncDM(dc) + set, getStsErr := mmm.setLister.StatefulSets(ns).Get(controller.DMWorkerMemberName(dcName)) + cmList := &corev1.ConfigMapList{} + g.Expect(err).To(Succeed()) + listCmErr := controls.generic.FakeCli.List(context.TODO(), cmList) + result := result{syncErr, oldSet, set, getStsErr, oldCm, cmList.Items, listCmErr} + test.expectFn(g, &result) + } + + tests := []*testcase{ + { + name: "basic", + prepare: func(tc *v1alpha1.DMCluster, _ *workerFakeIndexers) { + tc.Spec.Worker.Config = &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + } + }, + expectFn: func(g *GomegaWithT, r *result) { + g.Expect(r.sync).To(Succeed()) + g.Expect(r.listCm).To(Succeed()) + g.Expect(r.cms).To(HaveLen(2)) + g.Expect(r.getSet).To(Succeed()) + using := FindConfigMapVolume(&r.set.Spec.Template.Spec, func(name string) bool { + return strings.HasPrefix(name, controller.DMWorkerMemberName("test")) + }) + g.Expect(using).NotTo(BeEmpty()) + var usingCm *corev1.ConfigMap + for _, cm := range r.cms { + if cm.Name == using { + usingCm = &cm + } + } + g.Expect(usingCm).NotTo(BeNil(), "The configmap used by statefulset must be created") + g.Expect(usingCm.Data["config-file"]).To(ContainSubstring("keepalive-ttl"), + "The configmap used by statefulset should be the latest one") + }, + workerInfos: []*dmapi.WorkersInfo{ + {Name: "worker1", Addr: "http://worker1:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker2", Addr: "http://worker2:8262", Stage: v1alpha1.DMWorkerStateFree}, + {Name: "worker3", Addr: "http://worker3:8262", Stage: v1alpha1.DMWorkerStateFree}, + }, + }, + } + + for _, tt := range tests { + testFn(tt, t) + } +} + +type workerFakeIndexers struct { + svc cache.Indexer + set cache.Indexer + pod cache.Indexer +} + +type workerFakeControls struct { + svc *controller.FakeServiceControl + set *controller.FakeStatefulSetControl + generic *controller.FakeGenericControl +} + +func newFakeWorkerMemberManager() (*workerMemberManager, *workerFakeControls, *workerFakeIndexers, *dmapi.FakeMasterControl) { + // cli := fake.NewSimpleClientset() + kubeCli := kubefake.NewSimpleClientset() + setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() + // dcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().DMClusters() + svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() + epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() + podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) + genericControl := controller.NewFakeGenericControl() + masterControl := dmapi.NewFakeMasterControl(kubeCli) + workerScaler := NewFakeWorkerScaler() + autoFailover := true + workerFailover := NewFakeWorkerFailover() + pmm := &workerMemberManager{ + masterControl, + setControl, + svcControl, + controller.NewTypedControl(genericControl), + setInformer.Lister(), + 
svcInformer.Lister(), + podInformer.Lister(), + workerScaler, + autoFailover, + workerFailover, + } + controls := &workerFakeControls{ + svc: svcControl, + set: setControl, + generic: genericControl, + } + indexers := &workerFakeIndexers{ + svc: svcInformer.Informer().GetIndexer(), + set: setInformer.Informer().GetIndexer(), + pod: podInformer.Informer().GetIndexer(), + } + + return pmm, controls, indexers, masterControl +} + +func newDMClusterForWorker() *v1alpha1.DMCluster { + return &v1alpha1.DMCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "DMCluster", + APIVersion: "pingcap.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: v1alpha1.DMClusterSpec{ + Version: "v2.0.0-rc.2", + Master: v1alpha1.MasterSpec{ + BaseImage: "dm-test-image", + Replicas: 1, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "dm-test-image", + Replicas: 3, + Config: &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("debug"), + KeepAliveTTL: pointer.Int64Ptr(15), + }, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + }, + StorageClassName: pointer.StringPtr("my-storage-class"), + }, + }, + Status: v1alpha1.DMClusterStatus{ + Master: v1alpha1.MasterStatus{ + Synced: true, + Members: map[string]v1alpha1.MasterMember{"test-dm-master-0": { + Name: "test-dm-master-0", + Health: true, + }}, + StatefulSet: &appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + }, + } +} + +func TestGetNewWorkerHeadlessService(t *testing.T) { + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.Service + }{ + { + name: "basic", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + expected: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker-peer", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Ports: []corev1.ServicePort{ + { + Name: "dm-worker", + Port: 8262, + TargetPort: intstr.FromInt(8262), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := getNewWorkerHeadlessServiceForDMCluster(&tt.dc) + if diff := cmp.Diff(tt.expected, *svc); diff != "" { + t.Errorf("unexpected Service (-want, +got): %s", diff) + } + }) + } +} + +func TestGetNewWorkerSetForDMCluster(t *testing.T) { + enable := true + tests := []struct { + name string + dc v1alpha1.DMCluster + wantErr bool + nilCM bool + testSts func(sts 
*appsv1.StatefulSet) + }{ + { + name: "dm-worker config map is nil", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + wantErr: true, + nilCM: true, + testSts: nil, + }, + { + name: "dm-worker network is not host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-worker network is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + }, + }, + testSts: testHostNetwork(t, true, v1.DNSClusterFirstWithHostNet), + }, + { + name: "dm-worker network is not host when dm-master is host", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, + }, + }, + Worker: &v1alpha1.WorkerSpec{}, + }, + }, + testSts: testHostNetwork(t, false, ""), + }, + { + name: "dm-worker should respect resources config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + }, + StorageSize: "100Gi", + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(sts.Spec.VolumeClaimTemplates[0].Spec.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("100Gi"), + }, + })) + nameToContainer := MapContainers(&sts.Spec.Template.Spec) + masterContainer := nameToContainer[v1alpha1.DMWorkerMemberType.String()] + g.Expect(masterContainer.Resources).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + }, + })) + }, + }, + { + name: "set custom env", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + Env: []corev1.EnvVar{ + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + { + Name: "TZ", + Value: "ignored", + }, + }, + }, + }, + Master: v1alpha1.MasterSpec{}, + }, + }, + testSts: testContainerEnv(t, 
[]corev1.EnvVar{ + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "CLUSTER_NAME", + Value: "dc", + }, + { + Name: "HEADLESS_SERVICE_NAME", + Value: "dc-dm-worker-peer", + }, + { + Name: "SET_NAME", + Value: "dc-dm-worker", + }, + { + Name: "TZ", + Value: "UTC", + }, + { + Name: "SOURCE1", + Value: "mysql_replica1", + }, + }, + v1alpha1.DMWorkerMemberType, + ), + }, + { + name: "dm version nightly, dm cluster tls is enabled", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tls-nightly", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + }, + Version: "nightly", + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(hasClusterTLSVol(sts, "dm-worker-tls")).To(BeTrue()) + g.Expect(hasClusterVolMount(sts, v1alpha1.DMWorkerMemberType)).To(BeTrue()) + }, + }, + { + name: "dmcluster worker with failureMember", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + BaseImage: "pingcap/dm", + Replicas: 3, + }, + Version: "nightly", + }, + Status: v1alpha1.DMClusterStatus{ + Worker: v1alpha1.WorkerStatus{ + FailureMembers: map[string]v1alpha1.WorkerFailureMember{ + "test": { + PodName: "test", + }, + }, + }, + }, + }, + testSts: func(sts *appsv1.StatefulSet) { + g := NewGomegaWithT(t) + g.Expect(*sts.Spec.Replicas).To(Equal(int32(4))) + }, + }, + { + name: "dm-worker additional containers", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalContainers: []corev1.Container{customSideCarContainers[0]}, + }, + }, + }, + }, + testSts: testAdditionalContainers(t, []corev1.Container{customSideCarContainers[0]}), + }, + { + name: "dm-worker additional volumes", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dc", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Master: v1alpha1.MasterSpec{}, + Worker: &v1alpha1.WorkerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ + AdditionalVolumes: []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + }, + }, + }, + }, + testSts: testAdditionalVolumes(t, []corev1.Volume{{Name: "test", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}), + }, + // TODO add more tests + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var cm *corev1.ConfigMap + if !tt.nilCM { + cm = &corev1.ConfigMap{} + } + sts, err := getNewWorkerSetForDMCluster(&tt.dc, cm) + if (err != nil) != tt.wantErr { + t.Fatalf("error %v, wantErr %v", err, tt.wantErr) + } + if tt.testSts != nil { + tt.testSts(sts) + } + }) + } +} + +func TestGetNewWorkerConfigMap(t *testing.T) { + g := NewGomegaWithT(t) + + tests := []struct { + name string + dc v1alpha1.DMCluster + expected corev1.ConfigMap + }{ + { + name: "empty config", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + Config: nil, + }, + }, + }, + expected: 
corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "config-file": "", + "startup-script": "", + }, + }, + }, + { + name: "rolling update", + dc: v1alpha1.DMCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns", + }, + Spec: v1alpha1.DMClusterSpec{ + Worker: &v1alpha1.WorkerSpec{ + Config: &v1alpha1.WorkerConfig{ + LogLevel: pointer.StringPtr("info"), + KeepAliveTTL: pointer.Int64Ptr(25), + }, + }, + }, + }, + expected: corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-dm-worker", + Namespace: "ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "dm-cluster", + "app.kubernetes.io/managed-by": "tidb-operator", + "app.kubernetes.io/instance": "foo", + "app.kubernetes.io/component": "dm-worker", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "pingcap.com/v1alpha1", + Kind: "DMCluster", + Name: "foo", + UID: "", + Controller: func(b bool) *bool { + return &b + }(true), + BlockOwnerDeletion: func(b bool) *bool { + return &b + }(true), + }, + }, + }, + Data: map[string]string{ + "config-file": `log-level = "info" +keepalive-ttl = 25 +`, + "startup-script": "", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm, err := getWorkerConfigMap(&tt.dc) + g.Expect(err).To(Succeed()) + g.Expect(strings.HasPrefix(cm.Name, "foo-dm-worker")).To(BeTrue()) + tt.expected.Name = cm.Name + // startup-script is better to be validated in e2e test + cm.Data["startup-script"] = "" + if diff := cmp.Diff(tt.expected, *cm); diff != "" { + t.Errorf("unexpected ConfigMap (-want, +got): %s", diff) + } + }) + } +} diff --git a/pkg/manager/member/dm_worker_scaler.go b/pkg/manager/member/dm_worker_scaler.go index 3fa3dda3c2..91d9961b14 100644 --- a/pkg/manager/member/dm_worker_scaler.go +++ b/pkg/manager/member/dm_worker_scaler.go @@ -132,3 +132,33 @@ func (wsd *workerScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, n func (wsd *workerScaler) SyncAutoScalerAnn(meta metav1.Object, oldSet *apps.StatefulSet) error { return nil } + +type fakeWorkerScaler struct{} + +// NewFakeWorkerScaler returns a fake Scaler +func NewFakeWorkerScaler() Scaler { + return &fakeWorkerScaler{} +} + +func (fws *fakeWorkerScaler) Scale(meta metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + if *newSet.Spec.Replicas > *oldSet.Spec.Replicas { + return fws.ScaleOut(meta, oldSet, newSet) + } else if *newSet.Spec.Replicas < *oldSet.Spec.Replicas { + return fws.ScaleIn(meta, oldSet, newSet) + } + return nil +} + +func (fws *fakeWorkerScaler) ScaleOut(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas+1, nil) + return nil +} + +func (fws *fakeWorkerScaler) ScaleIn(_ metav1.Object, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error { + setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil) + return nil +} + +func (fws *fakeWorkerScaler) SyncAutoScalerAnn(dc metav1.Object, 
actual *apps.StatefulSet) error { + return nil +} diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index f459aedde6..11334b747f 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -23,8 +23,6 @@ import ( . "github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -740,16 +738,14 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { } func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDControl, cache.Indexer, cache.Indexer, *controller.FakePodControl) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podControl := controller.NewFakePodControl(podInformer) pdControl := pdapi.NewFakePDControl(kubeCli) pdScaler := NewFakePDScaler() @@ -917,11 +913,11 @@ func testAnnotations(t *testing.T, annotations map[string]string) func(sts *apps } } -func testPDContainerEnv(t *testing.T, env []corev1.EnvVar) func(sts *apps.StatefulSet) { +func testContainerEnv(t *testing.T, env []corev1.EnvVar, memberType v1alpha1.MemberType) func(sts *apps.StatefulSet) { return func(sts *apps.StatefulSet) { got := []corev1.EnvVar{} for _, c := range sts.Spec.Template.Spec.Containers { - if c.Name == v1alpha1.PDMemberType.String() { + if c.Name == memberType.String() { got = c.Env } } @@ -1113,7 +1109,7 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { TiDB: &v1alpha1.TiDBSpec{}, }, }, - testSts: testPDContainerEnv(t, []corev1.EnvVar{ + testSts: testContainerEnv(t, []corev1.EnvVar{ { Name: "NAMESPACE", ValueFrom: &corev1.EnvVarSource{ @@ -1148,7 +1144,9 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { }, }, }, - }), + }, + v1alpha1.PDMemberType, + ), }, { name: "tidb version v3.1.0, tidb client tls is enabled", diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go index a14064b7ca..c7514091f4 100644 --- a/pkg/manager/member/pump_member_manager_test.go +++ b/pkg/manager/member/pump_member_manager_test.go @@ -444,8 +444,8 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() cmInformer := 
kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) cmControl := controller.NewFakeConfigMapControl(cmInformer) genericControl := controller.NewFakeGenericControl() pmm := &pumpMemberManager{ diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index 39a3e7d2d0..0194810ea2 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -802,8 +802,8 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets() cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) genericControl := controller.NewFakeGenericControl() tidbUpgrader := NewFakeTiDBUpgrader() tidbFailover := NewFakeTiDBFailover() diff --git a/pkg/manager/member/tiflash_member_manager_test.go b/pkg/manager/member/tiflash_member_manager_test.go index a784bc1fe7..802ca51e02 100644 --- a/pkg/manager/member/tiflash_member_manager_test.go +++ b/pkg/manager/member/tiflash_member_manager_test.go @@ -23,8 +23,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -1127,16 +1125,14 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { func newFakeTiFlashMemberManager(tc *v1alpha1.TidbCluster) ( *tiflashMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDClient, cache.Indexer, cache.Indexer) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() pdControl := pdapi.NewFakePDControl(kubeCli) pdClient := controller.NewFakePDClient(pdControl, tc) setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() nodeInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Nodes() tiflashScaler := NewFakeTiFlashScaler() diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go index 5fc44da47b..d7a69af3d3 100644 --- a/pkg/manager/member/tikv_member_manager_test.go +++ b/pkg/manager/member/tikv_member_manager_test.go @@ -23,8 +23,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake" - informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/pdapi" @@ -1481,16 +1479,14 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { func newFakeTiKVMemberManager(tc *v1alpha1.TidbCluster) ( *tikvMemberManager, *controller.FakeStatefulSetControl, *controller.FakeServiceControl, *pdapi.FakePDClient, cache.Indexer, cache.Indexer) { - cli := fake.NewSimpleClientset() kubeCli := kubefake.NewSimpleClientset() pdControl := pdapi.NewFakePDControl(kubeCli) pdClient := controller.NewFakePDClient(pdControl, tc) setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() - tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() - setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) + setControl := controller.NewFakeStatefulSetControl(setInformer) + svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer) podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() nodeInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Nodes() tikvScaler := NewFakeTiKVScaler() diff --git a/pkg/manager/meta/reclaim_policy_manager.go b/pkg/manager/meta/reclaim_policy_manager.go index e95b31427e..c5e8c87a1c 100644 --- a/pkg/manager/meta/reclaim_policy_manager.go +++ b/pkg/manager/meta/reclaim_policy_manager.go @@ -150,3 +150,7 @@ func (frpm *FakeReclaimPolicyManager) SetSyncError(err error) { func (frpm *FakeReclaimPolicyManager) Sync(_ *v1alpha1.TidbCluster) error { return frpm.err } + +func (frpm *FakeReclaimPolicyManager) SyncDM(_ *v1alpha1.DMCluster) error { + return frpm.err +}