Fix TidbMonitor Service Label (#2051) (#2065)
* fix label selector and deployment util

* Revert "fix label selector and deployment util"

This reverts commit 727988d.

* fix labels

* fix label

* fmt lint

* fix pv label

* fix monitor e2e

* add pvc label manually

* revert pv change

* revise monitor label

* fix selector

* fix e2e

Co-authored-by: Song Gao <[email protected]>
sre-bot and Song Gao authored Mar 30, 2020
1 parent f6937cb commit f4fb19d
Showing 3 changed files with 57 additions and 51 deletions.
4 changes: 2 additions & 2 deletions pkg/label/label.go
@@ -191,10 +191,10 @@ func NewBackupSchedule() Label {
}
}

// NewMonitor initializes a new label for a tidb-monitor
func NewMonitor() Label {
return Label{
- NameLabelKey: TiDBMonitorVal,
+ // NameLabelKey is kept as "tidb-cluster" to stay compatible with the helm-deployed monitor
+ NameLabelKey: "tidb-cluster",
ManagedByLabelKey: TiDBOperator,
}
}
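For context, the full label set on monitor objects after this change comes from NewMonitor plus the Instance and Monitor builder calls used throughout util.go below. A runnable sketch of the expected result, assuming the standard app.kubernetes.io/* key constants and a "monitor" component value (illustrative assumptions, not part of the diff):

package main

import "fmt"

func main() {
    // Sketch of what label.NewMonitor().Instance("basic").Monitor().Labels()
    // is expected to yield after this commit.
    labels := map[string]string{
        "app.kubernetes.io/name":       "tidb-cluster",  // NameLabelKey, kept for helm compatibility
        "app.kubernetes.io/managed-by": "tidb-operator", // ManagedByLabelKey (label.TiDBOperator)
        "app.kubernetes.io/instance":   "basic",         // InstanceLabelKey, set by .Instance(name)
        "app.kubernetes.io/component":  "monitor",       // ComponentLabelKey, set by .Monitor()
    }
    fmt.Println(labels)
}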
70 changes: 33 additions & 37 deletions pkg/monitor/monitor/util.go
@@ -37,6 +37,10 @@ func GetMonitorObjectName(monitor *v1alpha1.TidbMonitor) string {
return fmt.Sprintf("%s-monitor", monitor.Name)
}

+ func buildTidbMonitorLabel(name string) map[string]string {
+     return label.NewMonitor().Instance(name).Monitor().Labels()
+ }

// getMonitorConfigMap generates the Prometheus and Grafana configs for TidbMonitor.
// If the namespace in ClusterRef is empty, it defaults to the TidbMonitor's namespace.
func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor) (*core.ConfigMap, error) {
@@ -70,12 +74,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
return nil, err
}

- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
cm := &core.ConfigMap{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string]string{
@@ -89,12 +92,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
}

func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string][]byte{
@@ -105,25 +107,23 @@ func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
}

func getMonitorServiceAccount(monitor *v1alpha1.TidbMonitor) *core.ServiceAccount {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
sa := &core.ServiceAccount{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
}
return sa
}

func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor) *rbac.ClusterRole {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.ClusterRole{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Rules: []rbac.PolicyRule{
@@ -141,12 +141,11 @@ func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor) *rbac.ClusterRole {
}

func getMonitorRole(monitor *v1alpha1.TidbMonitor) *rbac.Role {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.Role{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Rules: []rbac.PolicyRule{
@@ -160,12 +159,11 @@ func getMonitorRole(monitor *v1alpha1.TidbMonitor) *rbac.Role {
}

func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole, monitor *v1alpha1.TidbMonitor) *rbac.ClusterRoleBinding {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.ClusterRoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -185,12 +183,11 @@ func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole,
}

func getMonitorRoleBinding(sa *core.ServiceAccount, role *rbac.Role, monitor *v1alpha1.TidbMonitor) *rbac.RoleBinding {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.RoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -234,15 +231,13 @@ func getMonitorDeployment(sa *core.ServiceAccount, config *core.ConfigMap, secre
}

func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.TidbMonitor) *apps.Deployment {
- monitorLabel := label.New().Instance(monitor.Name).Monitor()
replicas := int32(1)
- labels := label.NewMonitor().Instance(monitor.Name).Monitor()

deployment := &apps.Deployment{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Annotations,
},
@@ -252,11 +247,11 @@ func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.Tid
Type: apps.RecreateDeploymentStrategyType,
},
Selector: &meta.LabelSelector{
- MatchLabels: labels,
+ MatchLabels: buildTidbMonitorLabel(monitor.Name),
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
- Labels: labels,
+ Labels: buildTidbMonitorLabel(monitor.Name),
},

Spec: core.PodSpec{
@@ -653,13 +648,20 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc

func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
var services []*core.Service
- monitorLabel := label.New().Instance(monitor.Name).Monitor()
- labels := label.NewMonitor().Instance(monitor.Name).Monitor()

reloaderPortName := "tcp-reloader"
prometheusPortName := "http-prometheus"
grafanaPortName := "http-grafana"

+ // Currently the monitor labels don't carry the managedBy label, a historical problem from 1.0.
+ // To stay compatible with monitors deployed by the 1.0 release, the managedBy label is omitted from this selector for now.
+ // The managedBy label key will be added in the 1.2 release.
+ selector := map[string]string{
+     label.InstanceLabelKey:  monitor.Name,
+     label.NameLabelKey:      "tidb-cluster",
+     label.ComponentLabelKey: label.TiDBMonitorVal,
+ }

if monitor.BaseReloaderSpec().PortName() != nil {
reloaderPortName = *monitor.BaseReloaderSpec().PortName()
}
@@ -675,7 +677,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: promethuesName,
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
@@ -689,7 +691,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
},
},
Type: monitor.Spec.Prometheus.Service.Type,
- Selector: labels,
+ Selector: selector,
},
}
if monitor.BasePrometheusSpec().ServiceType() == core.ServiceTypeLoadBalancer {
@@ -702,7 +704,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: fmt.Sprintf("%s-monitor-reloader", monitor.Name),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
@@ -715,11 +717,8 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(9089),
},
},
- Type: monitor.Spec.Reloader.Service.Type,
- Selector: map[string]string{
-     label.InstanceLabelKey:  monitor.Name,
-     label.ComponentLabelKey: label.TiDBMonitorVal,
- },
+ Type:     monitor.Spec.Reloader.Service.Type,
+ Selector: selector,
},
}

@@ -735,7 +734,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: fmt.Sprintf("%s-grafana", monitor.Name),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Grafana.Service.Annotations,
},
@@ -748,11 +747,8 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(3000),
},
},
- Type: monitor.Spec.Grafana.Service.Type,
- Selector: map[string]string{
-     label.InstanceLabelKey:  monitor.Name,
-     label.ComponentLabelKey: label.TiDBMonitorVal,
- },
+ Type:     monitor.Spec.Grafana.Service.Type,
+ Selector: selector,
},
}

@@ -768,12 +764,12 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
}

func getMonitorPVC(monitor *v1alpha1.TidbMonitor) *core.PersistentVolumeClaim {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
+ l := buildTidbMonitorLabel(monitor.Name)
return &core.PersistentVolumeClaim{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: l,
Annotations: monitor.Spec.Annotations,
},

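The heart of the util.go change: object metadata is now stamped via the shared buildTidbMonitorLabel helper (which includes managed-by), while every Service switches to the reduced selector map that drops managed-by, so the Services keep matching pods created by a helm-based 1.0 monitor. A runnable sketch contrasting the two sets (the concrete key names and the "monitor" value are assumptions based on the label constants referenced in the diff):

package main

import "fmt"

func main() {
    // Full metadata labels stamped on monitor objects (buildTidbMonitorLabel).
    metaLabels := map[string]string{
        "app.kubernetes.io/name":       "tidb-cluster",
        "app.kubernetes.io/managed-by": "tidb-operator",
        "app.kubernetes.io/instance":   "e2e-monitor",
        "app.kubernetes.io/component":  "monitor",
    }
    // Service selector: a strict subset that omits managed-by, so pods from a
    // helm-deployed 1.0 monitor (which lack that label) are still selected.
    selector := map[string]string{
        "app.kubernetes.io/name":      "tidb-cluster",
        "app.kubernetes.io/instance":  "e2e-monitor",
        "app.kubernetes.io/component": "monitor",
    }
    for k := range metaLabels {
        if _, ok := selector[k]; !ok {
            fmt.Println("on objects but not in selector:", k) // prints managed-by
        }
    }
}

A selector can only match pods that carry every one of its labels, so deriving both maps from the same label constants keeps them from drifting apart.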
34 changes: 22 additions & 12 deletions tests/e2e/tidbcluster/tidbcluster.go
@@ -756,18 +756,28 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
pvName := pvc.Spec.VolumeName
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
framework.ExpectNoError(err, "Expected fetch tidbmonitor pv success")
- value, existed := pv.Labels[label.ComponentLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, label.TiDBMonitorVal)
- value, existed = pv.Labels[label.InstanceLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, "e2e-monitor")
- value, existed = pv.Labels[label.InstanceLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, "e2e-monitor")
- value, existed = pv.Labels[label.ManagedByLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, label.TiDBOperator)

+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+     value, existed := pv.Labels[label.ComponentLabelKey]
+     if !existed || value != label.TiDBMonitorVal {
+         return false, nil
+     }
+     value, existed = pv.Labels[label.InstanceLabelKey]
+     if !existed || value != "e2e-monitor" {
+         return false, nil
+     }
+
+     value, existed = pv.Labels[label.NameLabelKey]
+     if !existed || value != "tidb-cluster" {
+         return false, nil
+     }
+     value, existed = pv.Labels[label.ManagedByLabelKey]
+     if !existed || value != label.TiDBOperator {
+         return false, nil
+     }
+     return true, nil
+ })
+ framework.ExpectNoError(err, "monitor pv label error")

// update TidbMonitor and check whether portName is updated and the nodePort is unchanged
tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
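The rewritten e2e check polls for up to five minutes instead of asserting once, because the operator applies the PV labels asynchronously. The committed callback closes over the pv object fetched once before the loop; a variant that re-fetches the PV on each iteration (a sketch reusing the surrounding test's variables, not part of the commit) guarantees each attempt observes the operator's latest labeling:

err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
    // Re-fetch so every iteration sees the current PV labels.
    pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
    if err != nil {
        return false, nil // treat errors as transient and keep polling
    }
    expected := map[string]string{
        label.ComponentLabelKey: label.TiDBMonitorVal,
        label.InstanceLabelKey:  "e2e-monitor",
        label.NameLabelKey:      "tidb-cluster",
        label.ManagedByLabelKey: label.TiDBOperator,
    }
    for k, v := range expected {
        if pv.Labels[k] != v {
            return false, nil
        }
    }
    return true, nil
})
framework.ExpectNoError(err, "monitor pv label error")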
