Fix TidbMonitor Service Label #2051

Merged (15 commits) on Mar 30, 2020
4 changes: 3 additions & 1 deletion pkg/controller/pv_control.go
@@ -135,7 +135,9 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste
pv.Labels[label.NamespaceLabelKey] = ns
pv.Labels[label.ComponentLabelKey] = component
pv.Labels[label.NameLabelKey] = pvc.Labels[label.NameLabelKey]
pv.Labels[label.ManagedByLabelKey] = pvc.Labels[label.ManagedByLabelKey]
// TidbMonitor's pod labels don't have the managed-by label, in order to stay compatible with the 1.0 release monitor,
// so we add the managed-by label with the value `tidb-operator` manually.
pv.Labels[label.ManagedByLabelKey] = "tidb-operator"
pv.Labels[label.InstanceLabelKey] = pvc.Labels[label.InstanceLabelKey]

setIfNotEmpty(pv.Labels, label.ClusterIDLabelKey, clusterID)
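For context, a rough, hypothetical sketch (not part of the diff) of the label set a TidbMonitor PV ends up with after UpdateMetaInfo runs with this change. The concrete values mirror the e2e assertions further down in this PR, and the key constants are assumed to be the ones from pkg/label used above.

// Sketch only: expected PV labels for a TidbMonitor named "e2e-monitor".
expectedPVLabels := map[string]string{
	label.NameLabelKey:      "tidb-cluster",       // copied from the monitor PVC
	label.InstanceLabelKey:  "e2e-monitor",        // copied from the monitor PVC
	label.ComponentLabelKey: label.TiDBMonitorVal, // monitor component
	label.ManagedByLabelKey: "tidb-operator",      // hard-coded here, since the monitor PVC has no managed-by label
}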
7 changes: 4 additions & 3 deletions pkg/label/label.go
@@ -191,11 +191,12 @@ func NewBackupSchedule() Label {
}
}

// NewMonitor initialize a new label for monitor of tidb-monitor
// NewMonitor initializes a new label for the monitor of a TidbMonitor.
// Because the monitor deployment's label selector in the 1.0 tidbcluster release has no managed-by label (a historical bug),
// we have to leave the managed-by label out of NewMonitor to stay compatible with it.
func NewMonitor() Label {
return Label{
NameLabelKey: TiDBMonitorVal,
ManagedByLabelKey: TiDBOperator,
NameLabelKey: "tidb-cluster",
}
}

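To make the behavioral change concrete, a short sketch of what NewMonitor now yields, assuming Instance() and Monitor() populate InstanceLabelKey and ComponentLabelKey the same way they do elsewhere in this package (sketch only, not part of the diff):

// Before this PR, NewMonitor() also set ManagedByLabelKey and used
// TiDBMonitorVal as the name label. After this PR:
labels := NewMonitor().Instance("basic").Monitor().Labels()
// labels => NameLabelKey: "tidb-cluster", InstanceLabelKey: "basic",
//           ComponentLabelKey: TiDBMonitorVal
// There is deliberately no ManagedByLabelKey, so a selector built from this
// set also matches monitor pods created by the 1.0 release.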
60 changes: 23 additions & 37 deletions pkg/monitor/monitor/util.go
@@ -37,6 +37,10 @@ func GetMonitorObjectName(monitor *v1alpha1.TidbMonitor) string {
return fmt.Sprintf("%s-monitor", monitor.Name)
}

func buildTidbMonitorLabel(name string) map[string]string {
return label.NewMonitor().Instance(name).Monitor().Labels()
}

// getMonitorConfigMap generates the Prometheus config and Grafana config for a TidbMonitor.
// If the namespace in ClusterRef is empty, we default it to the TidbMonitor's namespace.
func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor) (*core.ConfigMap, error) {
@@ -70,12 +74,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
return nil, err
}

monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
cm := &core.ConfigMap{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string]string{
@@ -89,12 +92,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
}

func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string][]byte{
@@ -105,25 +107,23 @@ func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
}

func getMonitorServiceAccount(monitor *v1alpha1.TidbMonitor) *core.ServiceAccount {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
sa := &core.ServiceAccount{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
}
return sa
}

func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor) *rbac.ClusterRole {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.ClusterRole{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Rules: []rbac.PolicyRule{
@@ -141,12 +141,11 @@ func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor) *rbac.ClusterRole {
}

func getMonitorRole(monitor *v1alpha1.TidbMonitor) *rbac.Role {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.Role{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Rules: []rbac.PolicyRule{
@@ -160,12 +159,11 @@ func getMonitorRole(monitor *v1alpha1.TidbMonitor) *rbac.Role {
}

func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole, monitor *v1alpha1.TidbMonitor) *rbac.ClusterRoleBinding {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.ClusterRoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -185,12 +183,11 @@ func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole,
}

func getMonitorRoleBinding(sa *core.ServiceAccount, role *rbac.Role, monitor *v1alpha1.TidbMonitor) *rbac.RoleBinding {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.RoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -234,15 +231,13 @@ func getMonitorDeployment(sa *core.ServiceAccount, config *core.ConfigMap, secre
}

func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.TidbMonitor) *apps.Deployment {
monitorLabel := label.New().Instance(monitor.Name).Monitor()
replicas := int32(1)
labels := label.NewMonitor().Instance(monitor.Name).Monitor()

deployment := &apps.Deployment{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Annotations,
},
@@ -252,11 +247,11 @@ func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.Tid
Type: apps.RecreateDeploymentStrategyType,
},
Selector: &meta.LabelSelector{
MatchLabels: labels,
MatchLabels: buildTidbMonitorLabel(monitor.Name),
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: labels,
Labels: buildTidbMonitorLabel(monitor.Name),
},

Spec: core.PodSpec{
@@ -653,8 +648,6 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc

func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
var services []*core.Service
monitorLabel := label.New().Instance(monitor.Name).Monitor()
labels := label.NewMonitor().Instance(monitor.Name).Monitor()

reloaderPortName := "tcp-reloader"
prometheusPortName := "http-prometheus"
@@ -675,7 +668,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: promethuesName,
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
Expand All @@ -689,7 +682,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
},
},
Type: monitor.Spec.Prometheus.Service.Type,
Selector: labels,
Selector: buildTidbMonitorLabel(monitor.Name),
},
}
if monitor.BasePrometheusSpec().ServiceType() == core.ServiceTypeLoadBalancer {
Expand All @@ -702,7 +695,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: fmt.Sprintf("%s-monitor-reloader", monitor.Name),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
Expand All @@ -715,11 +708,8 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(9089),
},
},
Type: monitor.Spec.Reloader.Service.Type,
Selector: map[string]string{
label.InstanceLabelKey: monitor.Name,
label.ComponentLabelKey: label.TiDBMonitorVal,
},
Type: monitor.Spec.Reloader.Service.Type,
Selector: buildTidbMonitorLabel(monitor.Name),
},
}

Expand All @@ -735,7 +725,7 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
ObjectMeta: meta.ObjectMeta{
Name: fmt.Sprintf("%s-grafana", monitor.Name),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Grafana.Service.Annotations,
},
Expand All @@ -748,11 +738,8 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(3000),
},
},
Type: monitor.Spec.Grafana.Service.Type,
Selector: map[string]string{
label.InstanceLabelKey: monitor.Name,
label.ComponentLabelKey: label.TiDBMonitorVal,
},
Type: monitor.Spec.Grafana.Service.Type,
Selector: buildTidbMonitorLabel(monitor.Name),
},
}

Expand All @@ -768,12 +755,11 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
}

func getMonitorPVC(monitor *v1alpha1.TidbMonitor) *core.PersistentVolumeClaim {
monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &core.PersistentVolumeClaim{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
Labels: monitorLabel,
Labels: buildTidbMonitorLabel(monitor.Name),
Annotations: monitor.Spec.Annotations,
},

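The net effect of the util.go changes above is that the monitor Deployment's pod template labels, its selector, and every monitor Service selector are all produced by the same buildTidbMonitorLabel helper, so the Services match the monitor pod by construction. A minimal sketch of the resulting map, assuming the pkg/label key constants are the usual app.kubernetes.io/* values and TiDBMonitorVal resolves to the monitor component value:

// Sketch only: what buildTidbMonitorLabel("basic") is expected to return
// (key names and the "monitor" value are assumptions, not taken from this diff).
selector := map[string]string{
	"app.kubernetes.io/name":      "tidb-cluster", // from NewMonitor after this PR
	"app.kubernetes.io/instance":  "basic",        // from Instance(name)
	"app.kubernetes.io/component": "monitor",      // from Monitor(), assumed value of TiDBMonitorVal
}

This one map is used for the Deployment's spec.selector.matchLabels and pod template labels as well as each Service's spec.selector, replacing the several slightly different hand-built label sets in the previous version.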
34 changes: 22 additions & 12 deletions tests/e2e/tidbcluster/tidbcluster.go
@@ -753,18 +753,28 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
pvName := pvc.Spec.VolumeName
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
framework.ExpectNoError(err, "Expected fetch tidbmonitor pv success")
value, existed := pv.Labels[label.ComponentLabelKey]
framework.ExpectEqual(existed, true)
framework.ExpectEqual(value, label.TiDBMonitorVal)
value, existed = pv.Labels[label.InstanceLabelKey]
framework.ExpectEqual(existed, true)
framework.ExpectEqual(value, "e2e-monitor")
value, existed = pv.Labels[label.InstanceLabelKey]
framework.ExpectEqual(existed, true)
framework.ExpectEqual(value, "e2e-monitor")
value, existed = pv.Labels[label.ManagedByLabelKey]
framework.ExpectEqual(existed, true)
framework.ExpectEqual(value, label.TiDBOperator)

err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
// Re-fetch the PV on each iteration: the operator backfills these labels
// asynchronously, so checking a stale copy would never observe the update.
pv, err = c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
return false, nil
}
value, existed := pv.Labels[label.ComponentLabelKey]
if !existed || value != label.TiDBMonitorVal {
return false, nil
}
value, existed = pv.Labels[label.InstanceLabelKey]
if !existed || value != "e2e-monitor" {
return false, nil
}
value, existed = pv.Labels[label.NameLabelKey]
if !existed || value != "tidb-cluster" {
return false, nil
}
value, existed = pv.Labels[label.ManagedByLabelKey]
if !existed || value != label.TiDBOperator {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "monitor pv label error")

// update TidbMonitor and check whether portName is updated and the nodePort is unchanged
tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})