diff --git a/charts/tidb-cluster/templates/NOTES.txt b/charts/tidb-cluster/templates/NOTES.txt
index 4ae6c548cd..b31da27fb6 100644
--- a/charts/tidb-cluster/templates/NOTES.txt
+++ b/charts/tidb-cluster/templates/NOTES.txt
@@ -12,7 +12,7 @@ Cluster Startup
 
 Cluster access
 * Access tidb-cluster using the MySQL client
-    kubectl port-forward -n {{ .Release.Namespace }} svc/{{ .Values.clusterName }}-tidb 4000:4000 &
+    kubectl port-forward -n {{ .Release.Namespace }} svc/{{ template "cluster.name" . }}-tidb 4000:4000 &
   {{- if .Values.tidb.passwordSecretName }}
     mysql -h 127.0.0.1 -P 4000 -u root -D test -p
   {{- else -}}
@@ -22,6 +22,6 @@ Cluster access
   {{- end -}}
 {{- if .Values.monitor.create }}
 * View monitor dashboard for TiDB cluster
-    kubectl port-forward -n {{ .Release.Namespace }} svc/{{ .Values.clusterName }}-grafana 3000:3000
+    kubectl port-forward -n {{ .Release.Namespace }} svc/{{ template "cluster.name" . }}-grafana 3000:3000
     Open browser at http://localhost:3000. The default username and password is admin/admin.
 {{- end -}}
diff --git a/charts/tidb-cluster/templates/_helpers.tpl b/charts/tidb-cluster/templates/_helpers.tpl
index 577ce13793..0791ff1541 100644
--- a/charts/tidb-cluster/templates/_helpers.tpl
+++ b/charts/tidb-cluster/templates/_helpers.tpl
@@ -22,3 +22,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 {{- $wtf := $context.Template.Name | replace $last $name -}}
 {{ include $wtf $context }}
 {{- end -}}
+
+{{- define "cluster.name" -}}
+{{- default .Release.Name .Values.clusterName }}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/tidb-cluster/templates/config/_alert-rules-config.tpl b/charts/tidb-cluster/templates/config/_alert-rules-config.tpl
index 2f657f55c1..0955dc43d3 100644
--- a/charts/tidb-cluster/templates/config/_alert-rules-config.tpl
+++ b/charts/tidb-cluster/templates/config/_alert-rules-config.tpl
@@ -5,7 +5,7 @@ groups:
       expr: sum ( pd_cluster_status{type="store_down_count"} ) > 0
       for: 1m
       labels:
-        env: '{{ .Values.clusterName }}'
+        env: '{{ template "cluster.name" . }}'
         level: emergency
         expr: sum ( pd_cluster_status{type="store_down_count"} ) > 0
       annotations:
@@ -17,7 +17,7 @@ groups:
       expr: histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])) by (instance,job,le) ) > 1
       for: 1m
      labels:
-        env: '{{ .Values.clusterName }}'
+        env: '{{ template "cluster.name" . }}'
         level: critical
         expr: histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])) by (instance,job,le) ) > 1
       annotations:
@@ -29,7 +29,7 @@ groups:
       expr: sum( pd_regions_status{type="miss_peer_region_count"} ) > 100
       for: 1m
       labels:
-        env: '{{ .Values.clusterName }}'
+        env: '{{ template "cluster.name" . }}'
         level: critical
         expr: sum( pd_regions_status{type="miss_peer_region_count"} ) > 100
       annotations:
@@ -41,7 +41,7 @@ groups:
       expr: sum ( pd_cluster_status{type="store_disconnected_count"} ) > 0
       for: 1m
       labels:
-        env: '{{ .Values.clusterName }}'
+        env: '{{ template "cluster.name" . }}'
         level: warning
         expr: sum ( pd_cluster_status{type="store_disconnected_count"} ) > 0
       annotations:
@@ -53,7 +53,7 @@ groups:
       expr: sum ( pd_cluster_status{type="store_low_space_count"} ) > 0
       for: 1m
       labels:
-        env: '{{ .Values.clusterName }}'
+        env: '{{ template "cluster.name" . 
}}' level: warning expr: sum ( pd_cluster_status{type="store_low_space_count"} ) > 0 annotations: @@ -65,7 +65,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(etcd_network_peer_round_trip_time_seconds_bucket[1m])) by (To,instance,job,le) ) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.99, sum(rate(etcd_network_peer_round_trip_time_seconds_bucket[1m])) by (To,instance,job,le) ) > 1 annotations: @@ -77,7 +77,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(pd_client_request_handle_requests_duration_seconds_bucket{type="tso"}[1m])) by (instance,job,le) ) > 0.1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.99, sum(rate(pd_client_request_handle_requests_duration_seconds_bucket{type="tso"}[1m])) by (instance,job,le) ) > 0.1 annotations: @@ -89,7 +89,7 @@ groups: expr: sum ( pd_regions_status{type="down_peer_region_count"} ) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum ( pd_regions_status{type="down_peer_region_count"} ) > 0 annotations: @@ -101,7 +101,7 @@ groups: expr: sum ( pd_regions_status{type="incorrect_namespace_region_count"} ) > 100 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum ( pd_regions_status{type="incorrect_namespace_region_count"} ) > 0 annotations: @@ -113,7 +113,7 @@ groups: expr: sum( pd_regions_status{type="pending_peer_region_count"} ) > 100 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum( pd_regions_status{type="pending_peer_region_count"} ) > 100 annotations: @@ -125,7 +125,7 @@ groups: expr: count( changes(pd_server_tso{type="save"}[10m]) > 0 ) >= 2 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: count( changes(pd_server_tso{type="save"}[10m]) > 0 ) >= 2 annotations: @@ -137,7 +137,7 @@ groups: expr: sum(pd_cluster_status{type="storage_size"}) / sum(pd_cluster_status{type="storage_capacity"}) * 100 > 80 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum(pd_cluster_status{type="storage_size"}) / sum(pd_cluster_status{type="storage_capacity"}) * 100 > 80 annotations: @@ -148,7 +148,7 @@ groups: expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: emergency expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 annotations: @@ -160,7 +160,7 @@ groups: expr: increase( tidb_tikvclient_region_err_total[10m] ) > 6000 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: emergency expr: increase( tidb_tikvclient_region_err_total[10m] ) > 6000 annotations: @@ -172,7 +172,7 @@ groups: expr: increase( tidb_domain_load_schema_total{type="failed"}[10m] ) > 10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: emergency expr: increase( tidb_domain_load_schema_total{type="failed"}[10m] ) > 10 annotations: @@ -184,7 +184,7 @@ groups: expr: increase(tidb_monitor_keep_alive_total[10m]) < 100 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . 
}}' level: emergency expr: increase(tidb_monitor_keep_alive_total[10m]) < 100 annotations: @@ -196,7 +196,7 @@ groups: expr: increase(tidb_server_panic_total[10m]) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: increase(tidb_server_panic_total[10m]) > 0 annotations: @@ -208,7 +208,7 @@ groups: expr: go_memstats_heap_inuse_bytes{job="tidb"} > 1e+10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: go_memstats_heap_inuse_bytes{job="tidb"} > 1e+10 annotations: @@ -220,7 +220,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) BY (le, instance)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.99, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) BY (le, instance)) > 1 annotations: @@ -232,7 +232,7 @@ groups: expr: increase(tidb_server_server_event{type=~"server_start|server_hang"}[15m]) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: increase(tidb_server_server_event{type=~"server_start|server_hang"}[15m]) > 0 annotations: @@ -244,7 +244,7 @@ groups: expr: increase( tidb_tikvclient_backoff_count[10m] ) > 10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: increase( tidb_tikvclient_backoff_count[10m] ) > 10 annotations: @@ -256,7 +256,7 @@ groups: expr: increase(tidb_monitor_time_jump_back_total[10m]) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: increase(tidb_monitor_time_jump_back_total[10m]) > 0 annotations: @@ -268,7 +268,7 @@ groups: expr: sum(tidb_ddl_waiting_jobs) > 5 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum(tidb_ddl_waiting_jobs) > 5 annotations: @@ -280,7 +280,7 @@ groups: expr: (node_memory_MemAvailable offset 5m) - node_memory_MemAvailable > 5*1024*1024*1024 for: 5m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: emergency expr: (node_memory_MemAvailable offset 5m) - node_memory_MemAvailable > 5*1024*1024*1024 annotations: @@ -292,7 +292,7 @@ groups: expr: sum(increase(tidb_tikvclient_gc_action_result{type="success"}[6h])) < 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: emergency expr: sum(increase(tidb_tikvclient_gc_action_result{type="success"}[6h])) < 1 annotations: @@ -304,7 +304,7 @@ groups: expr: sum(rate(tikv_server_report_failure_msg_total{type="unreachable"}[10m])) BY (store_id) > 10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: sum(rate(tikv_server_report_failure_msg_total{type="unreachable"}[10m])) BY (store_id) > 10 annotations: @@ -316,7 +316,7 @@ groups: expr: sum(rate(tikv_channel_full_total[10m])) BY (type, instance) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: sum(rate(tikv_channel_full_total[10m])) BY (type, instance) > 0 annotations: @@ -328,7 +328,7 @@ groups: expr: delta( tikv_engine_write_stall[10m]) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . 
}}' level: critical expr: delta( tikv_engine_write_stall[10m]) > 0 annotations: @@ -340,7 +340,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_log_lag_bucket[1m])) by (le, instance, job)) > 5000 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_log_lag_bucket[1m])) by (le, instance, job)) > 5000 annotations: @@ -352,7 +352,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="snapshot"}[1m])) by (le, instance, job,type)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="snapshot"}[1m])) by (le, instance, job,type)) > 1 annotations: @@ -364,7 +364,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="write"}[1m])) by (le, instance, job,type)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="write"}[1m])) by (le, instance, job,type)) > 1 annotations: @@ -376,7 +376,7 @@ groups: expr: histogram_quantile(0.9999, sum(rate(tikv_coprocessor_request_wait_seconds_bucket[1m])) by (le, instance, job,req)) > 10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.9999, sum(rate(tikv_coprocessor_request_wait_seconds_bucket[1m])) by (le, instance, job,req)) > 10 annotations: @@ -388,7 +388,7 @@ groups: expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"raftstore_.*"}[1m])) by (job, name) > 0.8 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"raftstore_.*"}[1m])) by (job, name) > 0.8 annotations: @@ -400,7 +400,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_append_log_duration_seconds_bucket[1m])) by (le, instance, job)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_append_log_duration_seconds_bucket[1m])) by (le, instance, job)) > 1 annotations: @@ -412,7 +412,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_apply_log_duration_seconds_bucket[1m])) by (le, instance, job)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_raftstore_apply_log_duration_seconds_bucket[1m])) by (le, instance, job)) > 1 annotations: @@ -424,7 +424,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_scheduler_latch_wait_duration_seconds_bucket[1m])) by (le, instance, job,type)) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: histogram_quantile(0.99, sum(rate(tikv_scheduler_latch_wait_duration_seconds_bucket[1m])) by (le, instance, job,type)) > 1 annotations: @@ -437,7 +437,7 @@ groups: expr: sum(rate(tikv_thread_cpu_seconds_total{name="apply_worker"}[1m])) by (job) > 0.9 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . 
}}' level: critical expr: sum(rate(tikv_thread_cpu_seconds_total{name="apply_worker"}[1m])) by (job) > 0.9 annotations: @@ -449,7 +449,7 @@ groups: expr: sum(increase(tidb_tikvclient_gc_action_result{type="fail"}[1m])) > 10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: critical expr: sum(increase(tidb_tikvclient_gc_action_result{type="fail"}[1m])) > 10 annotations: @@ -461,7 +461,7 @@ groups: expr: delta(tikv_pd_heartbeat_tick_total{type="leader"}[30s]) < -10 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: delta(tikv_pd_heartbeat_tick_total{type="leader"}[30s]) < -10 annotations: @@ -473,7 +473,7 @@ groups: expr: histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='ready'}[1m])) by (le, instance, job,type)) > 2 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='ready'}[1m])) by (le, instance, job,type)) > 2 annotations: @@ -485,7 +485,7 @@ groups: expr: histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='tick'}[1m])) by (le, instance, job,type)) > 2 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='tick'}[1m])) by (le, instance, job,type)) > 2 annotations: @@ -497,7 +497,7 @@ groups: expr: abs(delta( tikv_scheduler_contex_total[5m])) > 1000 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: abs(delta( tikv_scheduler_contex_total[5m])) > 1000 annotations: @@ -509,7 +509,7 @@ groups: expr: histogram_quantile(0.99, sum(rate(tikv_scheduler_command_duration_seconds_bucket[1m])) by (le, instance, job,type) / 1000) > 1 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: histogram_quantile(0.99, sum(rate(tikv_scheduler_command_duration_seconds_bucket[1m])) by (le, instance, job,type) / 1000) > 1 annotations: @@ -521,7 +521,7 @@ groups: expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"storage_schedul.*"}[1m])) by (job) > 0.8 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"storage_schedul.*"}[1m])) by (job) > 0.8 annotations: @@ -533,7 +533,7 @@ groups: expr: delta( tikv_coprocessor_outdated_request_wait_seconds_count[10m] ) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: delta( tikv_coprocessor_outdated_request_wait_seconds_count[10m] ) > 0 annotations: @@ -545,7 +545,7 @@ groups: expr: increase(tikv_coprocessor_request_error[10m]) > 100 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: increase(tikv_coprocessor_request_error[10m]) > 100 annotations: @@ -557,7 +557,7 @@ groups: expr: delta( tikv_coprocessor_pending_request[10m]) > 5000 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . 
}}' level: warning expr: delta( tikv_coprocessor_pending_request[10m]) > 5000 annotations: @@ -569,7 +569,7 @@ groups: expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"cop_.*"}[1m])) by (job) / ( count(tikv_thread_cpu_seconds_total{name=~"cop_.*"}) * 0.9 ) / count(count(tikv_thread_cpu_seconds_total) by (instance)) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum(rate(tikv_thread_cpu_seconds_total{name=~"cop_.*"}[1m])) by (job) / ( count(tikv_thread_cpu_seconds_total{name=~"cop_.*"}) * 0.9 ) / count(count(tikv_thread_cpu_seconds_total) by (instance)) > 0 annotations: @@ -581,7 +581,7 @@ groups: expr: sum(tikv_worker_pending_task_total) BY (job,instance,name) > 1000 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: sum(tikv_worker_pending_task_total) BY (job,instance,name) > 1000 annotations: @@ -593,7 +593,7 @@ groups: expr: count( (sum(tikv_store_size_bytes{type="available"}) by (job) / sum(tikv_store_size_bytes{type="capacity"}) by (job) < 0.2) and (sum(tikv_raftstore_snapshot_traffic_total{type="applying"}) by (job) > 0 ) ) > 0 for: 1m labels: - env: '{{ .Values.clusterName }}' + env: '{{ template "cluster.name" . }}' level: warning expr: count( (sum(tikv_store_size_bytes{type="available"}) by (job) / sum(tikv_store_size_bytes{type="capacity"}) by (job) < 0.2) and (sum(tikv_raftstore_snapshot_traffic_total{type="applying"}) by (job) > 0 ) ) > 0 annotations: diff --git a/charts/tidb-cluster/templates/config/_drainer-config.tpl b/charts/tidb-cluster/templates/config/_drainer-config.tpl index addc1d1f3f..59f4c7009a 100644 --- a/charts/tidb-cluster/templates/config/_drainer-config.tpl +++ b/charts/tidb-cluster/templates/config/_drainer-config.tpl @@ -11,7 +11,7 @@ detect-interval = {{ .Values.binlog.drainer.detectInterval | default 10 }} data-dir = "/data" # a comma separated list of PD endpoints -pd-urls = "http://{{ .Values.clusterName }}-pd:2379" +pd-urls = "http://{{ template "cluster.name" . }}-pd:2379" #[security] # Path of file that contains list of trusted SSL CAs for connection with cluster components. diff --git a/charts/tidb-cluster/templates/config/_pump-config.tpl b/charts/tidb-cluster/templates/config/_pump-config.tpl index 3d6cfa2cbd..241f0edd73 100644 --- a/charts/tidb-cluster/templates/config/_pump-config.tpl +++ b/charts/tidb-cluster/templates/config/_pump-config.tpl @@ -17,7 +17,7 @@ data-dir = "/data" heartbeat-interval = {{ .Values.binlog.pump.heartbeatInterval | default 2 }} # a comma separated list of PD endpoints -pd-urls = "http://{{ .Values.clusterName }}-pd:2379" +pd-urls = "http://{{ template "cluster.name" . }}-pd:2379" #[security] # Path of file that contains list of trusted SSL CAs for connection with cluster components. diff --git a/charts/tidb-cluster/templates/discovery-deployment.yaml b/charts/tidb-cluster/templates/discovery-deployment.yaml index c0dc462b25..a661efacb4 100644 --- a/charts/tidb-cluster/templates/discovery-deployment.yaml +++ b/charts/tidb-cluster/templates/discovery-deployment.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery labels: app.kubernetes.io/name: {{ template "chart.name" . 
}} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -27,7 +27,7 @@ spec: serviceAccount: {{ .Values.discovery.serviceAccount }} {{- else }} {{- if .Values.rbac.create }} - serviceAccount: {{ .Values.clusterName }}-discovery + serviceAccount: {{ template "cluster.name" . }}-discovery {{- end }} {{- end }} containers: diff --git a/charts/tidb-cluster/templates/discovery-rbac.yaml b/charts/tidb-cluster/templates/discovery-rbac.yaml index cecd8e84f5..7e49bbdb26 100644 --- a/charts/tidb-cluster/templates/discovery-rbac.yaml +++ b/charts/tidb-cluster/templates/discovery-rbac.yaml @@ -2,7 +2,7 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: {{ .Values.clusterName}}-discovery + name: {{ template "cluster.name" . }}-discovery labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -12,13 +12,13 @@ metadata: rules: - apiGroups: ["pingcap.com"] resources: ["tidbclusters"] - resourceNames: [{{ .Values.clusterName | quote }}] + resourceNames: [{{ template "cluster.name" . }}] verbs: ["get"] --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -27,16 +27,16 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} subjects: - kind: ServiceAccount - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery roleRef: kind: Role - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery apiGroup: rbac.authorization.k8s.io --- kind: ServiceAccount apiVersion: v1 metadata: - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/discovery-service.yaml b/charts/tidb-cluster/templates/discovery-service.yaml index 5b8d3f1500..7d1f8f1cd6 100644 --- a/charts/tidb-cluster/templates/discovery-service.yaml +++ b/charts/tidb-cluster/templates/discovery-service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-discovery + name: {{ template "cluster.name" . }}-discovery labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/drainer-configmap.yaml b/charts/tidb-cluster/templates/drainer-configmap.yaml index a30ab8074c..832fb12879 100644 --- a/charts/tidb-cluster/templates/drainer-configmap.yaml +++ b/charts/tidb-cluster/templates/drainer-configmap.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-drainer + name: {{ template "cluster.name" . }}-drainer labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/drainer-service.yaml b/charts/tidb-cluster/templates/drainer-service.yaml index 6bec774eb2..05bb9bffcc 100644 --- a/charts/tidb-cluster/templates/drainer-service.yaml +++ b/charts/tidb-cluster/templates/drainer-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-drainer + name: {{ template "cluster.name" . 
}}-drainer labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/drainer-statefulset.yaml b/charts/tidb-cluster/templates/drainer-statefulset.yaml index 5ae92cd7ca..89d604001e 100644 --- a/charts/tidb-cluster/templates/drainer-statefulset.yaml +++ b/charts/tidb-cluster/templates/drainer-statefulset.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: {{ .Values.clusterName }}-drainer + name: {{ template "cluster.name" . }}-drainer labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -16,7 +16,7 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: tidb-operator app.kubernetes.io/component: drainer - serviceName: {{ .Values.clusterName }}-drainer + serviceName: {{ template "cluster.name" . }}-drainer replicas: 1 template: metadata: @@ -50,7 +50,7 @@ spec: volumes: - name: config configMap: - name: {{ .Values.clusterName }}-drainer + name: {{ template "cluster.name" . }}-drainer items: - key: drainer-config path: drainer.toml diff --git a/charts/tidb-cluster/templates/monitor-configmap.yaml b/charts/tidb-cluster/templates/monitor-configmap.yaml index 36e7b73040..a1d010d901 100644 --- a/charts/tidb-cluster/templates/monitor-configmap.yaml +++ b/charts/tidb-cluster/templates/monitor-configmap.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/monitor-deployment.yaml b/charts/tidb-cluster/templates/monitor-deployment.yaml index 7dc1d29868..2d7127e02a 100644 --- a/charts/tidb-cluster/templates/monitor-deployment.yaml +++ b/charts/tidb-cluster/templates/monitor-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -27,7 +27,7 @@ spec: serviceAccount: {{ .Values.monitor.serviceAccount }} {{- else }} {{- if .Values.rbac.create }} - serviceAccount: {{ .Values.clusterName }}-monitor + serviceAccount: {{ template "cluster.name" . }}-monitor {{- end }} {{- end }} {{- if .Values.monitor.nodeSelector }} @@ -120,12 +120,12 @@ spec: - name: GF_SECURITY_ADMIN_USER valueFrom: secretKeyRef: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor key: username - name: GF_SECURITY_ADMIN_PASSWORD valueFrom: secretKeyRef: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor key: password - name: TZ value: {{ .Values.timezone | default "UTC" }} @@ -141,13 +141,13 @@ spec: - name: monitor-data {{- if .Values.monitor.persistent }} persistentVolumeClaim: - claimName: {{ .Values.clusterName }}-monitor + claimName: {{ template "cluster.name" . }}-monitor {{- else }} emptyDir: {} {{- end }} - name: prometheus-config configMap: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . 
}}-monitor items: - key: prometheus-config path: prometheus.yml @@ -156,7 +156,7 @@ spec: {{- if .Values.monitor.grafana.create }} - name: grafana-config configMap: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor items: - key: grafana-config path: grafana.ini diff --git a/charts/tidb-cluster/templates/monitor-job.yaml b/charts/tidb-cluster/templates/monitor-job.yaml index c2651c2c1f..0a0b51745d 100644 --- a/charts/tidb-cluster/templates/monitor-job.yaml +++ b/charts/tidb-cluster/templates/monitor-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: {{ .Values.clusterName }}-monitor-configurator + name: {{ template "cluster.name" . }}-monitor-configurator labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -23,16 +23,16 @@ spec: image: {{ .Values.monitor.dashboardInstaller.image }} imagePullPolicy: {{ .Values.monitor.dashboardInstaller.imagePullPolicy | default "IfNotPresent" }} args: - - {{ .Values.clusterName }}-grafana:3000 + - {{ template "cluster.name" . }}-grafana:3000 env: - name: GRAFANA_USERNAME valueFrom: secretKeyRef: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor key: username - name: GRAFANA_PASSWORD valueFrom: secretKeyRef: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor key: password {{- end }} diff --git a/charts/tidb-cluster/templates/monitor-pvc.yaml b/charts/tidb-cluster/templates/monitor-pvc.yaml index 11406e1e1d..8bef475602 100644 --- a/charts/tidb-cluster/templates/monitor-pvc.yaml +++ b/charts/tidb-cluster/templates/monitor-pvc.yaml @@ -2,7 +2,7 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: tidb-operator diff --git a/charts/tidb-cluster/templates/monitor-rbac.yaml b/charts/tidb-cluster/templates/monitor-rbac.yaml index 5503d76805..8f725e3285 100644 --- a/charts/tidb-cluster/templates/monitor-rbac.yaml +++ b/charts/tidb-cluster/templates/monitor-rbac.yaml @@ -3,7 +3,7 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: {{ .Values.clusterName}}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -18,7 +18,7 @@ rules: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -27,17 +27,21 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} subjects: - kind: ServiceAccount - name: {{ .Values.monitor.serviceAccount | default (print .Values.clusterName "-monitor") }} + {{- if .Values.monitor.serviceAccount }} + name: {{ .Values.monitor.serviceAccount }} + {{- else }} + name: {{ template "cluster.name" . }}-monitor + {{- end }} roleRef: kind: Role - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor apiGroup: rbac.authorization.k8s.io --- {{- if not .Values.monitor.serviceAccount }} kind: ServiceAccount apiVersion: v1 metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . 
}}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/monitor-secret.yaml b/charts/tidb-cluster/templates/monitor-secret.yaml index c75faaa4d8..f1df734252 100644 --- a/charts/tidb-cluster/templates/monitor-secret.yaml +++ b/charts/tidb-cluster/templates/monitor-secret.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Secret metadata: - name: {{ .Values.clusterName }}-monitor + name: {{ template "cluster.name" . }}-monitor labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/monitor-service.yaml b/charts/tidb-cluster/templates/monitor-service.yaml index 516c8c853a..8c57ee4024 100644 --- a/charts/tidb-cluster/templates/monitor-service.yaml +++ b/charts/tidb-cluster/templates/monitor-service.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-grafana + name: {{ template "cluster.name" . }}-grafana labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -26,7 +26,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-prometheus + name: {{ template "cluster.name" . }}-prometheus labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/pd-configmap.yaml b/charts/tidb-cluster/templates/pd-configmap.yaml index c97ae7558d..683a7a7f1d 100644 --- a/charts/tidb-cluster/templates/pd-configmap.yaml +++ b/charts/tidb-cluster/templates/pd-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-pd + name: {{ template "cluster.name" . }}-pd labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/privileged-tidb-configmap.yaml b/charts/tidb-cluster/templates/privileged-tidb-configmap.yaml index 1b57e99e3f..f13962d086 100644 --- a/charts/tidb-cluster/templates/privileged-tidb-configmap.yaml +++ b/charts/tidb-cluster/templates/privileged-tidb-configmap.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-privileged-tidb + name: {{ template "cluster.name" . }}-privileged-tidb labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/privileged-tidb-deployment.yaml b/charts/tidb-cluster/templates/privileged-tidb-deployment.yaml index a633229bd8..78141998e3 100644 --- a/charts/tidb-cluster/templates/privileged-tidb-deployment.yaml +++ b/charts/tidb-cluster/templates/privileged-tidb-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - name: {{ .Values.clusterName }}-privileged-tidb + name: {{ template "cluster.name" . }}-privileged-tidb labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -56,7 +56,7 @@ spec: protocol: TCP env: - name: CLUSTER_NAME - value: {{ .Values.clusterName }} + value: {{ template "cluster.name" . }} - name: TZ value: {{ .Values.timezone | default "UTC" }} volumeMounts: @@ -73,7 +73,7 @@ spec: volumes: - name: config configMap: - name: {{ .Values.clusterName }}-privileged-tidb + name: {{ template "cluster.name" . 
}}-privileged-tidb items: - key: config-file path: tidb.toml @@ -85,7 +85,7 @@ spec: fieldPath: metadata.annotations - name: startup-script configMap: - name: {{ .Values.clusterName }}-privileged-tidb + name: {{ template "cluster.name" . }}-privileged-tidb items: - key: startup-script path: privileged_tidb_start_script.sh diff --git a/charts/tidb-cluster/templates/privileged-tidb-service.yaml b/charts/tidb-cluster/templates/privileged-tidb-service.yaml index 1f348a3ea9..54f5c758f3 100644 --- a/charts/tidb-cluster/templates/privileged-tidb-service.yaml +++ b/charts/tidb-cluster/templates/privileged-tidb-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-privileged-tidb + name: {{ template "cluster.name" . }}-privileged-tidb labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/pump-configmap.yaml b/charts/tidb-cluster/templates/pump-configmap.yaml index 2488cc99ed..2956184188 100644 --- a/charts/tidb-cluster/templates/pump-configmap.yaml +++ b/charts/tidb-cluster/templates/pump-configmap.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-pump + name: {{ template "cluster.name" . }}-pump labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/pump-service.yaml b/charts/tidb-cluster/templates/pump-service.yaml index 3166a67457..5190350485 100644 --- a/charts/tidb-cluster/templates/pump-service.yaml +++ b/charts/tidb-cluster/templates/pump-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-pump + name: {{ template "cluster.name" . }}-pump labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/pump-statefulset.yaml b/charts/tidb-cluster/templates/pump-statefulset.yaml index c2a05dcf10..e006d4ebdf 100644 --- a/charts/tidb-cluster/templates/pump-statefulset.yaml +++ b/charts/tidb-cluster/templates/pump-statefulset.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: {{ .Values.clusterName }}-pump + name: {{ template "cluster.name" . }}-pump labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -16,7 +16,7 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: tidb-operator app.kubernetes.io/component: pump - serviceName: {{ .Values.clusterName }}-pump + serviceName: {{ template "cluster.name" . }}-pump replicas: {{ .Values.binlog.pump.replicas }} template: metadata: @@ -50,7 +50,7 @@ spec: volumes: - name: config configMap: - name: {{ .Values.clusterName }}-pump + name: {{ template "cluster.name" . }}-pump items: - key: pump-config path: pump.toml diff --git a/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml b/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml index 74ccf98b7a..6dfdb7adc6 100644 --- a/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml +++ b/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1beta1 kind: CronJob metadata: - name: {{ .Values.clusterName }}-scheduled-backup + name: {{ template "cluster.name" . }}-scheduled-backup labels: app.kubernetes.io/name: {{ template "chart.name" . 
}} app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -38,7 +38,7 @@ spec: imagePullPolicy: {{ .Values.scheduledBackup.binlogImagePullPolicy | default "IfNotPresent" }} command: - /binlogctl - - -pd-urls=http://{{ .Values.clusterName }}-pd:2379 + - -pd-urls=http://{{ template "cluster.name" . }}-pd:2379 - -cmd=generate_meta - -data-dir=/savepoint-dir volumeMounts: @@ -88,7 +88,7 @@ spec: emptyDir: {} - name: data persistentVolumeClaim: - claimName: {{ .Values.clusterName }}-scheduled-backup + claimName: {{ template "cluster.name" . }}-scheduled-backup {{- if .Values.scheduledBackup.gcp }} - name: gcp-credentials secret: diff --git a/charts/tidb-cluster/templates/scheduled-backup-pvc.yaml b/charts/tidb-cluster/templates/scheduled-backup-pvc.yaml index 7248b7f781..a7d9491fd2 100644 --- a/charts/tidb-cluster/templates/scheduled-backup-pvc.yaml +++ b/charts/tidb-cluster/templates/scheduled-backup-pvc.yaml @@ -2,7 +2,7 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: - name: {{ .Values.clusterName }}-scheduled-backup + name: {{ template "cluster.name" . }}-scheduled-backup labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: tidb-operator diff --git a/charts/tidb-cluster/templates/scripts/_start_drainer.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_drainer.sh.tpl index f8c1309b75..7e56bfcaa5 100644 --- a/charts/tidb-cluster/templates/scripts/_start_drainer.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_drainer.sh.tpl @@ -1,6 +1,6 @@ set -euo pipefail -domain=`echo ${HOSTNAME}`.{{ .Values.clusterName }}-drainer +domain=`echo ${HOSTNAME}`.{{ template "cluster.name" . }}-drainer elapseTime=0 period=1 @@ -26,7 +26,7 @@ done /drainer \ -L={{ .Values.binlog.drainer.logLevel | default "info" }} \ --addr=`echo ${HOSTNAME}`.{{ .Values.clusterName }}-drainer:8249 \ +-addr=`echo ${HOSTNAME}`.{{ template "cluster.name" . }}-drainer:8249 \ -config=/etc/drainer/drainer.toml \ -disable-detect={{ .Values.binlog.drainer.disableDetect | default false }} \ -initial-commit-ts={{ .Values.binlog.drainer.initialCommitTs | default 0 }} \ diff --git a/charts/tidb-cluster/templates/scripts/_start_pump.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_pump.sh.tpl index 481eaafd45..8896a8fb71 100644 --- a/charts/tidb-cluster/templates/scripts/_start_pump.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_pump.sh.tpl @@ -1,6 +1,6 @@ set -euo pipefail /pump \ -L={{ .Values.binlog.pump.logLevel | default "info" }} \ --advertise-addr=`echo ${HOSTNAME}`.{{ .Values.clusterName }}-pump:8250 \ +-advertise-addr=`echo ${HOSTNAME}`.{{ template "cluster.name" . }}-pump:8250 \ -config=/etc/pump/pump.toml \ -log-file= diff --git a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl index 0c732bc975..d67fc4ba15 100644 --- a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl @@ -1,6 +1,6 @@ set -euo pipefail dirname=scheduled-backup-`date +%Y-%m-%dT%H%M%S`-${MY_POD_NAME} -host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr '-' '_'` +host=`echo {{ template "cluster.name" . 
}}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr '-' '_'` mkdir -p /data/${dirname}/ cp /savepoint-dir/savepoint /data/${dirname}/ diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml index beb93718a0..4e3a49eab2 100644 --- a/charts/tidb-cluster/templates/tidb-cluster.yaml +++ b/charts/tidb-cluster/templates/tidb-cluster.yaml @@ -1,7 +1,7 @@ apiVersion: pingcap.com/v1alpha1 kind: TidbCluster metadata: - name: {{ .Values.clusterName }} + name: {{ template "cluster.name" . }} labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/tidb-configmap.yaml b/charts/tidb-cluster/templates/tidb-configmap.yaml index 5ee5a9f099..6d01bda5d4 100644 --- a/charts/tidb-cluster/templates/tidb-configmap.yaml +++ b/charts/tidb-cluster/templates/tidb-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.clusterName }}-tidb + name: {{ template "cluster.name" . }}-tidb labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} diff --git a/charts/tidb-cluster/templates/tidb-initializer-job.yaml b/charts/tidb-cluster/templates/tidb-initializer-job.yaml index 647dcd68fc..f406b6a1fd 100644 --- a/charts/tidb-cluster/templates/tidb-initializer-job.yaml +++ b/charts/tidb-cluster/templates/tidb-initializer-job.yaml @@ -2,11 +2,11 @@ apiVersion: batch/v1 kind: Job metadata: - name: {{ .Values.clusterName }}-tidb-initializer + name: {{ template "cluster.name" . }}-tidb-initializer labels: app.kubernetes.io/name: {{ template "chart.name" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Values.clusterName }} + app.kubernetes.io/instance: {{ template "cluster.name" . }} app.kubernetes.io/component: tidb-initializer helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} spec: @@ -15,7 +15,7 @@ spec: metadata: labels: app.kubernetes.io/name: {{ template "chart.name" . }} - app.kubernetes.io/instance: {{ .Values.clusterName }} + app.kubernetes.io/instance: {{ template "cluster.name" . }} app.kubernetes.io/component: tidb-initializer spec: restartPolicy: OnFailure @@ -28,7 +28,7 @@ spec: - -c - | import os, MySQLdb - host = {{ printf "%s-tidb" .Values.clusterName | quote }} + host = '{{ template "cluster.name" . }}-tidb' port = 4000 password_dir = '/etc/tidb/password' conn = MySQLdb.connect(host=host, port=port, user='root', connect_timeout=5) @@ -66,7 +66,7 @@ spec: {{- if .Values.tidb.initSql }} - name: init-sql configMap: - name: {{ .Values.clusterName }}-tidb + name: {{ template "cluster.name" . }}-tidb items: - key: init-sql path: init.sql diff --git a/charts/tidb-cluster/templates/tidb-service.yaml b/charts/tidb-cluster/templates/tidb-service.yaml index 8467b42383..fd9d4821a7 100644 --- a/charts/tidb-cluster/templates/tidb-service.yaml +++ b/charts/tidb-cluster/templates/tidb-service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.clusterName }}-tidb + name: {{ template "cluster.name" . }}-tidb labels: app.kubernetes.io/name: {{ template "chart.name" . 
}}
   app.kubernetes.io/managed-by: {{ .Release.Service }}
diff --git a/charts/tidb-cluster/templates/tikv-configmap.yaml b/charts/tidb-cluster/templates/tikv-configmap.yaml
index 1af42f708f..8d80451d6d 100644
--- a/charts/tidb-cluster/templates/tikv-configmap.yaml
+++ b/charts/tidb-cluster/templates/tikv-configmap.yaml
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: {{ .Values.clusterName }}-tikv
+  name: {{ template "cluster.name" . }}-tikv
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index c0fd8be1fb..bbb261e173 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -7,9 +7,8 @@
 rbac:
   create: true
 
-# clusterName is the TiDB cluster name, it is required and should be unique
-# if multiple clusters are deployed in the same namespace.
-clusterName: demo
+# clusterName is the TiDB cluster name. If not specified, the Helm release name will be used.
+# clusterName: demo
 
 # schedulerName must be same with charts/tidb-operator/values#scheduler.schedulerName
 schedulerName: tidb-scheduler
diff --git a/docs/aws-eks-tutorial.md b/docs/aws-eks-tutorial.md
index 275e6e940d..53179b0609 100644
--- a/docs/aws-eks-tutorial.md
+++ b/docs/aws-eks-tutorial.md
@@ -374,10 +374,10 @@ max-open-files = 1024
 
 ```sh
 # Deploy your first TiDB cluster
-helm install ./charts/tidb-cluster -n tidb --namespace=tidb --set pd.storageClassName=gp2,tikv.storageClassName=gp2
+helm install ./charts/tidb-cluster -n demo --namespace=tidb --set pd.storageClassName=gp2,tikv.storageClassName=gp2
 
 # Or if something goes wrong later and you want to update the deployment, use command:
-# helm upgrade tidb ./charts/tidb-cluster --namespace=tidb --set pd.storageClassName=gp2,tikv.storageClassName=gp2
+# helm upgrade demo ./charts/tidb-cluster --namespace=tidb --set pd.storageClassName=gp2,tikv.storageClassName=gp2
 
 # verify and wait until tidb-initializer pod status becomes completed:
 kubectl get pods --namespace tidb -o wide
@@ -465,7 +465,7 @@ the TiDB (V2.0) should be able to finish the first TPC-H query with 15 seconds.
 With a single command we can easily scale out the TiDB cluster. To scale out TiKV:
 
 ```sh
-helm upgrade tidb charts/tidb-cluster --set pd.storageClassName=gp2,tikv.storageClassName=gp2,tikv.replicas=5,tidb.replicas=3
+helm upgrade demo charts/tidb-cluster --set pd.storageClassName=gp2,tikv.storageClassName=gp2,tikv.replicas=5,tidb.replicas=3
 ```
 
 Now the number of TiKV pods is increased from the default 3 to 5. You can check it with:
diff --git a/docs/google-kubernetes-tutorial.md b/docs/google-kubernetes-tutorial.md
index 62e73e3b8b..8809d5ec1f 100644
--- a/docs/google-kubernetes-tutorial.md
+++ b/docs/google-kubernetes-tutorial.md
@@ -107,7 +107,7 @@ When you see `Running`, `Control + C` and proceed to launch a TiDB cluster!
 
 Now with a single command we can bring-up a full TiDB cluster:
 
-    helm install ./charts/tidb-cluster -n tidb --namespace=tidb --set pd.storageClassName=pd-ssd,tikv.storageClassName=pd-ssd
+    helm install ./charts/tidb-cluster -n demo --namespace=tidb --set pd.storageClassName=pd-ssd,tikv.storageClassName=pd-ssd
 
 It will take a few minutes to launch. You can monitor the progress with:
 
@@ -151,7 +151,7 @@ If you did not specify a password in helm, set one now:
 
 With a single command we can easily scale out the TiDB cluster. To scale out TiKV:
 
-    helm upgrade tidb charts/tidb-cluster --set pd.storageClassName=pd-ssd,tikv.storageClassName=pd-ssd,tikv.replicas=5
+    helm upgrade demo charts/tidb-cluster --set pd.storageClassName=pd-ssd,tikv.storageClassName=pd-ssd,tikv.replicas=5
 
 Now the number of TiKV pods is increased from the default 3 to 5. You can check it with:
 
@@ -161,12 +161,12 @@ Now the number of TiKV pods is increased from the default 3 to 5. You can check
 
 When the TiDB cluster is not needed, you can delete it with the following command:
 
-    helm delete tidb --purge
+    helm delete demo --purge
 
 The above commands only delete the running pods, the data is persistent. If you do not need the data anymore, you should run the following commands to clean the data and the dynamically created persistent disks:
 
-    kubectl delete pvc -n tidb -l app.kubernetes.io/instance=tidb,app.kubernetes.io/managed-by=tidb-operator &&
-    kubectl get pv -l app.kubernetes.io/namespace=tidb,app.kubernetes.io/managed-by=tidb-operator,app.kubernetes.io/instance=tidb -o name | xargs -I {} kubectl patch {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
+    kubectl delete pvc -n tidb -l app.kubernetes.io/instance=demo,app.kubernetes.io/managed-by=tidb-operator &&
+    kubectl get pv -l app.kubernetes.io/namespace=tidb,app.kubernetes.io/managed-by=tidb-operator,app.kubernetes.io/instance=demo -o name | xargs -I {} kubectl patch {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
 
 ## Shut down the Kubernetes cluster
diff --git a/docs/local-dind-tutorial.md b/docs/local-dind-tutorial.md
index f40ced5b38..85c777e242 100644
--- a/docs/local-dind-tutorial.md
+++ b/docs/local-dind-tutorial.md
@@ -60,8 +60,8 @@ tidb-scheduler-56757c896c-clzdg 2/2 Running 0 1m
 ## Step 3: Deploy a TiDB cluster in the DinD Kubernetes cluster
 
 ```sh
-$ helm install charts/tidb-cluster --name=tidb-cluster --namespace=tidb
-$ watch kubectl get pods --namespace tidb -l app.kubernetes.io/instance=tidb-cluster -o wide
+$ helm install charts/tidb-cluster --name=demo --namespace=tidb
+$ watch kubectl get pods --namespace tidb -l app.kubernetes.io/instance=demo -o wide
 $ # wait a few minutes to get all TiDB components created and ready
 
 $ kubectl get tidbcluster -n tidb
@@ -147,7 +147,7 @@ You can scale out or scale in the TiDB cluster simply by modifying the number of
 2. Run the following command to apply the changes:
 
     ```sh
-    helm upgrade tidb-cluster charts/tidb-cluster --namespace=tidb
+    helm upgrade demo charts/tidb-cluster --namespace=tidb
     ```
 
 > **Note:** If you need to scale in TiKV, the consumed time depends on the volume of your existing data, because the data needs to be migrated safely.
@@ -161,7 +161,7 @@ You can scale out or scale in the TiDB cluster simply by modifying the number of
 2. Run the following command to apply the changes:
 
     ```sh
-    helm upgrade tidb-cluster charts/tidb-cluster --namespace=tidb
+    helm upgrade demo charts/tidb-cluster --namespace=tidb
    ```
 
 ## Destroy the TiDB cluster
 
 When you are done with your test, use the following command to destroy the TiDB cluster:
 
 ```sh
-$ helm delete tidb-cluster --purge
+$ helm delete demo --purge
 ```
 
 > **Note:** This only deletes the running pods and other resources, the data is persisted. If you do not need the data anymore, run the following commands to clean up the data. (Be careful, this permanently deletes the data).
diff --git a/docs/operation-guide.md b/docs/operation-guide.md
index 60a5c3fdb3..75b95ffa2b 100644
--- a/docs/operation-guide.md
+++ b/docs/operation-guide.md
@@ -1,13 +1,12 @@
 # TiDB Cluster Operation Guide
 
-TiDB Operator can manage multiple clusters in the same Kubernetes cluster. Clusters are qualified by `namespace` and `clusterName`, namely different clusters may have same `namespace` or `clusterName` but not both.
+TiDB Operator can manage multiple clusters in the same Kubernetes cluster; clusters are distinguished by their `namespace` and Helm release name.
 
-The default `clusterName` is `demo` which is defined in charts/tidb-cluster/values.yaml. The following variables will be used in the rest of the document:
+The following variables will be used in the rest of the document:
 
 ```shell
-$ releaseName="tidb-cluster"
+$ releaseName="demo"
 $ namespace="tidb"
-$ clusterName="demo" # Make sure this is the same as variable defined in charts/tidb-cluster/values.yaml
 ```
 
 > **Note:** The rest of the document will use `values.yaml` to reference `charts/tidb-cluster/values.yaml`
@@ -38,20 +37,20 @@ $ kubectl get svc -n ${namespace} # check the available services
 
 By default the TiDB cluster has no password set. You can specify a password by setting `tidb.password` in `values.yaml` before deploying. You can retrieve the password from the initialization `Secret`:
 
 ```shell
-$ PASSWORD=$(kubectl get secret -n ${namespace} ${clusterName}-tidb -ojsonpath="{.data.password}" | base64 --decode | awk '{print $6}')
+$ PASSWORD=$(kubectl get secret -n ${namespace} ${releaseName}-tidb -ojsonpath="{.data.password}" | base64 --decode | awk '{print $6}')
 $ echo ${PASSWORD}
 ```
 
 * Access inside of the Kubernetes cluster
 
-    When your application is deployed in the same Kubernetes cluster, you can access TiDB via domain name `demo-tidb.tidb.svc` with port `4000`. Here `demo` is the `clusterName` which can be modified in `values.yaml`. And the latter `tidb` is the namespace you specified when using `helm install` to deploy TiDB cluster.
+    When your application is deployed in the same Kubernetes cluster, you can access TiDB via the domain name `demo-tidb.tidb.svc` on port `4000`. Here `demo` is the `releaseName`, and `tidb` is the namespace you specified when using `helm install` to deploy the TiDB cluster.
 
 * Access outside of the Kubernetes cluster
 
     * Using kubectl portforward
 
     ```shell
-    $ kubectl port-forward -n ${namespace} svc/${clusterName}-tidb 4000:4000 &>/tmp/portforward-tidb.log
+    $ kubectl port-forward -n ${namespace} svc/${releaseName}-tidb 4000:4000 &>/tmp/portforward-tidb.log
     $ mysql -h 127.0.0.1 -P 4000 -u root -p
     ```
@@ -125,7 +124,7 @@ By default the monitor data is not persistent, when the monitor pod is killed fo
 You can view the dashboard using `kubectl portforward`:
 
 ```shell
-$ kubectl port-forward -n ${namespace} svc/${clusterName}-grafana 3000:3000 &>/tmp/portforward-grafana.log
+$ kubectl port-forward -n ${namespace} svc/${releaseName}-grafana 3000:3000 &>/tmp/portforward-grafana.log
 ```
 
 Then open your browser at http://localhost:3000 The default username and password are both `admin`
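
A quick way to sanity-check the new `cluster.name` helper locally is to render the chart with and without `clusterName` set. This is only a sketch, assuming a Helm 2 client (the `--name` flag style used elsewhere in these docs) and that it is run from the repository root; the grep patterns are illustrative, not exhaustive:

```sh
# With clusterName unset, rendered resource names should fall back to the release name ("demo").
helm template charts/tidb-cluster --name demo --namespace tidb \
  | grep -E 'name: demo-(pd|tikv|tidb|monitor|discovery)'

# With an explicit clusterName, rendered names should keep using it instead of the release name.
helm template charts/tidb-cluster --name demo --namespace tidb --set clusterName=mycluster \
  | grep 'name: mycluster-'
```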