From cc41ad5af87bde821ecde48d63dd7c1c40d72ea2 Mon Sep 17 00:00:00 2001 From: 9547 Date: Fri, 13 Nov 2020 19:10:23 +0800 Subject: [PATCH] dry: merge dm's {alertmanager,grafana,prometheus} into cluster (#890) --- components/dm/ansible/import.go | 12 +- components/dm/ansible/import_test.go | 10 +- components/dm/command/scale_in.go | 2 +- components/dm/spec/alertmanager.go | 173 ----------- components/dm/spec/bindversion.go | 2 +- components/dm/spec/cluster.go | 6 +- components/dm/spec/grafana.go | 284 ------------------ components/dm/spec/grafana_test.go | 62 ---- components/dm/spec/logic.go | 50 ++- components/dm/spec/prometheus.go | 248 --------------- .../dm/spec/testdata/dashboards/tidb.json | 3 - components/dm/spec/topology_dm.go | 68 +++-- components/dm/spec/topology_dm_test.go | 42 +-- components/dm/task/update_dm_meta.go | 13 +- pkg/cluster/ansible/import.go | 4 +- pkg/cluster/ansible/import_test.go | 8 +- pkg/cluster/ansible/inventory.go | 22 +- pkg/cluster/ansible/service.go | 4 +- pkg/cluster/embed/autogen_pkger.go | 6 +- pkg/cluster/manager.go | 8 +- pkg/cluster/operation/destroy.go | 8 +- pkg/cluster/operation/scale_in.go | 6 +- pkg/cluster/spec/alertmanager.go | 40 +-- pkg/cluster/spec/bindversion.go | 2 +- pkg/cluster/spec/cdc.go | 19 +- pkg/cluster/spec/drainer.go | 19 +- pkg/cluster/spec/grafana.go | 34 ++- pkg/cluster/spec/grafana_test.go | 4 +- pkg/cluster/spec/instance.go | 2 +- pkg/cluster/spec/pd.go | 23 +- pkg/cluster/spec/prometheus.go | 153 +++++++--- pkg/cluster/spec/pump.go | 19 +- pkg/cluster/spec/spec.go | 57 +++- pkg/cluster/spec/spec_manager_test.go | 8 + pkg/cluster/spec/tidb.go | 19 +- pkg/cluster/spec/tiflash.go | 25 +- pkg/cluster/spec/tikv.go | 21 +- pkg/cluster/spec/tispark.go | 41 +-- pkg/cluster/spec/validate.go | 2 +- pkg/cluster/task/update_meta.go | 28 +- pkg/cluster/task/update_topology.go | 10 +- pkg/cluster/template/config/prometheus.go | 15 + templates/config/dm/prometheus.yml.tpl | 8 +- templates/config/prometheus.yml.tpl | 38 ++- templates/scripts/dm/run_grafana.sh.tpl | 16 - templates/scripts/dm/run_prometheus.sh.tpl | 25 -- tests/tiup-dm/script/util.sh | 46 +-- tests/tiup-dm/test_import.sh | 5 +- 48 files changed, 535 insertions(+), 1185 deletions(-) delete mode 100644 components/dm/spec/alertmanager.go delete mode 100644 components/dm/spec/grafana.go delete mode 100644 components/dm/spec/grafana_test.go delete mode 100644 components/dm/spec/prometheus.go delete mode 100644 components/dm/spec/testdata/dashboards/tidb.json delete mode 100644 templates/scripts/dm/run_grafana.sh.tpl delete mode 100644 templates/scripts/dm/run_prometheus.sh.tpl diff --git a/components/dm/ansible/import.go b/components/dm/ansible/import.go index e22e501561..b7553d5766 100644 --- a/components/dm/ansible/import.go +++ b/components/dm/ansible/import.go @@ -222,7 +222,7 @@ func (im *Importer) handleWorkerConfig(srv *spec.WorkerSpec, fname string) error // ScpSourceToMaster scp the source files to master, // and set V1SourcePath of the master spec. 
-func (im *Importer) ScpSourceToMaster(topo *spec.Topology) (err error) { +func (im *Importer) ScpSourceToMaster(topo *spec.Specification) (err error) { for i := 0; i < len(topo.Masters); i++ { master := &topo.Masters[i] target := filepath.Join(firstNonEmpty(master.DeployDir, topo.GlobalOptions.DeployDir), "v1source") @@ -293,7 +293,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } meta = &spec.Metadata{ - Topology: new(spec.Topology), + Topology: new(spec.Specification), } topo := meta.Topology @@ -480,7 +480,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } case "alertmanager_servers": for _, host := range group.Hosts { - srv := spec.AlertManagerSpec{ + srv := spec.AlertmanagerSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), DeployDir: firstNonEmpty(host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir), @@ -521,9 +521,9 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } } - srv.DeployDir = instancDeployDir(spec.ComponentAlertManager, srv.WebPort, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) + srv.DeployDir = instancDeployDir(spec.ComponentAlertmanager, srv.WebPort, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) - topo.Alertmanager = append(topo.Alertmanager, srv) + topo.Alertmanagers = append(topo.Alertmanagers, srv) } case "grafana_servers": for _, host := range group.Hosts { @@ -559,7 +559,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } srv.DeployDir = instancDeployDir(spec.ComponentGrafana, srv.Port, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) - topo.Grafana = append(topo.Grafana, srv) + topo.Grafanas = append(topo.Grafanas, srv) } case "all", "ungrouped": // ignore intent diff --git a/components/dm/ansible/import_test.go b/components/dm/ansible/import_test.go index f560b4ae37..cef557dc86 100644 --- a/components/dm/ansible/import_test.go +++ b/components/dm/ansible/import_test.go @@ -188,9 +188,9 @@ func TestImportFromAnsible(t *testing.T) { assert.Equal(expectedWorker, worker) // check Alertmanager - assert.Len(topo.Alertmanager, 1) - aler := topo.Alertmanager[0] - expectedAlter := spec.AlertManagerSpec{ + assert.Len(topo.Alertmanagers, 1) + aler := topo.Alertmanagers[0] + expectedAlter := spec.AlertmanagerSpec{ Host: "172.19.0.101", SSHPort: 22, WebPort: 9093, @@ -201,8 +201,8 @@ func TestImportFromAnsible(t *testing.T) { assert.Equal(expectedAlter, aler) // Check Grafana - assert.Len(topo.Grafana, 1) - grafana := topo.Grafana[0] + assert.Len(topo.Grafanas, 1) + grafana := topo.Grafanas[0] expectedGrafana := spec.GrafanaSpec{ Host: "172.19.0.101", SSHPort: 22, diff --git a/components/dm/command/scale_in.go b/components/dm/command/scale_in.go index c1e772cbaf..cf66c65845 100644 --- a/components/dm/command/scale_in.go +++ b/components/dm/command/scale_in.go @@ -75,7 +75,7 @@ func newScaleInCmd() *cobra.Command { // ScaleInDMCluster scale in dm cluster. func ScaleInDMCluster( getter operator.ExecutorGetter, - topo *dm.Topology, + topo *dm.Specification, options operator.Options, ) error { // instances by uuid diff --git a/components/dm/spec/alertmanager.go b/components/dm/spec/alertmanager.go deleted file mode 100644 index 5fb124d764..0000000000 --- a/components/dm/spec/alertmanager.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 PingCAP, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - cspec "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// AlertManagerComponent represents Alertmanager component. -type AlertManagerComponent struct{ *Topology } - -// Name implements Component interface. -func (c *AlertManagerComponent) Name() string { - return ComponentAlertManager -} - -// Role implements Component interface. -func (c *AlertManagerComponent) Role() string { - return cspec.RoleMonitor -} - -// Instances implements Component interface. -func (c *AlertManagerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Alertmanager)) - for _, s := range c.Alertmanager { - ins = append(ins, &AlertManagerInstance{ - BaseInstance: cspec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.WebPort, - SSHP: s.SSHPort, - - Ports: []int{ - s.WebPort, - s.ClusterPort, - }, - Dirs: []string{ - s.DeployDir, - s.DataDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, - topo: c.Topology, - }) - } - return ins -} - -// AlertManagerInstance represent the alert manager instance -type AlertManagerInstance struct { - cspec.BaseInstance - topo *Topology -} - -var _ cluster.DeployerInstance = &AlertManagerInstance{} - -// Deploy implements DeployerInstance interface. 
-func (i *AlertManagerInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, clusterName string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Func("CopyConfig", func(ctx *task.Context) error { - tempDir, err := ioutil.TempDir("", "tiup-*") - if err != nil { - return errors.AddStack(err) - } - // transfer config - e := ctx.Get(i.GetHost()) - fp := filepath.Join(tempDir, fmt.Sprintf("alertmanager_%s.yml", i.GetHost())) - if err := config.NewAlertManagerConfig().ConfigToFile(fp); err != nil { - return err - } - dst := filepath.Join(deployDir, "conf", "alertmanager.yml") - err = e.Transfer(fp, dst, false) - if err != nil { - return errors.Annotatef(err, "failed to transfer %s to %s@%s", fp, i.GetHost(), dst) - } - return nil - }) - -} - -// InitConfig implement Instance interface -func (i *AlertManagerInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - enableTLS := i.topo.GlobalOptions.TLSEnabled - - // Transfer start script - spec := i.InstanceSpec.(AlertManagerSpec) - cfg := scripts.NewAlertManagerScript(spec.Host, paths.Deploy, paths.Data[0], paths.Log, enableTLS). - WithWebPort(spec.WebPort).WithClusterPort(spec.ClusterPort).WithNumaNode(spec.NumaNode). - AppendEndpoints(cspec.AlertManagerEndpoints(i.topo.Alertmanager, deployUser, enableTLS)) - - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_alertmanager_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_alertmanager.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - // If the user specific a local config file, we should overwrite the default one with it - if spec.ConfigFilePath != "" { - name := filepath.Base(spec.ConfigFilePath) - dst := filepath.Join(paths.Deploy, "conf", name) - if err := i.TransferLocalConfigFile(e, spec.ConfigFilePath, dst); err != nil { - return errors.Annotate(err, "transfer alertmanager config failed") - } - } - - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *AlertManagerInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - i.topo = topo.(*Topology) - return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} diff --git a/components/dm/spec/bindversion.go b/components/dm/spec/bindversion.go index 757954b81a..7bcee361c2 100644 --- a/components/dm/spec/bindversion.go +++ b/components/dm/spec/bindversion.go @@ -5,7 +5,7 @@ import "github.com/pingcap/tiup/pkg/cluster/spec" // DMComponentVersion maps the dm version to the third components binding version func DMComponentVersion(comp, version string) string { switch comp { - case spec.ComponentAlertManager: + case spec.ComponentAlertmanager: return "v0.17.0" case spec.ComponentGrafana, spec.ComponentPrometheus: return "v4.0.3" diff --git a/components/dm/spec/cluster.go b/components/dm/spec/cluster.go index f3bb49da1d..5b62ad2f66 100644 --- a/components/dm/spec/cluster.go +++ b/components/dm/spec/cluster.go @@ -29,7 +29,7 @@ type 
Metadata struct { Version string `yaml:"dm_version"` // the version of TiDB cluster //EnableFirewall bool `yaml:"firewall"` - Topology *Topology `yaml:"topology"` + Topology *Specification `yaml:"topology"` } var _ cspec.UpgradableMetadata = &Metadata{} @@ -51,7 +51,7 @@ func (m *Metadata) GetTopology() cspec.Topology { // SetTopology implements Metadata interface. func (m *Metadata) SetTopology(topo cspec.Topology) { - dmTopo, ok := topo.(*Topology) + dmTopo, ok := topo.(*Specification) if !ok { panic(fmt.Sprintln("wrong type: ", reflect.TypeOf(topo))) } @@ -72,7 +72,7 @@ func GetSpecManager() *cspec.SpecManager { if specManager == nil { specManager = cspec.NewSpec(filepath.Join(cspec.ProfileDir(), cspec.TiOpsClusterDir), func() cspec.Metadata { return &Metadata{ - Topology: new(Topology), + Topology: new(Specification), } }) } diff --git a/components/dm/spec/grafana.go b/components/dm/spec/grafana.go deleted file mode 100644 index 6c83c37227..0000000000 --- a/components/dm/spec/grafana.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "path/filepath" - "strings" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// GrafanaComponent represents Grafana component. -type GrafanaComponent struct{ *Topology } - -// Name implements Component interface. -func (c *GrafanaComponent) Name() string { - return ComponentGrafana -} - -// Role implements Component interface. -func (c *GrafanaComponent) Role() string { - return spec.RoleMonitor -} - -// Instances implements Component interface. 
-func (c *GrafanaComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Grafana)) - for _, s := range c.Grafana { - ins = append(ins, &GrafanaInstance{ - BaseInstance: spec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.Port, - SSHP: s.SSHPort, - - Ports: []int{ - s.Port, - }, - Dirs: []string{ - s.DeployDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, - topo: c.Topology, - }) - } - return ins -} - -// GrafanaInstance represent the grafana instance -type GrafanaInstance struct { - spec.BaseInstance - topo *Topology -} - -// InitConfig implement Instance interface -func (i *GrafanaInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if len(i.topo.Monitors) == 0 { - return errors.New("no prometheus found in topology") - } - - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - // transfer run script - tpl := filepath.Join("/templates", "scripts", "dm", "run_grafana.sh.tpl") - cfg := scripts.NewGrafanaScript(clusterName, paths.Deploy).WithTPLFile(tpl) - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_grafana_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_grafana.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - // transfer config - fp = filepath.Join(paths.Cache, fmt.Sprintf("grafana_%s.ini", i.GetHost())) - if err := config.NewGrafanaConfig(i.GetHost(), paths.Deploy).WithPort(uint64(i.GetPort())).ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(paths.Deploy, "conf", "grafana.ini") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if err := i.initDashboards(e, i.InstanceSpec.(GrafanaSpec), paths); err != nil { - return errors.Annotate(err, "initial dashboards") - } - - var dirs []string - - // provisioningDir Must same as in grafana.ini.tpl - provisioningDir := filepath.Join(paths.Deploy, "provisioning") - dirs = append(dirs, provisioningDir) - - datasourceDir := filepath.Join(provisioningDir, "datasources") - dirs = append(dirs, datasourceDir) - - dashboardDir := filepath.Join(provisioningDir, "dashboards") - dirs = append(dirs, dashboardDir) - - cmd := fmt.Sprintf("mkdir -p %s", strings.Join(dirs, " ")) - if _, _, err := e.Execute(cmd, false); err != nil { - return errors.AddStack(err) - } - - // transfer dashboard.yml - fp = filepath.Join(paths.Cache, fmt.Sprintf("dashboard_%s.yml", i.GetHost())) - if err := config.NewDashboardConfig(clusterName, paths.Deploy).ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(dashboardDir, "dashboard.yml") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - // transfer datasource.yml - fp = filepath.Join(paths.Cache, fmt.Sprintf("datasource_%s.yml", i.GetHost())) - if err := config.NewDatasourceConfig(clusterName, i.topo.Monitors[0].Host). - WithPort(uint64(i.topo.Monitors[0].Port)). 
- ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(datasourceDir, "datasource.yml") - return e.Transfer(fp, dst, false) -} - -func (i *GrafanaInstance) initDashboards(e executor.Executor, spec GrafanaSpec, paths meta.DirPaths) error { - dashboardsDir := filepath.Join(paths.Deploy, "dashboards") - // To make this step idempotent, we need cleanup old dashboards first - if _, _, err := e.Execute(fmt.Sprintf("rm -f %s/*.json", dashboardsDir), false); err != nil { - return err - } - - if spec.DashboardDir != "" { - return i.TransferLocalConfigDir(e, spec.DashboardDir, dashboardsDir, func(name string) bool { - return strings.HasSuffix(name, ".json") - }) - } - - // Use the default ones - cmd := fmt.Sprintf("cp %[1]s/bin/*.json %[1]s/dashboards/", paths.Deploy) - if _, _, err := e.Execute(cmd, false); err != nil { - return err - } - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *GrafanaInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - dmtopo := topo.(*Topology) - i.topo = dmtopo.Merge(i.topo) - return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} - -var _ cluster.DeployerInstance = &GrafanaInstance{} - -// Deploy implements DeployerInstance interface. -func (i *GrafanaInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, clusterName string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Shell( // rm the json file which relate to tidb cluster and useless. - i.GetHost(), - fmt.Sprintf("rm %s/*.json", filepath.Join(deployDir, "bin")), - false, /*sudo*/ - ).Func("Dashboards", func(ctx *task.Context) error { - e := ctx.Get(i.GetHost()) - - return i.installDashboards(e, deployDir, clusterName, clusterVersion) - }) -} - -func (i *GrafanaInstance) installDashboards(e executor.Executor, deployDir, clusterName, clusterVersion string) error { - tmp := filepath.Join(deployDir, "_tiup_tmp") - _, stderr, err := e.Execute(fmt.Sprintf("mkdir -p %s", tmp), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - srcPath := task.PackagePath(ComponentDMMaster, clusterVersion, i.OS(), i.Arch()) - dstPath := filepath.Join(tmp, filepath.Base(srcPath)) - err = e.Transfer(srcPath, dstPath, false) - if err != nil { - return errors.AddStack(err) - } - - cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // copy dm-master/scripts/*.json - targetDir := filepath.Join(deployDir, "dashboards") - _, stderr, err = e.Execute(fmt.Sprintf("mkdir -p %s", targetDir), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - cmd = fmt.Sprintf("cp %s/dm-master/scripts/*.json %s", tmp, targetDir) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - for _, cmd := range []string{ - `find %s -type f -exec sed -i "s/\${DS_.*-CLUSTER}/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/DS_.*-CLUSTER/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/test-cluster/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/Test-Cluster/%s/g" {} \;`, - } { - cmd := fmt.Sprintf(cmd, targetDir, 
clusterName) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - } - - cmd = fmt.Sprintf("rm -rf %s", tmp) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // backup *.json for later reload (in case that the user change dashboard_dir) - cmd = fmt.Sprintf("cp %s/*.json %s", targetDir, filepath.Join(deployDir, "bin")) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - return nil -} diff --git a/components/dm/spec/grafana_test.go b/components/dm/spec/grafana_test.go deleted file mode 100644 index bb7efe67f7..0000000000 --- a/components/dm/spec/grafana_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "io/ioutil" - "os" - "os/user" - "path" - "path/filepath" - "testing" - - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/meta" - "github.com/stretchr/testify/assert" -) - -func TestLocalDashboards(t *testing.T) { - deployDir, err := ioutil.TempDir("", "tiup-*") - assert.Nil(t, err) - defer os.RemoveAll(deployDir) - localDir, err := filepath.Abs("./testdata/dashboards") - assert.Nil(t, err) - - topo := new(Topology) - topo.Grafana = append(topo.Grafana, GrafanaSpec{ - Host: "127.0.0.1", - Port: 3000, - DashboardDir: localDir, - }) - - comp := GrafanaComponent{topo} - ints := comp.Instances() - - assert.Equal(t, len(ints), 1) - grafanaInstance := ints[0].(*GrafanaInstance) - - user, err := user.Current() - assert.Nil(t, err) - e, err := executor.New(executor.SSHTypeNone, false, executor.SSHConfig{Host: "127.0.0.1", User: user.Username}) - assert.Nil(t, err) - err = grafanaInstance.initDashboards(e, topo.Grafana[0], meta.DirPaths{Deploy: deployDir}) - assert.Nil(t, err) - - assert.FileExists(t, path.Join(deployDir, "dashboards", "tidb.json")) - fs, err := ioutil.ReadDir(localDir) - assert.Nil(t, err) - for _, f := range fs { - assert.FileExists(t, path.Join(deployDir, "dashboards", f.Name())) - } -} diff --git a/components/dm/spec/logic.go b/components/dm/spec/logic.go index cc564dca3f..161321980a 100644 --- a/components/dm/spec/logic.go +++ b/components/dm/spec/logic.go @@ -32,7 +32,7 @@ const ( ComponentDMWorker = "dm-worker" ComponentPrometheus = spec.ComponentPrometheus ComponentGrafana = spec.ComponentGrafana - ComponentAlertManager = spec.ComponentAlertManager + ComponentAlertmanager = spec.ComponentAlertmanager ) type ( @@ -52,7 +52,7 @@ type Component = spec.Component type Instance = spec.Instance // DMMasterComponent represents TiDB component. -type DMMasterComponent struct{ *Topology } +type DMMasterComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMMasterComponent) Name() string { @@ -67,7 +67,7 @@ func (c *DMMasterComponent) Role() string { // Instances implements Component interface. 
func (c *DMMasterComponent) Instances() []Instance { ins := make([]Instance, 0) - for _, s := range c.Masters { + for _, s := range c.Topology.Masters { s := s ins = append(ins, &MasterInstance{ Name: s.Name, @@ -98,7 +98,7 @@ func (c *DMMasterComponent) Instances() []Instance { type MasterInstance struct { Name string spec.BaseInstance - topo *Topology + topo *Specification } // InitConfig implement Instance interface @@ -151,7 +151,7 @@ func (i *MasterInstance) ScaleConfig( return err } - c := topo.(*Topology) + c := topo.(*Specification) spec := i.InstanceSpec.(MasterSpec) cfg := scripts.NewDMMasterScaleScript( spec.Name, @@ -179,9 +179,7 @@ func (i *MasterInstance) ScaleConfig( } // DMWorkerComponent represents DM worker component. -type DMWorkerComponent struct { - *Topology -} +type DMWorkerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMWorkerComponent) Name() string { @@ -196,7 +194,7 @@ func (c *DMWorkerComponent) Role() string { // Instances implements Component interface. func (c *DMWorkerComponent) Instances() []Instance { ins := make([]Instance, 0) - for _, s := range c.Workers { + for _, s := range c.Topology.Workers { s := s ins = append(ins, &WorkerInstance{ Name: s.Name, @@ -227,7 +225,7 @@ func (c *DMWorkerComponent) Instances() []Instance { type WorkerInstance struct { Name string spec.BaseInstance - topo *Topology + topo *Specification } // InitConfig implement Instance interface @@ -280,22 +278,22 @@ func (i *WorkerInstance) ScaleConfig( defer func() { i.topo = s }() - i.topo = topo.(*Topology) + i.topo = topo.(*Specification) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } // GetGlobalOptions returns cluster topology -func (topo *Topology) GetGlobalOptions() spec.GlobalOptions { +func (topo *Specification) GetGlobalOptions() spec.GlobalOptions { return topo.GlobalOptions } // GetMonitoredOptions returns MonitoredOptions -func (topo *Topology) GetMonitoredOptions() *spec.MonitoredOptions { +func (topo *Specification) GetMonitoredOptions() *spec.MonitoredOptions { return nil } // ComponentsByStopOrder return component in the order need to stop. -func (topo *Topology) ComponentsByStopOrder() (comps []Component) { +func (topo *Specification) ComponentsByStopOrder() (comps []Component) { comps = topo.ComponentsByStartOrder() // revert order i := 0 @@ -309,36 +307,36 @@ func (topo *Topology) ComponentsByStopOrder() (comps []Component) { } // ComponentsByStartOrder return component in the order need to start. -func (topo *Topology) ComponentsByStartOrder() (comps []Component) { +func (topo *Specification) ComponentsByStartOrder() (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) - comps = append(comps, &MonitorComponent{topo}) - comps = append(comps, &GrafanaComponent{topo}) - comps = append(comps, &AlertManagerComponent{topo}) + comps = append(comps, &spec.MonitorComponent{Topology: topo}) + comps = append(comps, &spec.GrafanaComponent{Topology: topo}) + comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // ComponentsByUpdateOrder return component in the order need to be updated. 
-func (topo *Topology) ComponentsByUpdateOrder() (comps []Component) { +func (topo *Specification) ComponentsByUpdateOrder() (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) - comps = append(comps, &MonitorComponent{topo}) - comps = append(comps, &GrafanaComponent{topo}) - comps = append(comps, &AlertManagerComponent{topo}) + comps = append(comps, &spec.MonitorComponent{Topology: topo}) + comps = append(comps, &spec.GrafanaComponent{Topology: topo}) + comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // IterComponent iterates all components in component starting order -func (topo *Topology) IterComponent(fn func(comp Component)) { +func (topo *Specification) IterComponent(fn func(comp Component)) { for _, comp := range topo.ComponentsByStartOrder() { fn(comp) } } // IterInstance iterates all instances in component starting order -func (topo *Topology) IterInstance(fn func(instance Instance)) { +func (topo *Specification) IterInstance(fn func(instance Instance)) { for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { fn(inst) @@ -347,7 +345,7 @@ func (topo *Topology) IterInstance(fn func(instance Instance)) { } // IterHost iterates one instance for each host -func (topo *Topology) IterHost(fn func(instance Instance)) { +func (topo *Specification) IterHost(fn func(instance Instance)) { hostMap := make(map[string]bool) for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { @@ -362,7 +360,7 @@ func (topo *Topology) IterHost(fn func(instance Instance)) { } // Endpoints returns the PD endpoints configurations -func (topo *Topology) Endpoints(user string) []*scripts.DMMasterScript { +func (topo *Specification) Endpoints(user string) []*scripts.DMMasterScript { var ends []*scripts.DMMasterScript for _, s := range topo.Masters { deployDir := spec.Abs(user, s.DeployDir) diff --git a/components/dm/spec/prometheus.go b/components/dm/spec/prometheus.go deleted file mode 100644 index cc5e1c08fd..0000000000 --- a/components/dm/spec/prometheus.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "path/filepath" - "strings" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config/dm" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// MonitorComponent represents Monitor component. -type MonitorComponent struct{ *Topology } - -// Name implements Component interface. -func (c *MonitorComponent) Name() string { - return ComponentPrometheus -} - -// Role implements Component interface. -func (c *MonitorComponent) Role() string { - return spec.RoleMonitor -} - -// Instances implements Component interface. 
-func (c *MonitorComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Monitors)) - for _, s := range c.Monitors { - ins = append(ins, &MonitorInstance{spec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.Port, - SSHP: s.SSHPort, - - Ports: []int{ - s.Port, - }, - Dirs: []string{ - s.DeployDir, - s.DataDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, c.Topology}) - } - return ins -} - -// MonitorInstance represent the monitor instance -type MonitorInstance struct { - spec.BaseInstance - topo *Topology -} - -// InitConfig implement Instance interface -func (i *MonitorInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - enableTLS := i.topo.GlobalOptions.TLSEnabled - - // transfer run script - spec := i.InstanceSpec.(PrometheusSpec) - cfg := scripts.NewPrometheusScript( - i.GetHost(), - paths.Deploy, - paths.Data[0], - paths.Log, - ).WithPort(spec.Port). - WithNumaNode(spec.NumaNode). - WithTPLFile(filepath.Join("/templates", "scripts", "dm", "run_prometheus.sh.tpl")) - - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_prometheus_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_prometheus.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - topo := i.topo - - // transfer config - fp = filepath.Join(paths.Cache, fmt.Sprintf("prometheus_%s_%d.yml", i.GetHost(), i.GetPort())) - cfig := dm.NewPrometheusConfig(clusterName, enableTLS) - - for _, master := range topo.Masters { - cfig.AddMasterAddrs(master.Host, uint64(master.Port)) - } - - for _, worker := range topo.Workers { - cfig.AddWorkerAddrs(worker.Host, uint64(worker.Port)) - } - - for _, alertmanager := range topo.Alertmanager { - cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) - } - - if err := i.initRules(e, spec, paths); err != nil { - return errors.AddStack(err) - } - - if err := cfig.ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(paths.Deploy, "conf", "prometheus.yml") - return e.Transfer(fp, dst, false) -} - -func (i *MonitorInstance) initRules(e executor.Executor, spec PrometheusSpec, paths meta.DirPaths) error { - confDir := filepath.Join(paths.Deploy, "conf") - // To make this step idempotent, we need cleanup old rules first - if _, _, err := e.Execute(fmt.Sprintf("rm -f %s/*.rules.yml", confDir), false); err != nil { - return err - } - - // If the user specify a rule directory, we should use the rules specified - if spec.RuleDir != "" { - return i.TransferLocalConfigDir(e, spec.RuleDir, confDir, func(name string) bool { - return strings.HasSuffix(name, ".rules.yml") - }) - } - - // Use the default ones - cmd := fmt.Sprintf("cp %[1]s/bin/prometheus/*.rules.yml %[1]s/conf/", paths.Deploy) - if _, _, err := e.Execute(cmd, false); err != nil { - return err - } - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *MonitorInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - i.topo = topo.(*Topology) - return 
i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} - -var _ cluster.DeployerInstance = &MonitorInstance{} - -// Deploy implements DeployerInstance interface. -func (i *MonitorInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, _ string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Shell( // rm the rules file which relate to tidb cluster and useless. - i.GetHost(), - fmt.Sprintf("rm %s/*.rules.yml", filepath.Join(deployDir, "bin", "prometheus")), - false, /*sudo*/ - ).Func("CopyRulesYML", func(ctx *task.Context) error { - e := ctx.Get(i.GetHost()) - - return i.installRules(e, deployDir, clusterVersion) - }) -} - -func (i *MonitorInstance) installRules(e executor.Executor, deployDir, clusterVersion string) error { - tmp := filepath.Join(deployDir, "_tiup_tmp") - _, stderr, err := e.Execute(fmt.Sprintf("mkdir -p %s", tmp), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - srcPath := task.PackagePath(ComponentDMMaster, clusterVersion, i.OS(), i.Arch()) - dstPath := filepath.Join(tmp, filepath.Base(srcPath)) - - err = e.Transfer(srcPath, dstPath, false) - if err != nil { - return errors.AddStack(err) - } - - cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // copy dm-master/conf/*.rules.yml - targetDir := filepath.Join(deployDir, "conf") - cmd = fmt.Sprintf("cp %s/dm-master/conf/*.rules.yml %s", tmp, targetDir) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - cmd = fmt.Sprintf("rm -rf %s", tmp) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // backup *.rules.yml for later reload (in case that the user change rule_dir) - cmd = fmt.Sprintf("cp %s/*.rules.yml %s", targetDir, filepath.Join(deployDir, "bin", "prometheus")) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - return nil -} diff --git a/components/dm/spec/testdata/dashboards/tidb.json b/components/dm/spec/testdata/dashboards/tidb.json deleted file mode 100644 index 7b9f3dc3f7..0000000000 --- a/components/dm/spec/testdata/dashboards/tidb.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "desc": "this is a dummy test file" -} \ No newline at end of file diff --git a/components/dm/spec/topology_dm.go b/components/dm/spec/topology_dm.go index d0c92312bf..0f0f7eb5cc 100644 --- a/components/dm/spec/topology_dm.go +++ b/components/dm/spec/topology_dm.go @@ -70,8 +70,8 @@ type ( PrometheusSpec = spec.PrometheusSpec // GrafanaSpec is the spec of Grafana GrafanaSpec = spec.GrafanaSpec - // AlertManagerSpec is the spec of Alertmanager - AlertManagerSpec = spec.AlertManagerSpec + // AlertmanagerSpec is the spec of Alertmanager + AlertmanagerSpec = spec.AlertmanagerSpec // ResourceControl is the spec of ResourceControl ResourceControl = meta.ResourceControl ) @@ -83,23 +83,23 @@ type ( Worker map[string]interface{} `yaml:"worker"` } - // Topology represents the specification of topology.yaml - Topology struct { + // Specification represents the specification of topology.yaml + Specification struct { GlobalOptions GlobalOptions `yaml:"global,omitempty" validate:"global:editable"` 
// MonitoredOptions MonitoredOptions `yaml:"monitored,omitempty" validate:"monitored:editable"` - ServerConfigs DMServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` - Masters []MasterSpec `yaml:"master_servers"` - Workers []WorkerSpec `yaml:"worker_servers"` - Monitors []PrometheusSpec `yaml:"monitoring_servers"` - Grafana []GrafanaSpec `yaml:"grafana_servers,omitempty"` - Alertmanager []AlertManagerSpec `yaml:"alertmanager_servers,omitempty"` + ServerConfigs DMServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` + Masters []MasterSpec `yaml:"master_servers"` + Workers []WorkerSpec `yaml:"worker_servers"` + Monitors []spec.PrometheusSpec `yaml:"monitoring_servers"` + Grafanas []spec.GrafanaSpec `yaml:"grafana_servers,omitempty"` + Alertmanagers []spec.AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) // AllDMComponentNames contains the names of all dm components. // should include all components in ComponentsByStartOrder func AllDMComponentNames() (roles []string) { - tp := &Topology{} + tp := &Specification{} tp.IterComponent(func(c Component) { roles = append(roles, c.Name()) }) @@ -225,8 +225,8 @@ func (s WorkerSpec) IsImported() bool { } // UnmarshalYAML sets default values when unmarshaling the topology file -func (topo *Topology) UnmarshalYAML(unmarshal func(interface{}) error) error { - type topology Topology +func (topo *Specification) UnmarshalYAML(unmarshal func(interface{}) error) error { + type topology Specification if err := unmarshal((*topology)(topo)); err != nil { return err } @@ -244,7 +244,7 @@ func (topo *Topology) UnmarshalYAML(unmarshal func(interface{}) error) error { // platformConflictsDetect checks for conflicts in topology for different OS / Arch // for set to the same host / IP -func (topo *Topology) platformConflictsDetect() error { +func (topo *Specification) platformConflictsDetect() error { type ( conflict struct { os string @@ -305,7 +305,7 @@ func (topo *Topology) platformConflictsDetect() error { return nil } -func (topo *Topology) portConflictsDetect() error { +func (topo *Specification) portConflictsDetect() error { type ( usedPort struct { host string @@ -383,7 +383,7 @@ func (topo *Topology) portConflictsDetect() error { return nil } -func (topo *Topology) dirConflictsDetect() error { +func (topo *Specification) dirConflictsDetect() error { type ( usedDir struct { host string @@ -467,7 +467,7 @@ func (topo *Topology) dirConflictsDetect() error { // CountDir counts for dir paths used by any instance in the cluster with the same // prefix, useful to find potential path conflicts -func (topo *Topology) CountDir(targetHost, dirPrefix string) int { +func (topo *Specification) CountDir(targetHost, dirPrefix string) int { dirTypes := []string{ "DataDir", "DeployDir", @@ -532,7 +532,7 @@ func (topo *Topology) CountDir(targetHost, dirPrefix string) int { } // TLSConfig generates a tls.Config for the specification as needed -func (topo *Topology) TLSConfig(dir string) (*tls.Config, error) { +func (topo *Specification) TLSConfig(dir string) (*tls.Config, error) { if !topo.GlobalOptions.TLSEnabled { return nil, nil } @@ -541,7 +541,7 @@ func (topo *Topology) TLSConfig(dir string) (*tls.Config, error) { // Validate validates the topology specification and produce error if the // specification invalid (e.g: port conflicts or directory conflicts) -func (topo *Topology) Validate() error { +func (topo *Specification) Validate() error { if err := topo.platformConflictsDetect(); err != nil { return err 
} @@ -558,25 +558,28 @@ func (topo *Topology) Validate() error { } // BaseTopo implements Topology interface. -func (topo *Topology) BaseTopo() *spec.BaseTopo { +func (topo *Specification) BaseTopo() *spec.BaseTopo { return &spec.BaseTopo{ GlobalOptions: &topo.GlobalOptions, MonitoredOptions: topo.GetMonitoredOptions(), MasterList: topo.GetMasterList(), + Monitors: topo.Monitors, + Grafanas: topo.Grafanas, + Alertmanagers: topo.Alertmanagers, } } // NewPart implements ScaleOutTopology interface. -func (topo *Topology) NewPart() spec.Topology { - return &Topology{ +func (topo *Specification) NewPart() spec.Topology { + return &Specification{ GlobalOptions: topo.GlobalOptions, ServerConfigs: topo.ServerConfigs, } } // MergeTopo implements ScaleOutTopology interface. -func (topo *Topology) MergeTopo(rhs spec.Topology) spec.Topology { - other, ok := rhs.(*Topology) +func (topo *Specification) MergeTopo(rhs spec.Topology) spec.Topology { + other, ok := rhs.(*Specification) if !ok { panic("topo should be DM Topology") } @@ -585,7 +588,7 @@ func (topo *Topology) MergeTopo(rhs spec.Topology) spec.Topology { } // GetMasterList returns a list of Master API hosts of the current cluster -func (topo *Topology) GetMasterList() []string { +func (topo *Specification) GetMasterList() []string { var masterList []string for _, master := range topo.Masters { @@ -596,16 +599,17 @@ func (topo *Topology) GetMasterList() []string { } // Merge returns a new Topology which sum old ones -func (topo *Topology) Merge(that *Topology) *Topology { - return &Topology{ +func (topo *Specification) Merge(that spec.Topology) spec.Topology { + spec := that.(*Specification) + return &Specification{ GlobalOptions: topo.GlobalOptions, // MonitoredOptions: topo.MonitoredOptions, ServerConfigs: topo.ServerConfigs, - Masters: append(topo.Masters, that.Masters...), - Workers: append(topo.Workers, that.Workers...), - Monitors: append(topo.Monitors, that.Monitors...), - Grafana: append(topo.Grafana, that.Grafana...), - Alertmanager: append(topo.Alertmanager, that.Alertmanager...), + Masters: append(topo.Masters, spec.Masters...), + Workers: append(topo.Workers, spec.Workers...), + Monitors: append(topo.Monitors, spec.Monitors...), + Grafanas: append(topo.Grafanas, spec.Grafanas...), + Alertmanagers: append(topo.Alertmanagers, spec.Alertmanagers...), } } diff --git a/components/dm/spec/topology_dm_test.go b/components/dm/spec/topology_dm_test.go index fbfa04c528..bb92e63d98 100644 --- a/components/dm/spec/topology_dm_test.go +++ b/components/dm/spec/topology_dm_test.go @@ -32,14 +32,14 @@ var _ = Suite(&metaSuiteDM{}) func TestDefaultDataDir(t *testing.T) { // Test with without global DataDir. - topo := new(Topology) + topo := new(Specification) topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Workers = append(topo.Workers, WorkerSpec{Host: "1.1.2.1", Port: 2221}) data, err := yaml.Marshal(topo) assert.Nil(t, err) // Check default value. - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) assert.Equal(t, "data", topo.GlobalOptions.DataDir) @@ -49,7 +49,7 @@ func TestDefaultDataDir(t *testing.T) { // Can keep the default value. 
data, err = yaml.Marshal(topo) assert.Nil(t, err) - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) assert.Equal(t, "data", topo.GlobalOptions.DataDir) @@ -57,7 +57,7 @@ func TestDefaultDataDir(t *testing.T) { assert.Equal(t, "data", topo.Workers[0].DataDir) // Test with global DataDir. - topo = new(Topology) + topo = new(Specification) topo.GlobalOptions.DataDir = "/gloable_data" topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.2", Port: 1112, DataDir: "/my_data"}) @@ -66,7 +66,7 @@ func TestDefaultDataDir(t *testing.T) { data, err = yaml.Marshal(topo) assert.Nil(t, err) - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) @@ -78,7 +78,7 @@ func TestDefaultDataDir(t *testing.T) { } func TestGlobalOptions(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -105,7 +105,7 @@ worker_servers: } func TestDirectoryConflicts(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -139,7 +139,7 @@ worker_servers: } func TestPortConflicts(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -159,7 +159,7 @@ worker_servers: func TestPlatformConflicts(t *testing.T) { // aarch64 and arm64 are equal - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: os: "linux" @@ -173,7 +173,7 @@ worker_servers: assert.Nil(t, err) // different arch defined for the same host - topo = Topology{} + topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" @@ -187,7 +187,7 @@ worker_servers: assert.Equal(t, "platform mismatch for '172.16.5.138' between 'master_servers:linux/arm64' and 'worker_servers:linux/amd64'", err.Error()) // different os defined for the same host - topo = Topology{} + topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" @@ -203,7 +203,7 @@ worker_servers: } func TestCountDir(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: @@ -296,8 +296,8 @@ func with2TempFile(content1, content2 string, fn func(string, string)) { }) } -func merge4test(base, scale string) (*Topology, error) { - baseTopo := Topology{} +func merge4test(base, scale string) (*Specification, error) { + baseTopo := Specification{} if err := spec.ParseTopologyYaml(base, &baseTopo); err != nil { return nil, err } @@ -312,7 +312,7 @@ func merge4test(base, scale string) (*Topology, error) { return nil, err } - return mergedTopo.(*Topology), nil + return mergedTopo.(*Specification), nil } func TestRelativePath(t *testing.T) { @@ -323,7 +323,7 @@ master_servers: worker_servers: - host: 172.16.5.140 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -339,7 +339,7 @@ master_servers: data_dir: my-data log_dir: my-log `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -356,7 +356,7 @@ global: master_servers: - host: 172.16.5.140 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -378,7 +378,7 @@ worker_servers: - 
host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -407,7 +407,7 @@ worker_servers: - host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -439,7 +439,7 @@ worker_servers: - host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) diff --git a/components/dm/task/update_dm_meta.go b/components/dm/task/update_dm_meta.go index 9e57c7cc17..c2456c4d34 100644 --- a/components/dm/task/update_dm_meta.go +++ b/components/dm/task/update_dm_meta.go @@ -19,6 +19,7 @@ import ( dmspec "github.com/pingcap/tiup/components/dm/spec" + "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" ) @@ -44,7 +45,7 @@ func (u *UpdateDMMeta) Execute(ctx *task.Context) error { // make a copy newMeta := &dmspec.Metadata{} *newMeta = *u.metadata - newMeta.Topology = &dmspec.Topology{ + newMeta.Topology = &dmspec.Specification{ GlobalOptions: u.metadata.Topology.GlobalOptions, // MonitoredOptions: u.metadata.Topology.MonitoredOptions, ServerConfigs: u.metadata.Topology.ServerConfigs, @@ -64,23 +65,23 @@ func (u *UpdateDMMeta) Execute(ctx *task.Context) error { } newMeta.Topology.Workers = append(newMeta.Topology.Workers, topo.Workers[i]) } - for i, instance := range (&dmspec.MonitorComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Monitors = append(newMeta.Topology.Monitors, topo.Monitors[i]) } - for i, instance := range (&dmspec.GrafanaComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Grafana = append(newMeta.Topology.Grafana, topo.Grafana[i]) + newMeta.Topology.Grafanas = append(newMeta.Topology.Grafanas, topo.Grafanas[i]) } - for i, instance := range (&dmspec.AlertManagerComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Alertmanager = append(newMeta.Topology.Alertmanager, topo.Alertmanager[i]) + newMeta.Topology.Alertmanagers = append(newMeta.Topology.Alertmanagers, topo.Alertmanagers[i]) } return dmspec.GetSpecManager().SaveMeta(u.cluster, newMeta) diff --git a/pkg/cluster/ansible/import.go b/pkg/cluster/ansible/import.go index 08d3588c84..af5005226e 100644 --- a/pkg/cluster/ansible/import.go +++ b/pkg/cluster/ansible/import.go @@ -65,8 +65,8 @@ func parseInventoryFile(invFile io.Reader) (string, *spec.ClusterMeta, *aini.Inv PumpServers: make([]spec.PumpSpec, 0), Drainers: make([]spec.DrainerSpec, 0), Monitors: make([]spec.PrometheusSpec, 0), - Grafana: make([]spec.GrafanaSpec, 0), - Alertmanager: make([]spec.AlertManagerSpec, 0), + Grafanas: make([]spec.GrafanaSpec, 0), + Alertmanagers: make([]spec.AlertmanagerSpec, 0), }, } clsName := "" diff --git a/pkg/cluster/ansible/import_test.go b/pkg/cluster/ansible/import_test.go index 600ab130de..018db032ca 100644 --- a/pkg/cluster/ansible/import_test.go +++ 
b/pkg/cluster/ansible/import_test.go @@ -140,10 +140,10 @@ func sortClusterMeta(clsMeta *spec.ClusterMeta) { sort.Slice(clsMeta.Topology.Monitors, func(i, j int) bool { return clsMeta.Topology.Monitors[i].Host < clsMeta.Topology.Monitors[j].Host }) - sort.Slice(clsMeta.Topology.Grafana, func(i, j int) bool { - return clsMeta.Topology.Grafana[i].Host < clsMeta.Topology.Grafana[j].Host + sort.Slice(clsMeta.Topology.Grafanas, func(i, j int) bool { + return clsMeta.Topology.Grafanas[i].Host < clsMeta.Topology.Grafanas[j].Host }) - sort.Slice(clsMeta.Topology.Alertmanager, func(i, j int) bool { - return clsMeta.Topology.Alertmanager[i].Host < clsMeta.Topology.Alertmanager[j].Host + sort.Slice(clsMeta.Topology.Alertmanagers, func(i, j int) bool { + return clsMeta.Topology.Alertmanagers[i].Host < clsMeta.Topology.Alertmanagers[j].Host }) } diff --git a/pkg/cluster/ansible/inventory.go b/pkg/cluster/ansible/inventory.go index 0f47101815..e3338b5df1 100644 --- a/pkg/cluster/ansible/inventory.go +++ b/pkg/cluster/ansible/inventory.go @@ -110,21 +110,21 @@ func ParseAndImportInventory(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, } clsMeta.Topology.Monitors[i] = ins.(spec.PrometheusSpec) } - for i := 0; i < len(clsMeta.Topology.Alertmanager); i++ { - s := clsMeta.Topology.Alertmanager[i] + for i := 0; i < len(clsMeta.Topology.Alertmanagers); i++ { + s := clsMeta.Topology.Alertmanagers[i] ins, err := parseDirs(clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } - clsMeta.Topology.Alertmanager[i] = ins.(spec.AlertManagerSpec) + clsMeta.Topology.Alertmanagers[i] = ins.(spec.AlertmanagerSpec) } - for i := 0; i < len(clsMeta.Topology.Grafana); i++ { - s := clsMeta.Topology.Grafana[i] + for i := 0; i < len(clsMeta.Topology.Grafanas); i++ { + s := clsMeta.Topology.Grafanas[i] ins, err := parseDirs(clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } - clsMeta.Topology.Grafana[i] = ins.(spec.GrafanaSpec) + clsMeta.Topology.Grafanas[i] = ins.(spec.GrafanaSpec) } // TODO: get values from templates of roles to overwrite defaults @@ -429,7 +429,7 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini if host == "" { host = srv.Name } - tmpIns := spec.AlertManagerSpec{ + tmpIns := spec.AlertmanagerSpec{ Host: host, SSHPort: getHostPort(srv, ansCfg), Imported: true, @@ -452,9 +452,9 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini log.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) - clsMeta.Topology.Alertmanager = append(clsMeta.Topology.Alertmanager, tmpIns) + clsMeta.Topology.Alertmanagers = append(clsMeta.Topology.Alertmanagers, tmpIns) } - log.Infof("Imported %d Alertmanager node(s).", len(clsMeta.Topology.Alertmanager)) + log.Infof("Imported %d Alertmanager node(s).", len(clsMeta.Topology.Alertmanagers)) } // grafana_servers @@ -485,9 +485,9 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini log.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) - clsMeta.Topology.Grafana = append(clsMeta.Topology.Grafana, tmpIns) + clsMeta.Topology.Grafanas = append(clsMeta.Topology.Grafanas, tmpIns) } - log.Infof("Imported %d Grafana node(s).", len(clsMeta.Topology.Alertmanager)) + log.Infof("Imported %d Grafana node(s).", len(clsMeta.Topology.Alertmanagers)) } // kafka_exporter_servers diff --git a/pkg/cluster/ansible/service.go b/pkg/cluster/ansible/service.go index b59bfec22c..d985b914e1 100644 --- 
a/pkg/cluster/ansible/service.go +++ b/pkg/cluster/ansible/service.go @@ -214,9 +214,9 @@ func parseDirs(user string, ins spec.InstanceSpec, sshTimeout uint64, sshType ex } } return newIns, nil - case spec.ComponentAlertManager: + case spec.ComponentAlertmanager: // parse dirs - newIns := ins.(spec.AlertManagerSpec) + newIns := ins.(spec.AlertmanagerSpec) for _, line := range strings.Split(string(stdout), "\n") { if strings.HasPrefix(line, "DEPLOY_DIR=") { newIns.DeployDir = strings.TrimPrefix(line, "DEPLOY_DIR=") diff --git a/pkg/cluster/embed/autogen_pkger.go b/pkg/cluster/embed/autogen_pkger.go index 079c83dc0a..e574df3297 100644 --- a/pkg/cluster/embed/autogen_pkger.go +++ b/pkg/cluster/embed/autogen_pkger.go @@ -20,13 +20,11 @@ func init() { autogenFiles["/templates/config/blackbox.yml"] = "bW9kdWxlczoKICAgIGh0dHBfMnh4OgogICAgICBwcm9iZXI6IGh0dHAKICAgICAgaHR0cDoKICAgICAgICBtZXRob2Q6IEdFVAogICAgaHR0cF9wb3N0XzJ4eDoKICAgICAgcHJvYmVyOiBodHRwCiAgICAgIGh0dHA6CiAgICAgICAgbWV0aG9kOiBQT1NUCiAgICB0Y3BfY29ubmVjdDoKICAgICAgcHJvYmVyOiB0Y3AKICAgIHBvcDNzX2Jhbm5lcjoKICAgICAgcHJvYmVyOiB0Y3AKICAgICAgdGNwOgogICAgICAgIHF1ZXJ5X3Jlc3BvbnNlOgogICAgICAgIC0gZXhwZWN0OiAiXitPSyIKICAgICAgICB0bHM6IHRydWUKICAgICAgICB0bHNfY29uZmlnOgogICAgICAgICAgaW5zZWN1cmVfc2tpcF92ZXJpZnk6IGZhbHNlCiAgICBzc2hfYmFubmVyOgogICAgICBwcm9iZXI6IHRjcAogICAgICB0Y3A6CiAgICAgICAgcXVlcnlfcmVzcG9uc2U6CiAgICAgICAgLSBleHBlY3Q6ICJeU1NILTIuMC0iCiAgICBpcmNfYmFubmVyOgogICAgICBwcm9iZXI6IHRjcAogICAgICB0Y3A6CiAgICAgICAgcXVlcnlfcmVzcG9uc2U6CiAgICAgICAgLSBzZW5kOiAiTklDSyBwcm9iZXIiCiAgICAgICAgLSBzZW5kOiAiVVNFUiBwcm9iZXIgcHJvYmVyIHByb2JlciA6cHJvYmVyIgogICAgICAgIC0gZXhwZWN0OiAiUElORyA6KFteIF0rKSIKICAgICAgICAgIHNlbmQ6ICJQT05HICR7MX0iCiAgICAgICAgLSBleHBlY3Q6ICJeOlteIF0rIDAwMSIKICAgIGljbXA6CiAgICAgIHByb2JlcjogaWNtcAogICAgICB0aW1lb3V0OiA1cwogICAgICBpY21wOgogICAgICAgIHByZWZlcnJlZF9pcF9wcm90b2NvbDogImlwNCI=" autogenFiles["/templates/config/dashboard.yml.tpl"] = "YXBpVmVyc2lvbjogMQpwcm92aWRlcnM6CiAgLSBuYW1lOiB7ey5DbHVzdGVyTmFtZX19CiAgICBmb2xkZXI6IHt7LkNsdXN0ZXJOYW1lfX0KICAgIHR5cGU6IGZpbGUKICAgIGRpc2FibGVEZWxldGlvbjogZmFsc2UKICAgIGVkaXRhYmxlOiB0cnVlCiAgICB1cGRhdGVJbnRlcnZhbFNlY29uZHM6IDMwCiAgICBvcHRpb25zOgogICAgICBwYXRoOiB7ey5EZXBsb3lEaXJ9fS9kYXNoYm9hcmRz" autogenFiles["/templates/config/datasource.yml.tpl"] = "YXBpVmVyc2lvbjogMQpkZWxldGVEYXRhc291cmNlczoKICAtIG5hbWU6IHt7LkNsdXN0ZXJOYW1lfX0KZGF0YXNvdXJjZXM6CiAgLSBuYW1lOiB7ey5DbHVzdGVyTmFtZX19CiAgICB0eXBlOiBwcm9tZXRoZXVzCiAgICBhY2Nlc3M6IHByb3h5CiAgICB1cmw6IGh0dHA6Ly97ey5JUH19Ont7LlBvcnR9fQogICAgd2l0aENyZWRlbnRpYWxzOiBmYWxzZQogICAgaXNEZWZhdWx0OiBmYWxzZQogICAgdGxzQXV0aDogZmFsc2UKICAgIHRsc0F1dGhXaXRoQ0FDZXJ0OiBmYWxzZQogICAgdmVyc2lvbjogMQogICAgZWRpdGFibGU6IHRydWU=" - autogenFiles["/templates/config/dm/prometheus.yml.tpl"] = 
"LS0tCmdsb2JhbDoKICBzY3JhcGVfaW50ZXJ2YWw6ICAgICAxNXMgIyBCeSBkZWZhdWx0LCBzY3JhcGUgdGFyZ2V0cyBldmVyeSAxNSBzZWNvbmRzLgogIGV2YWx1YXRpb25faW50ZXJ2YWw6IDE1cyAjIEJ5IGRlZmF1bHQsIHNjcmFwZSB0YXJnZXRzIGV2ZXJ5IDE1IHNlY29uZHMuCiAgIyBzY3JhcGVfdGltZW91dCBpcyBzZXQgdG8gdGhlIGdsb2JhbCBkZWZhdWx0ICgxMHMpLgogIGV4dGVybmFsX2xhYmVsczoKICAgIGNsdXN0ZXI6ICd7ey5DbHVzdGVyTmFtZX19JwogICAgbW9uaXRvcjogInByb21ldGhldXMiCgojIExvYWQgYW5kIGV2YWx1YXRlIHJ1bGVzIGluIHRoaXMgZmlsZSBldmVyeSAnZXZhbHVhdGlvbl9pbnRlcnZhbCcgc2Vjb25kcy4KcnVsZV9maWxlczoKICAtICdkbV93b3JrZXIucnVsZXMueW1sJwogIC0gJ2RtX21hc3Rlci5ydWxlcy55bWwnCgp7ey0gaWYgLkFsZXJ0bWFuYWdlckFkZHJzfX0KYWxlcnRpbmc6CiBhbGVydG1hbmFnZXJzOgogLSBzdGF0aWNfY29uZmlnczoKICAgLSB0YXJnZXRzOgp7ey0gcmFuZ2UgLkFsZXJ0bWFuYWdlckFkZHJzfX0KICAgICAtICd7ey59fScKe3stIGVuZH19Cnt7LSBlbmR9fQoKc2NyYXBlX2NvbmZpZ3M6Cnt7LSBpZiAuTWFzdGVyQWRkcnN9fQogIC0gam9iX25hbWU6ICJkbV9tYXN0ZXIiCiAgICBob25vcl9sYWJlbHM6IHRydWUgIyBkb24ndCBvdmVyd3JpdGUgam9iICYgaW5zdGFuY2UgbGFiZWxzCiAgICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgIHt7LSByYW5nZSAuTWFzdGVyQWRkcnN9fQogICAgICAgLSAne3sufX0nCiAgICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7ey0gaWYgLldvcmtlckFkZHJzfX0KICAtIGpvYl9uYW1lOiAiZG1fd29ya2VyIgogICAgaG9ub3JfbGFiZWxzOiB0cnVlICMgZG9uJ3Qgb3ZlcndyaXRlIGpvYiAmIGluc3RhbmNlIGxhYmVscwogICAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICB7ey0gcmFuZ2UgLldvcmtlckFkZHJzfX0KICAgICAgIC0gJ3t7Ln19JwogICAge3stIGVuZH19Cnt7LSBlbmR9fQo=" + autogenFiles["/templates/config/dm/prometheus.yml.tpl"] = "LS0tCmdsb2JhbDoKICBzY3JhcGVfaW50ZXJ2YWw6ICAgICAxNXMgIyBCeSBkZWZhdWx0LCBzY3JhcGUgdGFyZ2V0cyBldmVyeSAxNSBzZWNvbmRzLgogIGV2YWx1YXRpb25faW50ZXJ2YWw6IDE1cyAjIEJ5IGRlZmF1bHQsIHNjcmFwZSB0YXJnZXRzIGV2ZXJ5IDE1IHNlY29uZHMuCiAgIyBzY3JhcGVfdGltZW91dCBpcyBzZXQgdG8gdGhlIGdsb2JhbCBkZWZhdWx0ICgxMHMpLgogIGV4dGVybmFsX2xhYmVsczoKICAgIGNsdXN0ZXI6ICd7ey5DbHVzdGVyTmFtZX19JwogICAgbW9uaXRvcjogInByb21ldGhldXMiCgojIExvYWQgYW5kIGV2YWx1YXRlIHJ1bGVzIGluIHRoaXMgZmlsZSBldmVyeSAnZXZhbHVhdGlvbl9pbnRlcnZhbCcgc2Vjb25kcy4KcnVsZV9maWxlczoKICAtICdkbV93b3JrZXIucnVsZXMueW1sJwogIC0gJ2RtX21hc3Rlci5ydWxlcy55bWwnCgp7ey0gaWYgLkFsZXJ0bWFuYWdlckFkZHJzfX0KYWxlcnRpbmc6CiAgYWxlcnRtYW5hZ2VyczoKICAtIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgp7ey0gcmFuZ2UgLkFsZXJ0bWFuYWdlckFkZHJzfX0KICAgIC0gJ3t7Ln19Jwp7ey0gZW5kfX0Ke3stIGVuZH19CgpzY3JhcGVfY29uZmlnczoKe3stIGlmIC5NYXN0ZXJBZGRyc319CiAgLSBqb2JfbmFtZTogImRtX21hc3RlciIKICAgIGhvbm9yX2xhYmVsczogdHJ1ZSAjIGRvbid0IG92ZXJ3cml0ZSBqb2IgJiBpbnN0YW5jZSBsYWJlbHMKICAgIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAge3stIHJhbmdlIC5NYXN0ZXJBZGRyc319CiAgICAgICAtICd7ey59fScKICAgIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuV29ya2VyQWRkcnN9fQogIC0gam9iX25hbWU6ICJkbV93b3JrZXIiCiAgICBob25vcl9sYWJlbHM6IHRydWUgIyBkb24ndCBvdmVyd3JpdGUgam9iICYgaW5zdGFuY2UgbGFiZWxzCiAgICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgIHt7LSByYW5nZSAuV29ya2VyQWRkcnN9fQogICAgICAgLSAne3sufX0nCiAgICB7ey0gZW5kfX0Ke3stIGVuZH19Cg==" autogenFiles["/templates/config/grafana.ini.tpl"] = "##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change

# possible values : production, development
; app_mode = production

# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
; instance_name = ${HOSTNAME}

#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
data = {{.DeployDir}}/data
#
# Directory where grafana can store logs
#
logs = {{.DeployDir}}/logs
#
# Directory where grafana will automatically scan and look for plugins
#
plugins = {{.DeployDir}}/plugins
#
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = {{.DeployDir}}/provisioning
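
The {{.DeployDir}}, {{.IP}} and {{.Port}} placeholders in this template are Go text/template fields that TiUP fills in when it writes grafana.ini for a deployed instance. A minimal standalone sketch of that rendering step, with an illustrative struct and made-up values rather than the actual TiUP config types:

package main

import (
	"os"
	"text/template"
)

// grafanaData mirrors the placeholders used in grafana.ini.tpl above;
// it is a stand-in for whatever config struct TiUP actually passes.
type grafanaData struct {
	DeployDir string
	IP        string
	Port      int
}

func main() {
	tpl := template.Must(template.ParseFiles("grafana.ini.tpl"))
	data := grafanaData{
		DeployDir: "/tidb-deploy/grafana-3000", // assumed deploy dir
		IP:        "10.0.1.5",                  // assumed host address
		Port:      3000,
	}
	if err := tpl.Execute(os.Stdout, &data); err != nil {
		panic(err)
	}
}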

#
#################################### Server ####################################
[server]
# Protocol (http or https)
;protocol = http

# The ip address to bind to, empty will bind to all interfaces
;http_addr =

# The http port  to use
http_port = {{.Port}}

# The public facing domain name used to access grafana from a browser
domain = {{.IP}}

# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false

# The full public facing url
;root_url = %(protocol)s://%(domain)s:%(http_port)s/

# Log web requests
;router_logging = false

# the path relative working path
;static_root_path = public

# enable gzip
;enable_gzip = false

# https certs & key file
;cert_file =
;cert_key =

#################################### Database ####################################
[database]
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
;password =

# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable

# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file

# Provider config options
# memory: not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions

# Session cookie name
;cookie_name = grafana_sess

# If you use session in https only, default is false
;cookie_secure = false

# Session life time, default is 86400
;session_life_time = 86400

#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true

# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.net to get latest versions
check_for_updates = true

# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =

#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin

# default admin password, can be changed before first start of grafana,  or in profile settings
;admin_password = admin

# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm

# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember

# disable gravatar profile images
;disable_gravatar = false

# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =

[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io

#################################### Users ####################################
[users]
# disable user signup / registration
;allow_sign_up = true

# Allow non admin users to create organizations
;allow_org_create = true

# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true

# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer

# Background text for the user field on the login page
;login_hint = email or username

# Default UI theme ("dark" or "light")
;default_theme = dark

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false

# specify organization name that should be used for unauthenticated users
;org_name = Main Org.

# specify role for unauthenticated users
;org_role = Viewer

#################################### Basic Auth ##########################
[auth.basic]
;enabled = true

#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml

#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost

[emails]
;welcome_email_on_sign_up = false

#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and  file
# Use space to separate multiple modes, e.g. "console file"
mode = file

# Either "trace", "debug", "info", "warn", "error", "critical", default is "info"
;level = info

# For "console" mode only
[log.console]
;level =

# log line format, valid options are text, console and json
;format = console

# For "file" mode only
[log.file]
level = info

# log line format, valid options are text, console and json
format = text

# This enables automated log rotate(switch of following options), default is true
;log_rotate = true

# Max line number of single file, default is 1000000
;max_lines = 1000000

# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28

# Segment log daily, default is true
;daily_rotate = true

# Expired days of log file(delete after max days), default is 7
;max_days = 7

[log.syslog]
;level =

# log line format, valid options are text, console and json
;format = text

# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =

# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =

# Syslog tag. By default, the process' argv[0] is used.
;tag =


#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
;exchange = grafana_events

;#################################### Dashboard JSON files ##########################
[dashboards.json]
enabled = false
path = {{.DeployDir}}/dashboards
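
The {{.DeployDir}}/dashboards path above is where cluster dashboards end up; the grafana_test.go hunk further below checks that a tidb.json placed in a local DashboardDir is copied into <deploy>/dashboards by initDashboards. A rough, hedged approximation of that copy step (not the patch's actual implementation):

package main

import (
	"os"
	"path/filepath"
)

// copyDashboards copies every *.json dashboard from a local directory into
// <deployDir>/dashboards, roughly what the test expects initDashboards to do.
func copyDashboards(localDir, deployDir string) error {
	dstDir := filepath.Join(deployDir, "dashboards")
	if err := os.MkdirAll(dstDir, 0755); err != nil {
		return err
	}
	files, err := filepath.Glob(filepath.Join(localDir, "*.json"))
	if err != nil {
		return err
	}
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err != nil {
			return err
		}
		if err := os.WriteFile(filepath.Join(dstDir, filepath.Base(f)), data, 0644); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Paths are hypothetical; shown only to make the sketch runnable.
	if err := copyDashboards("./local-dashboards", "/tidb-deploy/grafana-3000"); err != nil {
		panic(err)
	}
}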

#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
[metrics]
# Disable / Enable internal metrics
;enabled           = true

# Publish interval
;interval_seconds  = 10

# Send internal metrics to Graphite
; [metrics.graphite]
; address = localhost:2003
; prefix = prod.grafana.%(instance_name)s.

#################################### Internal Grafana Metrics ##########################
# Url used to import dashboards directly from Grafana.net
[grafana_net]
url = https://grafana.net"
-	autogenFiles["/templates/config/prometheus.yml.tpl"] = "---
global:
  scrape_interval:     15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).
  external_labels:
    cluster: '{{.ClusterName}}'
    monitor: "prometheus"

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - 'node.rules.yml'
  - 'blacker.rules.yml'
  - 'bypass.rules.yml'
  - 'pd.rules.yml'
  - 'tidb.rules.yml'
  - 'tikv.rules.yml'
  - 'tikv.accelerate.rules.yml'
{{- if .TiFlashStatusAddrs}}
  - 'tiflash.rules.yml'
{{- end}}
{{- if .PumpAddrs}}
  - 'binlog.rules.yml'
{{- end}}
{{- if .CDCAddrs}}
  - 'ticdc.rules.yml'
{{- end}}
{{- if .KafkaAddrs}}
  - 'kafka.rules.yml'
{{- end}}
{{- if .LightningAddrs}}
  - 'lightning.rules.yml'
{{- end}}

{{- if .AlertmanagerAddrs}}
alerting:
 alertmanagers:
 - static_configs:
   - targets:
{{- range .AlertmanagerAddrs}}
     - '{{.}}'
{{- end}}
{{- end}}

scrape_configs:
{{- if .PushgatewayAddr}}
  - job_name: 'overwritten-cluster'
    scrape_interval: 15s
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
      - targets: ['{{.PushgatewayAddr}}']

  - job_name: "blackbox_exporter_http"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
    - targets:
      - 'http://{{.PushgatewayAddr}}/metrics'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .LightningAddrs}}
  - job_name: "lightning"
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
      - targets: ['{{index .LightningAddrs 0}}']
{{- end}}
  - job_name: "overwritten-nodes"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
{{- range .NodeExporterAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tidb"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiDBStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tikv"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiKVStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "pd"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .PDAddrs}}
      - '{{.}}'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - job_name: "tiflash"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
    {{- range .TiFlashLearnerStatusAddrs}}
       - '{{.}}'
    {{- end}}
{{- end}}
{{- if .PumpAddrs}}
{{- if .KafkaExporterAddr}}
  - job_name: 'kafka_exporter'
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
      - '{{.KafkaExporterAddr}}'
{{- end}}
  - job_name: 'pump'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .PumpAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: 'drainer'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: "port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
{{- if .KafkaAddrs}}
    - targets:
    {{- range .KafkaAddrs}}
        - '{{.}}'
    {{- end}}
      labels:
        group: 'kafka'
{{- end}}
{{- if .ZookeeperAddrs}}
    - targets:
    {{- range .ZookeeperAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'zookeeper'
{{- end}}
    - targets:
{{- range .PumpAddrs}}
      - '{{.}}'
{{- end}}
      labels:
        group: 'pump'
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'drainer'
{{- if .KafkaExporterAddr}}
    - targets:
      - '{{.KafkaExporterAddr}}'
      labels:
        group: 'kafka_exporter'
{{- end}}
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .CDCAddrs}}
  - job_name: "ticdc"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .CDCAddrs}}
      - '{{.}}'
{{- end}}
{{- end}}
  - job_name: "tidb_port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
    - targets:
    {{- range .TiDBStatusAddrs}}
      - '{{.}}' 
    {{- end}}
      labels:
        group: 'tidb'
    - targets:
    {{- range .TiKVStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tikv'
    - targets:
    {{- range .PDAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'pd'
{{- if .TiFlashStatusAddrs}}
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
      labels:
        group: 'tiflash'
{{- end}}
{{- if .PushgatewayAddr}}
    - targets:
      - '{{.PushgatewayAddr}}'
      labels:
        group: 'pushgateway'
{{- end}}
{{- if .GrafanaAddr}}
    - targets:
      - '{{.GrafanaAddr}}'
      labels:
        group: 'grafana'
{{- end}}
    - targets:
    {{- range .NodeExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'node_exporter'
    - targets:
    {{- range .BlackboxExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'blackbox_exporter'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- range $addr := .BlackboxExporterAddrs}}
  - job_name: "blackbox_exporter_{{$addr}}_icmp"
    scrape_interval: 6s
    metrics_path: /probe
    params:
      module: [icmp]
    static_configs:
    - targets:
    {{- range $.MonitoredServers}}
      - '{{.}}'
    {{- end}}
    relabel_configs:
      - source_labels: [__address__]
        regex: (.*)(:80)?
        target_label: __param_target
        replacement: ${1}
      - source_labels: [__param_target]
        regex: (.*)
        target_label: ping
        replacement: ${1}
      - source_labels: []
        regex: .*
        target_label: __address__
        replacement: {{$addr}}
{{- end}}" + autogenFiles["/templates/config/prometheus.yml.tpl"] = "---
global:
  scrape_interval:     15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).
  external_labels:
    cluster: '{{.ClusterName}}'
    monitor: "prometheus"

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
{{- if .MonitoredServers}}
  - 'node.rules.yml'
  - 'blacker.rules.yml'
  - 'bypass.rules.yml'
{{- end}}
{{- if .PDAddrs}}
  - 'pd.rules.yml'
{{- end}}
{{- if .TiDBStatusAddrs}}
  - 'tidb.rules.yml'
{{- end}}
{{- if .TiKVStatusAddrs}}
  - 'tikv.rules.yml'
  - 'tikv.accelerate.rules.yml'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - 'tiflash.rules.yml'
{{- end}}
{{- if .PumpAddrs}}
  - 'binlog.rules.yml'
{{- end}}
{{- if .CDCAddrs}}
  - 'ticdc.rules.yml'
{{- end}}
{{- if .KafkaAddrs}}
  - 'kafka.rules.yml'
{{- end}}
{{- if .LightningAddrs}}
  - 'lightning.rules.yml'
{{- end}}
{{- if .DMWorkerAddrs}}
  - 'dm_worker.rules.yml'
{{- end}}
{{- if .DMMasterAddrs}}
  - 'dm_master.rules.yml'
{{- end}}
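
Unlike the deleted template above, every rule file here is wrapped in an {{- if ...}} guard, so a Prometheus deployed for a DM-only (or TiDB-only) topology only loads the rules that apply to it. A quick standalone render showing the effect of those guards, with illustrative values:

package main

import (
	"os"
	"text/template"
)

const rules = `rule_files:
{{- if .TiDBStatusAddrs}}
  - 'tidb.rules.yml'
{{- end}}
{{- if .DMMasterAddrs}}
  - 'dm_master.rules.yml'
{{- end}}`

func main() {
	tpl := template.Must(template.New("rules").Parse(rules))
	// A DM-only topology: no TiDB status addresses, one DM master.
	data := map[string][]string{
		"DMMasterAddrs": {"10.0.1.10:8261"},
	}
	// Prints only the dm_master rule file; the tidb entry is skipped.
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}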

{{- if .AlertmanagerAddrs}}
alerting:
 alertmanagers:
 - static_configs:
   - targets:
{{- range .AlertmanagerAddrs}}
     - '{{.}}'
{{- end}}
{{- end}}

scrape_configs:
{{- if .PushgatewayAddr}}
  - job_name: 'overwritten-cluster'
    scrape_interval: 15s
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
      - targets: ['{{.PushgatewayAddr}}']

  - job_name: "blackbox_exporter_http"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
    - targets:
      - 'http://{{.PushgatewayAddr}}/metrics'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
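
The relabel_configs in the pushgateway/blackbox job above follow the usual blackbox-exporter pattern: the original address is moved into the probe's target parameter and the instance label, and __address__ is rewritten to the exporter itself. For a single target this works out roughly as follows (addresses are made up for illustration):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	blackboxAddr := "10.0.1.5:9115"          // stands in for {{.BlackboxAddr}}
	target := "http://10.0.1.5:9091/metrics" // stands in for the pushgateway metrics URL
	q := url.Values{}
	q.Set("module", "http_2xx")
	q.Set("target", target)
	// Effective scrape URL after relabelling: the blackbox exporter probes
	// the pushgateway on Prometheus' behalf.
	fmt.Printf("http://%s/probe?%s\n", blackboxAddr, q.Encode())
	// The "instance" label on the resulting series stays the original target.
	fmt.Println("instance =", target)
}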
{{- if .LightningAddrs}}
  - job_name: "lightning"
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
      - targets: ['{{index .LightningAddrs 0}}']
{{- end}}
  - job_name: "overwritten-nodes"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
{{- range .NodeExporterAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tidb"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiDBStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tikv"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiKVStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "pd"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .PDAddrs}}
      - '{{.}}'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - job_name: "tiflash"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
    {{- range .TiFlashLearnerStatusAddrs}}
       - '{{.}}'
    {{- end}}
{{- end}}
{{- if .PumpAddrs}}
{{- if .KafkaExporterAddr}}
  - job_name: 'kafka_exporter'
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
      - '{{.KafkaExporterAddr}}'
{{- end}}
  - job_name: 'pump'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .PumpAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: 'drainer'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: "port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
{{- if .KafkaAddrs}}
    - targets:
    {{- range .KafkaAddrs}}
        - '{{.}}'
    {{- end}}
      labels:
        group: 'kafka'
{{- end}}
{{- if .ZookeeperAddrs}}
    - targets:
    {{- range .ZookeeperAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'zookeeper'
{{- end}}
    - targets:
{{- range .PumpAddrs}}
      - '{{.}}'
{{- end}}
      labels:
        group: 'pump'
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'drainer'
{{- if .KafkaExporterAddr}}
    - targets:
      - '{{.KafkaExporterAddr}}'
      labels:
        group: 'kafka_exporter'
{{- end}}
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .CDCAddrs}}
  - job_name: "ticdc"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .CDCAddrs}}
      - '{{.}}'
{{- end}}
{{- end}}
  - job_name: "tidb_port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
    - targets:
    {{- range .TiDBStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tidb'
    - targets:
    {{- range .TiKVStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tikv'
    - targets:
    {{- range .PDAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'pd'
{{- if .TiFlashStatusAddrs}}
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
      labels:
        group: 'tiflash'
{{- end}}
{{- if .PushgatewayAddr}}
    - targets:
      - '{{.PushgatewayAddr}}'
      labels:
        group: 'pushgateway'
{{- end}}
{{- if .GrafanaAddr}}
    - targets:
      - '{{.GrafanaAddr}}'
      labels:
        group: 'grafana'
{{- end}}
    - targets:
    {{- range .NodeExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'node_exporter'
    - targets:
    {{- range .BlackboxExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'blackbox_exporter'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- range $addr := .BlackboxExporterAddrs}}
  - job_name: "blackbox_exporter_{{$addr}}_icmp"
    scrape_interval: 6s
    metrics_path: /probe
    params:
      module: [icmp]
    static_configs:
    - targets:
    {{- range $.MonitoredServers}}
      - '{{.}}'
    {{- end}}
    relabel_configs:
      - source_labels: [__address__]
        regex: (.*)(:80)?
        target_label: __param_target
        replacement: ${1}
      - source_labels: [__param_target]
        regex: (.*)
        target_label: ping
        replacement: ${1}
      - source_labels: []
        regex: .*
        target_label: __address__
        replacement: {{$addr}}
{{- end}}
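
The icmp block above emits one job per blackbox exporter, and the inner loop reaches back to the root template context with $.MonitoredServers from inside range $addr := .BlackboxExporterAddrs. A small standalone render of that nesting, with invented addresses:

package main

import (
	"os"
	"text/template"
)

const icmpJobs = `{{- range $addr := .BlackboxExporterAddrs}}
  - job_name: "blackbox_exporter_{{$addr}}_icmp"
    static_configs:
    - targets:
    {{- range $.MonitoredServers}}
      - '{{.}}'
    {{- end}}
{{- end}}`

func main() {
	data := map[string][]string{
		"BlackboxExporterAddrs": {"10.0.1.5:9115", "10.0.1.6:9115"},
		"MonitoredServers":      {"10.0.1.5", "10.0.1.6"},
	}
	// Produces one icmp job per exporter, each probing every monitored host.
	if err := template.Must(template.New("icmp").Parse(icmpJobs)).Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}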

{{- if .DMMasterAddrs}}
  - job_name: "dm_master"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
    {{- range .DMMasterAddrs}}
      - '{{.}}'
    {{- end}}
{{- end}}

{{- if .DMWorkerAddrs}}
  - job_name: "dm_worker"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
    {{- range .DMWorkerAddrs}}
      - '{{.}}'
    {{- end}}
{{- end}}
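
With the dm_master and dm_worker jobs folded into the shared template, the spec code in the hunks further below (pkg/cluster/spec/prometheus.go and grafana.go) can no longer assume a *Specification: it holds a generic Topology and looks up optional slice fields such as "PDServers" or "Monitors" by name, via a findSliceField helper wrapped as topoHasField. A standalone approximation of that lookup, assuming a plain reflect-based field search (the real helper may differ in detail):

package main

import (
	"fmt"
	"reflect"
)

// findSliceFieldByName is an illustrative stand-in for the patch's
// findSliceField: given a topology struct (or pointer to one), return
// the slice field with the given name, if it exists.
func findSliceFieldByName(topo interface{}, name string) (reflect.Value, bool) {
	v := reflect.ValueOf(topo)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return reflect.Value{}, false
	}
	f := v.FieldByName(name)
	if !f.IsValid() || f.Kind() != reflect.Slice {
		return reflect.Value{}, false
	}
	return f, true
}

// demoTopology is a toy topology; the real fields hold PDSpec values,
// strings keep the demo short.
type demoTopology struct {
	PDServers []string
}

func main() {
	topo := &demoTopology{PDServers: []string{"10.0.1.2:2379"}}
	if servers, found := findSliceFieldByName(topo, "PDServers"); found {
		for i := 0; i < servers.Len(); i++ {
			fmt.Println("PD:", servers.Index(i).Interface())
		}
	}
	// A DM topology has no PDServers field, so the lookup reports false
	// and the corresponding scrape job is simply skipped.
}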
" autogenFiles["/templates/config/spark-defaults.conf.tpl"] = "IwojIExpY2Vuc2VkIHRvIHRoZSBBcGFjaGUgU29mdHdhcmUgRm91bmRhdGlvbiAoQVNGKSB1bmRlciBvbmUgb3IgbW9yZQojIGNvbnRyaWJ1dG9yIGxpY2Vuc2UgYWdyZWVtZW50cy4gIFNlZSB0aGUgTk9USUNFIGZpbGUgZGlzdHJpYnV0ZWQgd2l0aAojIHRoaXMgd29yayBmb3IgYWRkaXRpb25hbCBpbmZvcm1hdGlvbiByZWdhcmRpbmcgY29weXJpZ2h0IG93bmVyc2hpcC4KIyBUaGUgQVNGIGxpY2Vuc2VzIHRoaXMgZmlsZSB0byBZb3UgdW5kZXIgdGhlIEFwYWNoZSBMaWNlbnNlLCBWZXJzaW9uIDIuMAojICh0aGUgIkxpY2Vuc2UiKTsgeW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoCiMgdGhlIExpY2Vuc2UuICBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgIGh0dHA6Ly93d3cuYXBhY2hlLm9yZy9saWNlbnNlcy9MSUNFTlNFLTIuMAojCiMgVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQojIGRpc3RyaWJ1dGVkIHVuZGVyIHRoZSBMaWNlbnNlIGlzIGRpc3RyaWJ1dGVkIG9uIGFuICJBUyBJUyIgQkFTSVMsCiMgV0lUSE9VVCBXQVJSQU5USUVTIE9SIENPTkRJVElPTlMgT0YgQU5ZIEtJTkQsIGVpdGhlciBleHByZXNzIG9yIGltcGxpZWQuCiMgU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAojIGxpbWl0YXRpb25zIHVuZGVyIHRoZSBMaWNlbnNlLgojCgojIERlZmF1bHQgc3lzdGVtIHByb3BlcnRpZXMgaW5jbHVkZWQgd2hlbiBydW5uaW5nIHNwYXJrLXN1Ym1pdC4KIyBUaGlzIGlzIHVzZWZ1bCBmb3Igc2V0dGluZyBkZWZhdWx0IGVudmlyb25tZW50YWwgc2V0dGluZ3MuCgojIEV4YW1wbGU6CiNzcGFyay5ldmVudExvZy5kaXI6ICJoZGZzOi8vbmFtZW5vZGU6ODAyMS9kaXJlY3RvcnkiCiMgc3BhcmsuZXhlY3V0b3IuZXh0cmFKYXZhT3B0aW9ucyAgLVhYOitQcmludEdDRGV0YWlscyAtRGtleT12YWx1ZSAtRG51bWJlcnM9Im9uZSB0d28gdGhyZWUiCgp7ey0gZGVmaW5lICJQRExpc3QifX0KICB7ey0gcmFuZ2UgJGlkeCwgJHBkIDo9IC59fQogICAge3stIGlmIGVxICRpZHggMH19CiAgICAgIHt7LSAkcGR9fQogICAge3stIGVsc2UgLX19CiAgICAgICx7eyRwZH19CiAgICB7ey0gZW5kfX0KICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7eyByYW5nZSAkaywgJHYgOj0gLkN1c3RvbUZpZWxkc319Cnt7ICRrIH19ICAge3sgJHYgfX0Ke3stIGVuZCB9fQpzcGFyay5zcWwuZXh0ZW5zaW9ucyAgIG9yZy5hcGFjaGUuc3Bhcmsuc3FsLlRpRXh0ZW5zaW9ucwoKe3stIGlmIC5UaVNwYXJrTWFzdGVyc319CnNwYXJrLm1hc3RlciAgIHNwYXJrOi8ve3suVGlTcGFya01hc3RlcnN9fQp7ey0gZW5kfX0KCnNwYXJrLnRpc3BhcmsucGQuYWRkcmVzc2VzIHt7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319Cg==" autogenFiles["/templates/config/spark-log4j.properties.tpl"] = 
"IwojIExpY2Vuc2VkIHRvIHRoZSBBcGFjaGUgU29mdHdhcmUgRm91bmRhdGlvbiAoQVNGKSB1bmRlciBvbmUgb3IgbW9yZQojIGNvbnRyaWJ1dG9yIGxpY2Vuc2UgYWdyZWVtZW50cy4gIFNlZSB0aGUgTk9USUNFIGZpbGUgZGlzdHJpYnV0ZWQgd2l0aAojIHRoaXMgd29yayBmb3IgYWRkaXRpb25hbCBpbmZvcm1hdGlvbiByZWdhcmRpbmcgY29weXJpZ2h0IG93bmVyc2hpcC4KIyBUaGUgQVNGIGxpY2Vuc2VzIHRoaXMgZmlsZSB0byBZb3UgdW5kZXIgdGhlIEFwYWNoZSBMaWNlbnNlLCBWZXJzaW9uIDIuMAojICh0aGUgIkxpY2Vuc2UiKTsgeW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoCiMgdGhlIExpY2Vuc2UuICBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgIGh0dHA6Ly93d3cuYXBhY2hlLm9yZy9saWNlbnNlcy9MSUNFTlNFLTIuMAojCiMgVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQojIGRpc3RyaWJ1dGVkIHVuZGVyIHRoZSBMaWNlbnNlIGlzIGRpc3RyaWJ1dGVkIG9uIGFuICJBUyBJUyIgQkFTSVMsCiMgV0lUSE9VVCBXQVJSQU5USUVTIE9SIENPTkRJVElPTlMgT0YgQU5ZIEtJTkQsIGVpdGhlciBleHByZXNzIG9yIGltcGxpZWQuCiMgU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAojIGxpbWl0YXRpb25zIHVuZGVyIHRoZSBMaWNlbnNlLgojCgojIFNldCBldmVyeXRoaW5nIHRvIGJlIGxvZ2dlZCB0byB0aGUgY29uc29sZQpsb2c0ai5yb290Q2F0ZWdvcnk9SU5GTywgY29uc29sZQpsb2c0ai5hcHBlbmRlci5jb25zb2xlPW9yZy5hcGFjaGUubG9nNGouQ29uc29sZUFwcGVuZGVyCmxvZzRqLmFwcGVuZGVyLmNvbnNvbGUudGFyZ2V0PVN5c3RlbS5lcnIKbG9nNGouYXBwZW5kZXIuY29uc29sZS5sYXlvdXQ9b3JnLmFwYWNoZS5sb2c0ai5QYXR0ZXJuTGF5b3V0CmxvZzRqLmFwcGVuZGVyLmNvbnNvbGUubGF5b3V0LkNvbnZlcnNpb25QYXR0ZXJuPSVke3l5L01NL2RkIEhIOm1tOnNzfSAlcCAlY3sxfTogJW0lbgoKIyBTZXQgdGhlIGRlZmF1bHQgc3Bhcmstc2hlbGwgbG9nIGxldmVsIHRvIFdBUk4uIFdoZW4gcnVubmluZyB0aGUgc3Bhcmstc2hlbGwsIHRoZQojIGxvZyBsZXZlbCBmb3IgdGhpcyBjbGFzcyBpcyB1c2VkIHRvIG92ZXJ3cml0ZSB0aGUgcm9vdCBsb2dnZXIncyBsb2cgbGV2ZWwsIHNvIHRoYXQKIyB0aGUgdXNlciBjYW4gaGF2ZSBkaWZmZXJlbnQgZGVmYXVsdHMgZm9yIHRoZSBzaGVsbCBhbmQgcmVndWxhciBTcGFyayBhcHBzLgpsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5zcGFyay5yZXBsLk1haW49V0FSTgoKIyBTZXR0aW5ncyB0byBxdWlldCB0aGlyZCBwYXJ0eSBsb2dzIHRoYXQgYXJlIHRvbyB2ZXJib3NlCmxvZzRqLmxvZ2dlci5vcmcuc3BhcmtfcHJvamVjdC5qZXR0eT1XQVJOCmxvZzRqLmxvZ2dlci5vcmcuc3BhcmtfcHJvamVjdC5qZXR0eS51dGlsLmNvbXBvbmVudC5BYnN0cmFjdExpZmVDeWNsZT1FUlJPUgpsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5zcGFyay5yZXBsLlNwYXJrSU1haW4kZXhwclR5cGVyPUlORk8KbG9nNGoubG9nZ2VyLm9yZy5hcGFjaGUuc3BhcmsucmVwbC5TcGFya0lMb29wJFNwYXJrSUxvb3BJbnRlcnByZXRlcj1JTkZPCmxvZzRqLmxvZ2dlci5vcmcuYXBhY2hlLnBhcnF1ZXQ9RVJST1IKbG9nNGoubG9nZ2VyLnBhcnF1ZXQ9RVJST1IKCiMgU1BBUkstOTE4MzogU2V0dGluZ3MgdG8gYXZvaWQgYW5ub3lpbmcgbWVzc2FnZXMgd2hlbiBsb29raW5nIHVwIG5vbmV4aXN0ZW50IFVERnMgaW4gU3BhcmtTUUwgd2l0aCBIaXZlIHN1cHBvcnQKbG9nNGoubG9nZ2VyLm9yZy5hcGFjaGUuaGFkb29wLmhpdmUubWV0YXN0b3JlLlJldHJ5aW5nSE1TSGFuZGxlcj1GQVRBTApsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5oYWRvb3AuaGl2ZS5xbC5leGVjLkZ1bmN0aW9uUmVnaXN0cnk9RVJST1IKCiMgdGlzcGFyayBkaXNhYmxlICJXQVJOIE9iamVjdFN0b3JlOjU2OCAtIEZhaWxlZCB0byBnZXQgZGF0YWJhc2UiCmxvZzRqLmxvZ2dlci5vcmcuYXBhY2hlLmhhZG9vcC5oaXZlLm1ldGFzdG9yZS5PYmplY3RTdG9yZT1FUlJPUgo=" - autogenFiles["/templates/scripts/dm/run_grafana.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKTEFORz1lbl9VUy5VVEYtOCBcCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9iaW4vZ3JhZmFuYS1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2Jpbi9ncmFmYW5hLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1ob21lcGF0aD0ie3suRGVwbG95RGlyfX0vYmluIiBcCiAgICAtLWNvbmZpZz0ie3suRGVwbG95RGlyfX0vY29uZi9ncmFmYW5hLmluaSIK" - 
autogenFiles["/templates/scripts/dm/run_prometheus.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCgpleGVjID4gPih0ZWUgLWkgLWEgInt7LkxvZ0Rpcn19L3Byb21ldGhldXMubG9nIikKZXhlYyAyPiYxCgp7ey0gaWYgLk51bWFOb2RlfX0KZXhlYyBudW1hY3RsIC0tY3B1bm9kZWJpbmQ9e3suTnVtYU5vZGV9fSAtLW1lbWJpbmQ9e3suTnVtYU5vZGV9fSBiaW4vcHJvbWV0aGV1cy9wcm9tZXRoZXVzIFwKe3stIGVsc2V9fQpleGVjIGJpbi9wcm9tZXRoZXVzL3Byb21ldGhldXMgXAp7ey0gZW5kfX0KICAgIC0tY29uZmlnLmZpbGU9Int7LkRlcGxveURpcn19L2NvbmYvcHJvbWV0aGV1cy55bWwiIFwKICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSI6e3suUG9ydH19IiBcCiAgICAtLXdlYi5leHRlcm5hbC11cmw9Imh0dHA6Ly97ey5JUH19Ont7LlBvcnR9fS8iIFwKICAgIC0td2ViLmVuYWJsZS1hZG1pbi1hcGkgXAogICAgLS1sb2cubGV2ZWw9ImluZm8iIFwKICAgIC0tc3RvcmFnZS50c2RiLnBhdGg9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1zdG9yYWdlLnRzZGIucmV0ZW50aW9uPSIzMGQiCg==" autogenFiles["/templates/scripts/run_alertmanager.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCmV4ZWMgPiA+KHRlZSAtaSAtYSAie3suTG9nRGlyfX0vYWxlcnRtYW5hZ2VyLmxvZyIpCmV4ZWMgMj4mMQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2FsZXJ0bWFuYWdlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vYWxlcnRtYW5hZ2VyL2FsZXJ0bWFuYWdlciBcCnt7LSBlbmR9fQogICAgLS1jb25maWcuZmlsZT0iY29uZi9hbGVydG1hbmFnZXIueW1sIiBcCiAgICAtLXN0b3JhZ2UucGF0aD0ie3suRGF0YURpcn19IiBcCiAgICAtLWRhdGEucmV0ZW50aW9uPTEyMGggXAogICAgLS1sb2cubGV2ZWw9ImluZm8iIFwKICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSJ7ey5JUH19Ont7LldlYlBvcnR9fSIgXAp7ey0gaWYgLkVuZFBvaW50c319Cnt7LSByYW5nZSAkaWR4LCAkYW0gOj0gLkVuZFBvaW50c319CiAgICAtLWNsdXN0ZXIucGVlcj0ie3skYW0uSVB9fTp7eyRhbS5DbHVzdGVyUG9ydH19IiBcCnt7LSBlbmR9fQp7ey0gZW5kfX0KICAgIC0tY2x1c3Rlci5saXN0ZW4tYWRkcmVzcz0ie3suSVB9fTp7ey5DbHVzdGVyUG9ydH19Igo=" autogenFiles["/templates/scripts/run_blackbox_exporter.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKZXhlYyA+ID4odGVlIC1pIC1hICJ7ey5Mb2dEaXJ9fS9ibGFja2JveF9leHBvcnRlci5sb2ciKQpleGVjIDI+JjEKCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9ibGFja2JveF9leHBvcnRlci9ibGFja2JveF9leHBvcnRlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vYmxhY2tib3hfZXhwb3J0ZXIvYmxhY2tib3hfZXhwb3J0ZXIgXAp7ey0gZW5kfX0KICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSI6e3suUG9ydH19IiBcCiAgICAtLWxvZy5sZXZlbD0iaW5mbyIgXAogICAgLS1jb25maWcuZmlsZT0iY29uZi9ibGFja2JveC55bWwiCg==" autogenFiles["/templates/scripts/run_cdc.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKe3stIGRlZmluZSAiUERMaXN0In19CiAge3stIHJhbmdlICRpZHgsICRwZCA6PSAufX0KICAgIHt7LSBpZiBlcSAkaWR4IDB9fQogICAgICB7ey0gJHBkLlNjaGVtZX19Oi8ve3skcGQuSVB9fTp7eyRwZC5DbGllbnRQb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZW5kfX0KICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7ey0gaWYgLk51bWFOb2RlfX0KZXhlYyBudW1hY3RsIC0tY3B1bm9kZWJpbmQ9e3suTnVtYU5vZGV9fSAtLW1lbWJpbmQ9e3suTnVtYU5vZGV9fSBiaW4vY2RjIHNlcnZlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vY2RjIHNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1hZGRyICIwLjAuMC4wOnt7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkciAie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQgInt7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319IiBcCnt7LSBpZiAuVExTRW5hYmxlZH19CiAgICAtLWNhIHRscy9jYS5jcnQgXAogICAgLS1jZXJ0IHRscy9jZGMuY3J0IFwKICAgIC0ta2V5IHRscy9jZGMucGVtIFwKe3stIGVuZH19Cnt7LSBpZiAuR0NUVEx9fQogICAgLS1nYy10dGwge3suR0NUVEx9fSBcCnt7LSBlbmR9fQp7ey0gaWYgLlRafX0KICAgIC0tdHogInt7LlRafX0iIFwKe3stIGVuZH19CiAgICAtLWxvZy1maWxlICJ7ey5Mb2dEaXJ9fS9jZGMubG9nIiAyPj4gInt7LkxvZ0Rpcn19L2NkY19zdGRlcnIubG9nIgo=" diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index 2e0bb4be49..9e950b579a 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -716,7 +716,7 @@ func (m *Manager) Reload(clusterName string, opt operator.Options, skipRestart b tb := task.NewBuilder().UserSSH(inst.GetHost(), inst.GetSSHPort(), base.User, opt.SSHTimeout, opt.SSHType, topo.BaseTopo().GlobalOptions.SSHType) if inst.IsImported() { switch compName := inst.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, inst.OS(), inst.Arch(), version). CopyComponent(compName, inst.OS(), inst.Arch(), version, "", inst.GetHost(), deployDir) @@ -842,7 +842,7 @@ func (m *Manager) Upgrade(clusterName string, clusterVersion string, opt operato tb := task.NewBuilder() if inst.IsImported() { switch inst.ComponentName() { - case spec.ComponentPrometheus, spec.ComponentGrafana, spec.ComponentAlertManager: + case spec.ComponentPrometheus, spec.ComponentGrafana, spec.ComponentAlertmanager: tb.CopyComponent( inst.ComponentName(), inst.OS(), @@ -1394,7 +1394,7 @@ func (m *Manager) ScaleIn( tb := task.NewBuilder() if instance.IsImported() { switch compName := instance.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, instance.OS(), instance.Arch(), version). CopyComponent( @@ -2078,7 +2078,7 @@ func buildScaleOutTask( tb := task.NewBuilder() if inst.IsImported() { switch compName := inst.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, inst.OS(), inst.Arch(), version). 
CopyComponent(compName, inst.OS(), inst.Arch(), version, "", inst.GetHost(), deployDir) diff --git a/pkg/cluster/operation/destroy.go b/pkg/cluster/operation/destroy.go index 8cf99b89ac..0387b86df3 100644 --- a/pkg/cluster/operation/destroy.go +++ b/pkg/cluster/operation/destroy.go @@ -490,7 +490,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.TiKVComponent{Specification: cluster}).Instances() + instances := (&spec.TiKVComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } @@ -520,7 +520,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.TiFlashComponent{Specification: cluster}).Instances() + instances := (&spec.TiFlashComponent{Topology: cluster}).Instances() id = s.Host + ":" + strconv.Itoa(s.GetMainPort()) if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err @@ -550,7 +550,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.PumpComponent{Specification: cluster}).Instances() + instances := (&spec.PumpComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } @@ -579,7 +579,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.DrainerComponent{Specification: cluster}).Instances() + instances := (&spec.DrainerComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } diff --git a/pkg/cluster/operation/scale_in.go b/pkg/cluster/operation/scale_in.go index a2003efd76..b084bb4f4c 100644 --- a/pkg/cluster/operation/scale_in.go +++ b/pkg/cluster/operation/scale_in.go @@ -118,7 +118,7 @@ func ScaleInCluster( } var pdEndpoint []string - for _, instance := range (&spec.PDComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.PDComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { pdEndpoint = append(pdEndpoint, Addr(instance)) } @@ -176,7 +176,7 @@ func ScaleInCluster( // TODO if binlog is switch on, cannot delete all pump servers. 
var tiflashInstances []spec.Instance - for _, instance := range (&spec.TiFlashComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.TiFlashComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tiflashInstances = append(tiflashInstances, instance) } @@ -184,7 +184,7 @@ func ScaleInCluster( if len(tiflashInstances) > 0 { var tikvInstances []spec.Instance - for _, instance := range (&spec.TiKVComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.TiKVComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tikvInstances = append(tikvInstances, instance) } diff --git a/pkg/cluster/spec/alertmanager.go b/pkg/cluster/spec/alertmanager.go index 0793ad1387..2005e72ace 100644 --- a/pkg/cluster/spec/alertmanager.go +++ b/pkg/cluster/spec/alertmanager.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tiup/pkg/meta" ) -// AlertManagerSpec represents the AlertManager topology specification in topology.yaml -type AlertManagerSpec struct { +// AlertmanagerSpec represents the AlertManager topology specification in topology.yaml +type AlertmanagerSpec struct { Host string `yaml:"host"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` @@ -42,31 +42,31 @@ type AlertManagerSpec struct { } // Role returns the component role of the instance -func (s AlertManagerSpec) Role() string { - return ComponentAlertManager +func (s AlertmanagerSpec) Role() string { + return ComponentAlertmanager } // SSH returns the host and SSH port of the instance -func (s AlertManagerSpec) SSH() (string, int) { +func (s AlertmanagerSpec) SSH() (string, int) { return s.Host, s.SSHPort } // GetMainPort returns the main port of the instance -func (s AlertManagerSpec) GetMainPort() int { +func (s AlertmanagerSpec) GetMainPort() int { return s.WebPort } // IsImported returns if the node is imported from TiDB-Ansible -func (s AlertManagerSpec) IsImported() bool { +func (s AlertmanagerSpec) IsImported() bool { return s.Imported } // AlertManagerComponent represents Alertmanager component. -type AlertManagerComponent struct{ *Specification } +type AlertManagerComponent struct{ Topology } // Name implements Component interface. func (c *AlertManagerComponent) Name() string { - return ComponentAlertManager + return ComponentAlertmanager } // Role implements Component interface. @@ -76,8 +76,11 @@ func (c *AlertManagerComponent) Role() string { // Instances implements Component interface. 
func (c *AlertManagerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Alertmanager)) - for _, s := range c.Alertmanager { + alertmanagers := c.Topology.BaseTopo().Alertmanagers + + ins := make([]Instance, 0, len(alertmanagers)) + + for _, s := range alertmanagers { ins = append(ins, &AlertManagerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -98,7 +101,7 @@ func (c *AlertManagerComponent) Instances() []Instance { return "-" }, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -107,7 +110,7 @@ func (c *AlertManagerComponent) Instances() []Instance { // AlertManagerInstance represent the alert manager instance type AlertManagerInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -118,16 +121,19 @@ func (i *AlertManagerInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + alertmanagers := i.topo.BaseTopo().Alertmanagers + + enableTLS := gOpts.TLSEnabled // Transfer start script - spec := i.InstanceSpec.(AlertManagerSpec) + spec := i.InstanceSpec.(AlertmanagerSpec) cfg := scripts.NewAlertManagerScript(spec.Host, paths.Deploy, paths.Data[0], paths.Log, enableTLS). WithWebPort(spec.WebPort).WithClusterPort(spec.ClusterPort).WithNumaNode(spec.NumaNode). - AppendEndpoints(AlertManagerEndpoints(i.topo.Alertmanager, deployUser, enableTLS)) + AppendEndpoints(AlertManagerEndpoints(alertmanagers, deployUser, enableTLS)) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_alertmanager_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { diff --git a/pkg/cluster/spec/bindversion.go b/pkg/cluster/spec/bindversion.go index 4a7692bb75..e91203cf5c 100644 --- a/pkg/cluster/spec/bindversion.go +++ b/pkg/cluster/spec/bindversion.go @@ -21,7 +21,7 @@ import ( // TiDBComponentVersion maps the TiDB version to the third components binding version func TiDBComponentVersion(comp, version string) string { switch comp { - case ComponentAlertManager: + case ComponentAlertmanager: return "v0.17.0" case ComponentBlackboxExporter: return "v0.12.0" diff --git a/pkg/cluster/spec/cdc.go b/pkg/cluster/spec/cdc.go index 262ffdd4fe..9beac6c93b 100644 --- a/pkg/cluster/spec/cdc.go +++ b/pkg/cluster/spec/cdc.go @@ -62,7 +62,7 @@ func (s CDCSpec) IsImported() bool { } // CDCComponent represents CDC component. -type CDCComponent struct{ *Specification } +type CDCComponent struct{ Topology *Specification } // Name implements Component interface. func (c *CDCComponent) Name() string { @@ -76,8 +76,8 @@ func (c *CDCComponent) Role() string { // Instances implements Component interface. 
func (c *CDCComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.CDCServers)) - for _, s := range c.CDCServers { + ins := make([]Instance, 0, len(c.Topology.CDCServers)) + for _, s := range c.Topology.CDCServers { s := s ins = append(ins, &CDCInstance{BaseInstance{ InstanceSpec: s, @@ -100,7 +100,7 @@ func (c *CDCComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -108,7 +108,7 @@ func (c *CDCComponent) Instances() []Instance { // CDCInstance represent the CDC instance. type CDCInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -137,11 +137,12 @@ func (i *CDCInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(CDCSpec) cfg := scripts.NewCDCScript( i.GetHost(), @@ -150,7 +151,7 @@ func (i *CDCInstance) InitConfig( enableTLS, spec.GCTTL, spec.TZ, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_cdc_%s_%d.sh", i.GetHost(), i.GetPort())) @@ -168,5 +169,5 @@ func (i *CDCInstance) InitConfig( specConfig := spec.Config - return i.MergeServerConfig(e, i.topo.ServerConfigs.CDC, specConfig, paths) + return i.MergeServerConfig(e, topo.ServerConfigs.CDC, specConfig, paths) } diff --git a/pkg/cluster/spec/drainer.go b/pkg/cluster/spec/drainer.go index ff0b948713..dc22666ab9 100644 --- a/pkg/cluster/spec/drainer.go +++ b/pkg/cluster/spec/drainer.go @@ -64,7 +64,7 @@ func (s DrainerSpec) IsImported() bool { } // DrainerComponent represents Drainer component. -type DrainerComponent struct{ *Specification } +type DrainerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DrainerComponent) Name() string { @@ -78,8 +78,8 @@ func (c *DrainerComponent) Role() string { // Instances implements Component interface. func (c *DrainerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Drainers)) - for _, s := range c.Drainers { + ins := make([]Instance, 0, len(c.Topology.Drainers)) + for _, s := range c.Topology.Drainers { s := s ins = append(ins, &DrainerInstance{BaseInstance{ InstanceSpec: s, @@ -103,7 +103,7 @@ func (c *DrainerComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -111,7 +111,7 @@ func (c *DrainerComponent) Instances() []Instance { // DrainerInstance represent the Drainer instance. 
type DrainerInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -140,11 +140,12 @@ func (i *DrainerInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(DrainerSpec) cfg := scripts.NewDrainerScript( i.GetHost()+":"+strconv.Itoa(i.GetPort()), @@ -152,7 +153,7 @@ func (i *DrainerInstance) InitConfig( paths.Deploy, paths.Data[0], paths.Log, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...) cfg.WithCommitTs(spec.CommitTS) @@ -170,7 +171,7 @@ func (i *DrainerInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.Drainer + globalConfig := topo.ServerConfigs.Drainer // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/grafana.go b/pkg/cluster/spec/grafana.go index eff06eb9a8..79dd98d69f 100644 --- a/pkg/cluster/spec/grafana.go +++ b/pkg/cluster/spec/grafana.go @@ -18,6 +18,7 @@ import ( "fmt" "path" "path/filepath" + "reflect" "strings" "github.com/pingcap/errors" @@ -61,7 +62,7 @@ func (s GrafanaSpec) IsImported() bool { } // GrafanaComponent represents Grafana component. -type GrafanaComponent struct{ *Specification } +type GrafanaComponent struct{ Topology } // Name implements Component interface. func (c *GrafanaComponent) Name() string { @@ -75,8 +76,10 @@ func (c *GrafanaComponent) Role() string { // Instances implements Component interface. 
func (c *GrafanaComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Grafana)) - for _, s := range c.Grafana { + servers := c.BaseTopo().Grafanas + ins := make([]Instance, 0, len(servers)) + + for _, s := range servers { ins = append(ins, &GrafanaInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -95,7 +98,7 @@ func (c *GrafanaComponent) Instances() []Instance { return "-" }, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -104,7 +107,7 @@ func (c *GrafanaComponent) Instances() []Instance { // GrafanaInstance represent the grafana instance type GrafanaInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -115,7 +118,8 @@ func (i *GrafanaInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } @@ -160,13 +164,22 @@ func (i *GrafanaInstance) InitConfig( return err } + topo := reflect.ValueOf(i.topo) + if topo.Kind() == reflect.Ptr { + topo = topo.Elem() + } + val := topo.FieldByName("Monitors") + if (val == reflect.Value{}) { + return errors.Errorf("field Monitors not found in topology: %v", topo) + } + monitors := val.Interface().([]PrometheusSpec) // transfer datasource.yml - if len(i.topo.Monitors) == 0 { + if len(monitors) == 0 { return errors.New("no prometheus found in topology") } fp = filepath.Join(paths.Cache, fmt.Sprintf("datasource_%s.yml", i.GetHost())) - if err := config.NewDatasourceConfig(clusterName, i.topo.Monitors[0].Host). - WithPort(uint64(i.topo.Monitors[0].Port)). + if err := config.NewDatasourceConfig(clusterName, monitors[0].Host). + WithPort(uint64(monitors[0].Port)). 
ConfigToFile(fp); err != nil { return err } @@ -221,7 +234,6 @@ func (i *GrafanaInstance) ScaleConfig( ) error { s := i.topo defer func() { i.topo = s }() - cluster := mustBeClusterTopo(topo) - i.topo = cluster.Merge(i.topo) + i.topo = topo.Merge(i.topo) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } diff --git a/pkg/cluster/spec/grafana_test.go b/pkg/cluster/spec/grafana_test.go index 1dff7ac73b..1222dcd3bd 100644 --- a/pkg/cluster/spec/grafana_test.go +++ b/pkg/cluster/spec/grafana_test.go @@ -35,7 +35,7 @@ func TestLocalDashboards(t *testing.T) { assert.Nil(t, err) topo := new(Specification) - topo.Grafana = append(topo.Grafana, GrafanaSpec{ + topo.Grafanas = append(topo.Grafanas, GrafanaSpec{ Host: "127.0.0.1", Port: 3000, DashboardDir: localDir, @@ -53,7 +53,7 @@ func TestLocalDashboards(t *testing.T) { assert.Nil(t, err) clusterName := "tiup-test-cluster-" + uuid.New().String() - err = grafanaInstance.initDashboards(e, topo.Grafana[0], meta.DirPaths{Deploy: deployDir}, clusterName) + err = grafanaInstance.initDashboards(e, topo.Grafanas[0], meta.DirPaths{Deploy: deployDir}, clusterName) assert.Nil(t, err) assert.FileExists(t, path.Join(deployDir, "dashboards", "tidb.json")) diff --git a/pkg/cluster/spec/instance.go b/pkg/cluster/spec/instance.go index 40b2534c3d..85b6b2113d 100644 --- a/pkg/cluster/spec/instance.go +++ b/pkg/cluster/spec/instance.go @@ -44,7 +44,7 @@ const ( ComponentCDC = "cdc" ComponentTiSpark = "tispark" ComponentSpark = "spark" - ComponentAlertManager = "alertmanager" + ComponentAlertmanager = "alertmanager" ComponentPrometheus = "prometheus" ComponentPushwaygate = "pushgateway" ComponentBlackboxExporter = "blackbox_exporter" diff --git a/pkg/cluster/spec/pd.go b/pkg/cluster/spec/pd.go index edb9cf1fe1..f95506393f 100644 --- a/pkg/cluster/spec/pd.go +++ b/pkg/cluster/spec/pd.go @@ -106,7 +106,7 @@ func (s PDSpec) IsImported() bool { } // PDComponent represents PD component. -type PDComponent struct{ *Specification } +type PDComponent struct{ Topology *Specification } // Name implements Component interface. func (c *PDComponent) Name() string { @@ -120,8 +120,8 @@ func (c *PDComponent) Role() string { // Instances implements Component interface. func (c *PDComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.PDServers)) - for _, s := range c.PDServers { + ins := make([]Instance, 0, len(c.Topology.PDServers)) + for _, s := range c.Topology.PDServers { s := s ins = append(ins, &PDInstance{ Name: s.Name, @@ -143,7 +143,7 @@ func (c *PDComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -153,7 +153,7 @@ func (c *PDComponent) Instances() []Instance { type PDInstance struct { Name string BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -164,11 +164,12 @@ func (i *PDInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(PDSpec) cfg := scripts.NewPDScript( spec.Name, @@ -178,7 +179,7 @@ func (i *PDInstance) InitConfig( paths.Log, ).WithClientPort(spec.ClientPort). WithPeerPort(spec.PeerPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). 
+ AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) scheme := "http" @@ -200,15 +201,15 @@ func (i *PDInstance) InitConfig( } // Set the PD metrics storage address - if semver.Compare(clusterVersion, "v3.1.0") >= 0 && len(i.topo.Monitors) > 0 { + if semver.Compare(clusterVersion, "v3.1.0") >= 0 && len(topo.Monitors) > 0 { if spec.Config == nil { spec.Config = map[string]interface{}{} } - prom := i.topo.Monitors[0] + prom := topo.Monitors[0] spec.Config["pd-server.metric-storage"] = fmt.Sprintf("%s://%s:%d", scheme, prom.Host, prom.Port) } - globalConfig := i.topo.ServerConfigs.PD + globalConfig := topo.ServerConfigs.PD // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/prometheus.go b/pkg/cluster/spec/prometheus.go index aaa79ffb3b..6274ddaad9 100644 --- a/pkg/cluster/spec/prometheus.go +++ b/pkg/cluster/spec/prometheus.go @@ -18,6 +18,7 @@ import ( "fmt" "path" "path/filepath" + "reflect" "strings" "github.com/pingcap/errors" @@ -66,7 +67,7 @@ func (s PrometheusSpec) IsImported() bool { } // MonitorComponent represents Monitor component. -type MonitorComponent struct{ *Specification } +type MonitorComponent struct{ Topology } // Name implements Component interface. func (c *MonitorComponent) Name() string { @@ -80,8 +81,10 @@ func (c *MonitorComponent) Role() string { // Instances implements Component interface. func (c *MonitorComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Monitors)) - for _, s := range c.Monitors { + servers := c.BaseTopo().Monitors + ins := make([]Instance, 0, len(servers)) + + for _, s := range servers { ins = append(ins, &MonitorInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), @@ -99,7 +102,7 @@ func (c *MonitorComponent) Instances() []Instance { StatusFn: func(_ *tls.Config, _ ...string) string { return "-" }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -107,7 +110,7 @@ func (c *MonitorComponent) Instances() []Instance { // MonitorInstance represent the monitor instance type MonitorInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -118,11 +121,12 @@ func (i *MonitorInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := gOpts.TLSEnabled // transfer run script spec := i.InstanceSpec.(PrometheusSpec) cfg := scripts.NewPrometheusScript( @@ -147,54 +151,103 @@ func (i *MonitorInstance) InitConfig( return err } - topo := i.topo + topoHasField := func(field string) (reflect.Value, bool) { + return findSliceField(i.topo, field) + } + monitoredOptions := i.topo.GetMonitoredOptions() // transfer config fp = filepath.Join(paths.Cache, fmt.Sprintf("prometheus_%s_%d.yml", i.GetHost(), i.GetPort())) cfig := config.NewPrometheusConfig(clusterName, enableTLS) - cfig.AddBlackbox(i.GetHost(), uint64(topo.MonitoredOptions.BlackboxExporterPort)) + if monitoredOptions != nil { + cfig.AddBlackbox(i.GetHost(), uint64(monitoredOptions.BlackboxExporterPort)) + } uniqueHosts := set.NewStringSet() - for _, pd := range topo.PDServers { - uniqueHosts.Insert(pd.Host) - cfig.AddPD(pd.Host, uint64(pd.ClientPort)) - } - for _, kv := range topo.TiKVServers { - 
uniqueHosts.Insert(kv.Host) - cfig.AddTiKV(kv.Host, uint64(kv.StatusPort)) - } - for _, db := range topo.TiDBServers { - uniqueHosts.Insert(db.Host) - cfig.AddTiDB(db.Host, uint64(db.StatusPort)) - } - for _, flash := range topo.TiFlashServers { - uniqueHosts.Insert(flash.Host) - cfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort)) - cfig.AddTiFlash(flash.Host, uint64(flash.StatusPort)) - } - for _, pump := range topo.PumpServers { - uniqueHosts.Insert(pump.Host) - cfig.AddPump(pump.Host, uint64(pump.Port)) - } - for _, drainer := range topo.Drainers { - uniqueHosts.Insert(drainer.Host) - cfig.AddDrainer(drainer.Host, uint64(drainer.Port)) - } - for _, cdc := range topo.CDCServers { - uniqueHosts.Insert(cdc.Host) - cfig.AddCDC(cdc.Host, uint64(cdc.Port)) - } - for _, grafana := range topo.Grafana { - uniqueHosts.Insert(grafana.Host) - cfig.AddGrafana(grafana.Host, uint64(grafana.Port)) - } - for _, alertmanager := range topo.Alertmanager { - uniqueHosts.Insert(alertmanager.Host) - cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) - } - for host := range uniqueHosts { - cfig.AddNodeExpoertor(host, uint64(topo.MonitoredOptions.NodeExporterPort)) - cfig.AddBlackboxExporter(host, uint64(topo.MonitoredOptions.BlackboxExporterPort)) - cfig.AddMonitoredServer(host) + + if servers, found := topoHasField("PDServers"); found { + for i := 0; i < servers.Len(); i++ { + pd := servers.Index(i).Interface().(PDSpec) + uniqueHosts.Insert(pd.Host) + cfig.AddPD(pd.Host, uint64(pd.ClientPort)) + } + } + if servers, found := topoHasField("TiKVServers"); found { + for i := 0; i < servers.Len(); i++ { + kv := servers.Index(i).Interface().(TiKVSpec) + uniqueHosts.Insert(kv.Host) + cfig.AddTiKV(kv.Host, uint64(kv.StatusPort)) + } + } + if servers, found := topoHasField("TiDBServers"); found { + for i := 0; i < servers.Len(); i++ { + db := servers.Index(i).Interface().(TiDBSpec) + uniqueHosts.Insert(db.Host) + cfig.AddTiDB(db.Host, uint64(db.StatusPort)) + } + } + if servers, found := topoHasField("TiFlashServers"); found { + for i := 0; i < servers.Len(); i++ { + flash := servers.Index(i).Interface().(TiFlashSpec) + cfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort)) + cfig.AddTiFlash(flash.Host, uint64(flash.StatusPort)) + } + } + if servers, found := topoHasField("PumpServers"); found { + for i := 0; i < servers.Len(); i++ { + pump := servers.Index(i).Interface().(PumpSpec) + uniqueHosts.Insert(pump.Host) + cfig.AddPump(pump.Host, uint64(pump.Port)) + } + } + if servers, found := topoHasField("Drainers"); found { + for i := 0; i < servers.Len(); i++ { + drainer := servers.Index(i).Interface().(DrainerSpec) + uniqueHosts.Insert(drainer.Host) + cfig.AddDrainer(drainer.Host, uint64(drainer.Port)) + } + } + if servers, found := topoHasField("CDCServers"); found { + for i := 0; i < servers.Len(); i++ { + cdc := servers.Index(i).Interface().(CDCSpec) + uniqueHosts.Insert(cdc.Host) + cfig.AddCDC(cdc.Host, uint64(cdc.Port)) + } + } + if servers, found := topoHasField("Grafanas"); found { + for i := 0; i < servers.Len(); i++ { + grafana := servers.Index(i).Interface().(GrafanaSpec) + uniqueHosts.Insert(grafana.Host) + cfig.AddGrafana(grafana.Host, uint64(grafana.Port)) + } + } + if servers, found := topoHasField("Alertmanagers"); found { + for i := 0; i < servers.Len(); i++ { + alertmanager := servers.Index(i).Interface().(AlertmanagerSpec) + uniqueHosts.Insert(alertmanager.Host) + cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) + } + } + if
servers, found := topoHasField("Masters"); found { + for i := 0; i < servers.Len(); i++ { + master := servers.Index(i) + host, port := master.FieldByName("Host").String(), master.FieldByName("Port").Int() + cfig.AddDMMaster(host, uint64(port)) + } + } + + if servers, found := topoHasField("Workers"); found { + for i := 0; i < servers.Len(); i++ { + worker := servers.Index(i) + host, port := worker.FieldByName("Host").String(), worker.FieldByName("Port").Int() + cfig.AddDMWorker(host, uint64(port)) + } + } + if monitoredOptions != nil { + for host := range uniqueHosts { + cfig.AddNodeExpoertor(host, uint64(monitoredOptions.NodeExporterPort)) + cfig.AddBlackboxExporter(host, uint64(monitoredOptions.BlackboxExporterPort)) + cfig.AddMonitoredServer(host) + } } if err := i.initRules(e, spec, paths); err != nil { diff --git a/pkg/cluster/spec/pump.go b/pkg/cluster/spec/pump.go index b46255b862..0a7e82cffc 100644 --- a/pkg/cluster/spec/pump.go +++ b/pkg/cluster/spec/pump.go @@ -63,7 +63,7 @@ func (s PumpSpec) IsImported() bool { } // PumpComponent represents Pump component. -type PumpComponent struct{ *Specification } +type PumpComponent struct{ Topology *Specification } // Name implements Component interface. func (c *PumpComponent) Name() string { @@ -77,8 +77,8 @@ func (c *PumpComponent) Role() string { // Instances implements Component interface. func (c *PumpComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.PumpServers)) - for _, s := range c.PumpServers { + ins := make([]Instance, 0, len(c.Topology.PumpServers)) + for _, s := range c.Topology.PumpServers { s := s ins = append(ins, &PumpInstance{BaseInstance{ InstanceSpec: s, @@ -102,7 +102,7 @@ func (c *PumpComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -110,7 +110,7 @@ func (c *PumpComponent) Instances() []Instance { // PumpInstance represent the Pump instance. type PumpInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -139,11 +139,12 @@ func (i *PumpInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(PumpSpec) cfg := scripts.NewPumpScript( i.GetHost()+":"+strconv.Itoa(i.GetPort()), @@ -151,7 +152,7 @@ func (i *PumpInstance) InitConfig( paths.Deploy, paths.Data[0], paths.Log, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...)
fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pump_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -166,7 +167,7 @@ func (i *PumpInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.Pump + globalConfig := topo.ServerConfigs.Pump // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/spec.go b/pkg/cluster/spec/spec.go index ec7452d84a..d0a700b33c 100644 --- a/pkg/cluster/spec/spec.go +++ b/pkg/cluster/spec/spec.go @@ -104,8 +104,8 @@ type ( TiSparkMasters []TiSparkMasterSpec `yaml:"tispark_masters,omitempty"` TiSparkWorkers []TiSparkWorkerSpec `yaml:"tispark_workers,omitempty"` Monitors []PrometheusSpec `yaml:"monitoring_servers"` - Grafana []GrafanaSpec `yaml:"grafana_servers,omitempty"` - Alertmanager []AlertManagerSpec `yaml:"alertmanager_servers,omitempty"` + Grafanas []GrafanaSpec `yaml:"grafana_servers,omitempty"` + Alertmanagers []AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) @@ -114,6 +114,10 @@ type BaseTopo struct { GlobalOptions *GlobalOptions MonitoredOptions *MonitoredOptions MasterList []string + + Monitors []PrometheusSpec + Grafanas []GrafanaSpec + Alertmanagers []AlertmanagerSpec } // Topology represents specification of the cluster. @@ -132,6 +136,7 @@ type Topology interface { // count how many time a path is used by instances in cluster CountDir(host string, dir string) int TLSConfig(dir string) (*tls.Config, error) + Merge(that Topology) Topology ScaleOutTopology } @@ -206,6 +211,9 @@ func (s *Specification) BaseTopo() *BaseTopo { GlobalOptions: &s.GlobalOptions, MonitoredOptions: s.GetMonitoredOptions(), MasterList: s.GetPDList(), + Monitors: s.Monitors, + Grafanas: s.Grafanas, + Alertmanagers: s.Alertmanagers, } } @@ -313,6 +321,22 @@ func findField(v reflect.Value, fieldName string) (int, bool) { return -1, false } +func findSliceField(v Topology, fieldName string) (reflect.Value, bool) { + topo := reflect.ValueOf(v) + if topo.Kind() == reflect.Ptr { + topo = topo.Elem() + } + + j, found := findField(topo, fieldName) + if found { + val := topo.Field(j) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + return val, true + } + } + return reflect.Value{}, false +} + // GetPDList returns a list of PD API hosts of the current cluster func (s *Specification) GetPDList() []string { var pdList []string @@ -333,23 +357,24 @@ func (s *Specification) GetEtcdClient(tlsCfg *tls.Config) (*clientv3.Client, err } // Merge returns a new Specification which sum old ones -func (s *Specification) Merge(that *Specification) *Specification { +func (s *Specification) Merge(that Topology) Topology { + spec := that.(*Specification) return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, - TiDBServers: append(s.TiDBServers, that.TiDBServers...), - TiKVServers: append(s.TiKVServers, that.TiKVServers...), - PDServers: append(s.PDServers, that.PDServers...), - TiFlashServers: append(s.TiFlashServers, that.TiFlashServers...), - PumpServers: append(s.PumpServers, that.PumpServers...), - Drainers: append(s.Drainers, that.Drainers...), - CDCServers: append(s.CDCServers, that.CDCServers...), - TiSparkMasters: append(s.TiSparkMasters, that.TiSparkMasters...), - TiSparkWorkers: append(s.TiSparkWorkers, that.TiSparkWorkers...), - Monitors: append(s.Monitors, that.Monitors...), - Grafana: append(s.Grafana, that.Grafana...), - Alertmanager: append(s.Alertmanager, 
that.Alertmanager...), + TiDBServers: append(s.TiDBServers, spec.TiDBServers...), + TiKVServers: append(s.TiKVServers, spec.TiKVServers...), + PDServers: append(s.PDServers, spec.PDServers...), + TiFlashServers: append(s.TiFlashServers, spec.TiFlashServers...), + PumpServers: append(s.PumpServers, spec.PumpServers...), + Drainers: append(s.Drainers, spec.Drainers...), + CDCServers: append(s.CDCServers, spec.CDCServers...), + TiSparkMasters: append(s.TiSparkMasters, spec.TiSparkMasters...), + TiSparkWorkers: append(s.TiSparkWorkers, spec.TiSparkWorkers...), + Monitors: append(s.Monitors, spec.Monitors...), + Grafanas: append(s.Grafanas, spec.Grafanas...), + Alertmanagers: append(s.Alertmanagers, spec.Alertmanagers...), } } @@ -639,7 +664,7 @@ func (s *Specification) Endpoints(user string) []*scripts.PDScript { } // AlertManagerEndpoints returns the AlertManager endpoints configurations -func AlertManagerEndpoints(alertmanager []AlertManagerSpec, user string, enableTLS bool) []*scripts.AlertManagerScript { +func AlertManagerEndpoints(alertmanager []AlertmanagerSpec, user string, enableTLS bool) []*scripts.AlertManagerScript { var ends []*scripts.AlertManagerScript for _, spec := range alertmanager { deployDir := Abs(user, spec.DeployDir) diff --git a/pkg/cluster/spec/spec_manager_test.go b/pkg/cluster/spec/spec_manager_test.go index 6f52ef995c..1c8d958810 100644 --- a/pkg/cluster/spec/spec_manager_test.go +++ b/pkg/cluster/spec/spec_manager_test.go @@ -45,6 +45,10 @@ func (m *TestMetadata) GetBaseMeta() *BaseMeta { return &m.BaseMeta } +func (t *TestTopology) Merge(topo Topology) Topology { + panic("not support") +} + func (m *TestMetadata) SetTopology(topo Topology) { testTopo, ok := topo.(*TestTopology) if !ok { @@ -97,6 +101,10 @@ func (t *TestTopology) GetMonitoredOptions() *MonitoredOptions { return nil } +func (t *TestTopology) GetGlobalOptions() GlobalOptions { + return GlobalOptions{} +} + func (t *TestTopology) CountDir(host string, dir string) int { return 0 } diff --git a/pkg/cluster/spec/tidb.go b/pkg/cluster/spec/tidb.go index 263f140685..64168d456c 100644 --- a/pkg/cluster/spec/tidb.go +++ b/pkg/cluster/spec/tidb.go @@ -62,7 +62,7 @@ func (s TiDBSpec) IsImported() bool { } // TiDBComponent represents TiDB component. -type TiDBComponent struct{ *Specification } +type TiDBComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiDBComponent) Name() string { @@ -76,8 +76,8 @@ func (c *TiDBComponent) Role() string { // Instances implements Component interface. 
func (c *TiDBComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiDBServers)) - for _, s := range c.TiDBServers { + ins := make([]Instance, 0, len(c.Topology.TiDBServers)) + for _, s := range c.Topology.TiDBServers { s := s ins = append(ins, &TiDBInstance{BaseInstance{ InstanceSpec: s, @@ -102,7 +102,7 @@ func (c *TiDBComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.StatusPort) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -110,7 +110,7 @@ func (c *TiDBComponent) Instances() []Instance { // TiDBInstance represent the TiDB instance type TiDBInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -121,11 +121,12 @@ func (i *TiDBInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(TiDBSpec) cfg := scripts.NewTiDBScript( i.GetHost(), @@ -133,7 +134,7 @@ func (i *TiDBInstance) InitConfig( paths.Log, ).WithPort(spec.Port).WithNumaNode(spec.NumaNode). WithStatusPort(spec.StatusPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). + AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -148,7 +149,7 @@ func (i *TiDBInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.TiDB + globalConfig := topo.ServerConfigs.TiDB // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/tiflash.go b/pkg/cluster/spec/tiflash.go index 9066999beb..0decb8c4a0 100644 --- a/pkg/cluster/spec/tiflash.go +++ b/pkg/cluster/spec/tiflash.go @@ -88,7 +88,7 @@ func (s TiFlashSpec) IsImported() bool { } // TiFlashComponent represents TiFlash component. -type TiFlashComponent struct{ *Specification } +type TiFlashComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiFlashComponent) Name() string { @@ -102,8 +102,8 @@ func (c *TiFlashComponent) Role() string { // Instances implements Component interface. 
func (c *TiFlashComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiFlashServers)) - for _, s := range c.TiFlashServers { + ins := make([]Instance, 0, len(c.Topology.TiFlashServers)) + for _, s := range c.Topology.TiFlashServers { ins = append(ins, &TiFlashInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), @@ -124,7 +124,7 @@ func (c *TiFlashComponent) Instances() []Instance { s.DataDir, }, StatusFn: s.Status, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -132,7 +132,7 @@ func (c *TiFlashComponent) Instances() []Instance { // TiFlashInstance represent the TiFlash instance type TiFlashInstance struct { BaseInstance - topo *Specification + topo Topology } // GetServicePort returns the service port of TiFlash @@ -146,7 +146,7 @@ func (i *TiFlashInstance) checkIncorrectKey(key string) error { if dir, ok := i.InstanceSpec.(TiFlashSpec).Config[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "host") } - if dir, ok := i.topo.ServerConfigs.TiFlash[key].(string); ok && dir != "" { + if dir, ok := i.topo.(*Specification).ServerConfigs.TiFlash[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "server_configs") } return nil @@ -298,14 +298,15 @@ func (i *TiFlashInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } spec := i.InstanceSpec.(TiFlashSpec) tidbStatusAddrs := []string{} - for _, tidb := range i.topo.TiDBServers { + for _, tidb := range topo.TiDBServers { tidbStatusAddrs = append(tidbStatusAddrs, fmt.Sprintf("%s:%d", tidb.Host, uint64(tidb.StatusPort))) } tidbStatusStr := strings.Join(tidbStatusAddrs, ",") @@ -327,7 +328,7 @@ func (i *TiFlashInstance) InitConfig( WithStatusPort(spec.StatusPort). WithTmpDir(spec.TmpDir). WithNumaNode(spec.NumaNode). - AppendEndpoints(i.topo.Endpoints(deployUser)...) + AppendEndpoints(topo.Endpoints(deployUser)...) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tiflash_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -343,7 +344,7 @@ func (i *TiFlashInstance) InitConfig( return err } - conf, err := i.InitTiFlashLearnerConfig(cfg, clusterVersion, i.topo.ServerConfigs.TiFlashLearner) + conf, err := i.InitTiFlashLearnerConfig(cfg, clusterVersion, topo.ServerConfigs.TiFlashLearner) if err != nil { return err } @@ -375,7 +376,7 @@ func (i *TiFlashInstance) InitConfig( return err } - conf, err = i.InitTiFlashConfig(cfg, i.topo.ServerConfigs.TiFlash) + conf, err = i.InitTiFlashConfig(cfg, topo.ServerConfigs.TiFlash) if err != nil { return err } @@ -428,7 +429,7 @@ type replicateConfig struct { func (i *TiFlashInstance) getEndpoints() []string { var endpoints []string - for _, pd := range i.topo.PDServers { + for _, pd := range i.topo.(*Specification).PDServers { endpoints = append(endpoints, fmt.Sprintf("%s:%d", pd.Host, uint64(pd.ClientPort))) } return endpoints diff --git a/pkg/cluster/spec/tikv.go b/pkg/cluster/spec/tikv.go index 71c3b676db..e11251aa8f 100644 --- a/pkg/cluster/spec/tikv.go +++ b/pkg/cluster/spec/tikv.go @@ -143,9 +143,7 @@ func (s TiKVSpec) Labels() (map[string]string, error) { } // TiKVComponent represents TiKV component. -type TiKVComponent struct { - *Specification -} +type TiKVComponent struct{ Topology *Specification } // Name implements Component interface. 
func (c *TiKVComponent) Name() string { @@ -159,8 +157,8 @@ func (c *TiKVComponent) Role() string { // Instances implements Component interface. func (c *TiKVComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiKVServers)) - for _, s := range c.TiKVServers { + ins := make([]Instance, 0, len(c.Topology.TiKVServers)) + for _, s := range c.Topology.TiKVServers { s := s ins = append(ins, &TiKVInstance{BaseInstance{ InstanceSpec: s, @@ -179,7 +177,7 @@ func (c *TiKVComponent) Instances() []Instance { s.DataDir, }, StatusFn: s.Status, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -187,7 +185,7 @@ func (c *TiKVComponent) Instances() []Instance { // TiKVInstance represent the TiDB instance type TiKVInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -198,11 +196,12 @@ func (i *TiKVInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(TiKVSpec) cfg := scripts.NewTiKVScript( i.GetHost(), @@ -212,7 +211,7 @@ func (i *TiKVInstance) InitConfig( ).WithPort(spec.Port). WithNumaNode(spec.NumaNode). WithStatusPort(spec.StatusPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). + AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tikv_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -228,7 +227,7 @@ func (i *TiKVInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.TiKV + globalConfig := topo.ServerConfigs.TiKV // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/tispark.go b/pkg/cluster/spec/tispark.go index 1c546dfdce..49dee7d893 100644 --- a/pkg/cluster/spec/tispark.go +++ b/pkg/cluster/spec/tispark.go @@ -121,7 +121,7 @@ func (s TiSparkWorkerSpec) Status(tlsCfg *tls.Config, pdList ...string) string { } // TiSparkMasterComponent represents TiSpark master component. -type TiSparkMasterComponent struct{ *Specification } +type TiSparkMasterComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiSparkMasterComponent) Name() string { @@ -135,8 +135,8 @@ func (c *TiSparkMasterComponent) Role() string { // Instances implements Component interface. 
func (c *TiSparkMasterComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiSparkMasters)) - for _, s := range c.TiSparkMasters { + ins := make([]Instance, 0, len(c.Topology.TiSparkMasters)) + for _, s := range c.Topology.TiSparkMasters { ins = append(ins, &TiSparkMasterInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -154,7 +154,7 @@ func (c *TiSparkMasterComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -163,7 +163,7 @@ func (c *TiSparkMasterComponent) Instances() []Instance { // TiSparkMasterInstance represent the TiSpark master instance type TiSparkMasterInstance struct { BaseInstance - topo *Specification + topo Topology } // GetCustomFields get custom spark configs of the instance @@ -201,6 +201,7 @@ func (i *TiSparkMasterInstance) InitConfig( comp := i.Role() host := i.GetHost() port := i.GetPort() + topo := i.topo.(*Specification) sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome()) @@ -219,11 +220,11 @@ func (i *TiSparkMasterInstance) InitConfig( // transfer default config pdList := make([]string, 0) - for _, pd := range i.topo.Endpoints(deployUser) { + for _, pd := range topo.Endpoints(deployUser) { pdList = append(pdList, fmt.Sprintf("%s:%d", pd.IP, pd.ClientPort)) } masterList := make([]string, 0) - for _, master := range i.topo.TiSparkMasters { + for _, master := range topo.TiSparkMasters { masterList = append(masterList, fmt.Sprintf("%s:%d", master.Host, master.Port)) } @@ -285,7 +286,7 @@ func (i *TiSparkMasterInstance) ScaleConfig( } // TiSparkWorkerComponent represents TiSpark slave component. -type TiSparkWorkerComponent struct{ *Specification } +type TiSparkWorkerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiSparkWorkerComponent) Name() string { @@ -299,8 +300,8 @@ func (c *TiSparkWorkerComponent) Role() string { // Instances implements Component interface. 
func (c *TiSparkWorkerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiSparkWorkers)) - for _, s := range c.TiSparkWorkers { + ins := make([]Instance, 0, len(c.Topology.TiSparkWorkers)) + for _, s := range c.Topology.TiSparkWorkers { ins = append(ins, &TiSparkWorkerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -318,7 +319,7 @@ func (c *TiSparkWorkerComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -327,7 +328,7 @@ func (c *TiSparkWorkerComponent) Instances() []Instance { // TiSparkWorkerInstance represent the TiSpark slave instance type TiSparkWorkerInstance struct { BaseInstance - topo *Specification + topo Topology } // GetJavaHome returns the java_home value in spec @@ -347,6 +348,7 @@ func (i *TiSparkWorkerInstance) InitConfig( comp := i.Role() host := i.GetHost() port := i.GetPort() + topo := i.topo.(*Specification) sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome()) @@ -365,16 +367,16 @@ func (i *TiSparkWorkerInstance) InitConfig( // transfer default config pdList := make([]string, 0) - for _, pd := range i.topo.Endpoints(deployUser) { + for _, pd := range topo.Endpoints(deployUser) { pdList = append(pdList, fmt.Sprintf("%s:%d", pd.IP, pd.ClientPort)) } masterList := make([]string, 0) - for _, master := range i.topo.TiSparkMasters { + for _, master := range topo.TiSparkMasters { masterList = append(masterList, fmt.Sprintf("%s:%d", master.Host, master.Port)) } cfg := config.NewTiSparkConfig(pdList).WithMasters(strings.Join(masterList, ",")). - WithCustomFields(i.topo.TiSparkMasters[0].SparkConfigs) + WithCustomFields(topo.TiSparkMasters[0].SparkConfigs) // transfer spark-defaults.conf fp := filepath.Join(paths.Cache, fmt.Sprintf("spark-defaults-%s-%d.conf", host, port)) if err := cfg.ConfigToFile(fp); err != nil { @@ -387,10 +389,10 @@ func (i *TiSparkWorkerInstance) InitConfig( env := scripts.NewTiSparkEnv(host). WithLocalIP(i.GetListenHost()). - WithMaster(i.topo.TiSparkMasters[0].Host). - WithMasterPorts(i.topo.TiSparkMasters[0].Port, i.topo.TiSparkMasters[0].WebPort). + WithMaster(topo.TiSparkMasters[0].Host). + WithMasterPorts(topo.TiSparkMasters[0].Port, topo.TiSparkMasters[0].WebPort). WithWorkerPorts(i.Ports[0], i.Ports[1]). 
- WithCustomEnv(i.topo.TiSparkMasters[0].SparkEnvs) + WithCustomEnv(topo.TiSparkMasters[0].SparkEnvs) // transfer spark-env.sh file fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-env-%s-%d.sh", host, port)) if err := env.ScriptToFile(fp); err != nil { @@ -440,7 +442,6 @@ func (i *TiSparkWorkerInstance) ScaleConfig( ) error { s := i.topo defer func() { i.topo = s }() - cluster := mustBeClusterTopo(topo) - i.topo = cluster.Merge(i.topo) + i.topo = topo.Merge(i.topo) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } diff --git a/pkg/cluster/spec/validate.go b/pkg/cluster/spec/validate.go index 2f423df854..fdc1461fe5 100644 --- a/pkg/cluster/spec/validate.go +++ b/pkg/cluster/spec/validate.go @@ -785,7 +785,7 @@ func (s *Specification) validateTLSEnabled() error { ComponentDrainer, ComponentCDC, ComponentPrometheus, - ComponentAlertManager, + ComponentAlertmanager, ComponentGrafana: default: return errors.Errorf("component %s is not supported in TLS enabled cluster", c.Name()) diff --git a/pkg/cluster/task/update_meta.go b/pkg/cluster/task/update_meta.go index 798c340949..8e6c1f5caa 100644 --- a/pkg/cluster/task/update_meta.go +++ b/pkg/cluster/task/update_meta.go @@ -41,77 +41,77 @@ func (u *UpdateMeta) Execute(ctx *Context) error { deleted := set.NewStringSet(u.deletedNodesID...) topo := u.metadata.Topology - for i, instance := range (&spec.TiDBComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiDBServers = append(newMeta.Topology.TiDBServers, topo.TiDBServers[i]) } - for i, instance := range (&spec.TiKVComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiKVComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiKVServers = append(newMeta.Topology.TiKVServers, topo.TiKVServers[i]) } - for i, instance := range (&spec.PDComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.PDComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.PDServers = append(newMeta.Topology.PDServers, topo.PDServers[i]) } - for i, instance := range (&spec.TiFlashComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiFlashComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiFlashServers = append(newMeta.Topology.TiFlashServers, topo.TiFlashServers[i]) } - for i, instance := range (&spec.PumpComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.PumpComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.PumpServers = append(newMeta.Topology.PumpServers, topo.PumpServers[i]) } - for i, instance := range (&spec.DrainerComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.DrainerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Drainers = append(newMeta.Topology.Drainers, topo.Drainers[i]) } - for i, instance := range (&spec.CDCComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.CDCComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.CDCServers = append(newMeta.Topology.CDCServers, topo.CDCServers[i]) } - for i, instance := range (&spec.TiSparkWorkerComponent{Specification: topo}).Instances() { + for i, instance 
:= range (&spec.TiSparkWorkerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiSparkWorkers = append(newMeta.Topology.TiSparkWorkers, topo.TiSparkWorkers[i]) } - for i, instance := range (&spec.TiSparkMasterComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiSparkMasterComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiSparkMasters = append(newMeta.Topology.TiSparkMasters, topo.TiSparkMasters[i]) } - for i, instance := range (&spec.MonitorComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Monitors = append(newMeta.Topology.Monitors, topo.Monitors[i]) } - for i, instance := range (&spec.GrafanaComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Grafana = append(newMeta.Topology.Grafana, topo.Grafana[i]) + newMeta.Topology.Grafanas = append(newMeta.Topology.Grafanas, topo.Grafanas[i]) } - for i, instance := range (&spec.AlertManagerComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Alertmanager = append(newMeta.Topology.Alertmanager, topo.Alertmanager[i]) + newMeta.Topology.Alertmanagers = append(newMeta.Topology.Alertmanagers, topo.Alertmanagers[i]) } return spec.SaveClusterMeta(u.cluster, newMeta) } diff --git a/pkg/cluster/task/update_topology.go b/pkg/cluster/task/update_topology.go index 96178728ac..c83ebccd4b 100644 --- a/pkg/cluster/task/update_topology.go +++ b/pkg/cluster/task/update_topology.go @@ -46,11 +46,11 @@ func (u *UpdateTopology) Execute(ctx *Context) error { var ops []clientv3.Op var instances []spec.Instance - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.MonitorComponent{Specification: topo}).Instances(), "prometheus") - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.GrafanaComponent{Specification: topo}).Instances(), "grafana") - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.AlertManagerComponent{Specification: topo}).Instances(), "alertmanager") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.MonitorComponent{Topology: topo}).Instances(), "prometheus") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.GrafanaComponent{Topology: topo}).Instances(), "grafana") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.AlertManagerComponent{Topology: topo}).Instances(), "alertmanager") - for _, instance := range (&spec.TiDBComponent{Specification: topo}).Instances() { + for _, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { ops = append(ops, clientv3.OpDelete(fmt.Sprintf("/topology/tidb/%s:%d", instance.GetHost(), instance.GetPort()), clientv3.WithPrefix())) } @@ -98,7 +98,7 @@ func updateInstancesAndOps(ops []clientv3.Op, destInstances []spec.Instance, del // for update it's topology. 
func updateTopologyOp(instance spec.Instance) (*clientv3.Op, error) { switch instance.ComponentName() { - case spec.ComponentAlertManager, spec.ComponentPrometheus, spec.ComponentGrafana: + case spec.ComponentAlertmanager, spec.ComponentPrometheus, spec.ComponentGrafana: topology := componentTopology{ IP: instance.GetHost(), Port: instance.GetPort(), diff --git a/pkg/cluster/template/config/prometheus.go b/pkg/cluster/template/config/prometheus.go index 98df697cbc..c62f56acd3 100644 --- a/pkg/cluster/template/config/prometheus.go +++ b/pkg/cluster/template/config/prometheus.go @@ -46,6 +46,9 @@ type PrometheusConfig struct { BlackboxAddr string KafkaExporterAddr string GrafanaAddr string + + DMMasterAddrs []string + DMWorkerAddrs []string } // NewPrometheusConfig returns a PrometheusConfig @@ -170,6 +173,18 @@ func (c *PrometheusConfig) AddGrafana(ip string, port uint64) *PrometheusConfig return c } +// AddDMMaster add an dm-master address +func (c *PrometheusConfig) AddDMMaster(ip string, port uint64) *PrometheusConfig { + c.DMMasterAddrs = append(c.DMMasterAddrs, fmt.Sprintf("%s:%d", ip, port)) + return c +} + +// AddDMWorker add an dm-worker address +func (c *PrometheusConfig) AddDMWorker(ip string, port uint64) *PrometheusConfig { + c.DMWorkerAddrs = append(c.DMWorkerAddrs, fmt.Sprintf("%s:%d", ip, port)) + return c +} + // Config generate the config file data. func (c *PrometheusConfig) Config() ([]byte, error) { fp := path.Join("/templates", "config", "prometheus.yml.tpl") diff --git a/templates/config/dm/prometheus.yml.tpl b/templates/config/dm/prometheus.yml.tpl index d5681b5efa..fcb2c9ca2f 100644 --- a/templates/config/dm/prometheus.yml.tpl +++ b/templates/config/dm/prometheus.yml.tpl @@ -14,11 +14,11 @@ rule_files: {{- if .AlertmanagerAddrs}} alerting: - alertmanagers: - - static_configs: - - targets: + alertmanagers: + - static_configs: + - targets: {{- range .AlertmanagerAddrs}} - - '{{.}}' + - '{{.}}' {{- end}} {{- end}} diff --git a/templates/config/prometheus.yml.tpl b/templates/config/prometheus.yml.tpl index a1bee0523b..43af42822a 100644 --- a/templates/config/prometheus.yml.tpl +++ b/templates/config/prometheus.yml.tpl @@ -9,13 +9,21 @@ global: # Load and evaluate rules in this file every 'evaluation_interval' seconds. 
rule_files: +{{- if .MonitoredServers}} - 'node.rules.yml' - 'blacker.rules.yml' - 'bypass.rules.yml' +{{- end}} +{{- if .PDAddrs}} - 'pd.rules.yml' +{{- end}} +{{- if .TiDBStatusAddrs}} - 'tidb.rules.yml' +{{- end}} +{{- if .TiKVStatusAddrs}} - 'tikv.rules.yml' - 'tikv.accelerate.rules.yml' +{{- end}} {{- if .TiFlashStatusAddrs}} - 'tiflash.rules.yml' {{- end}} @@ -31,6 +39,12 @@ rule_files: {{- if .LightningAddrs}} - 'lightning.rules.yml' {{- end}} +{{- if .DMWorkerAddrs}} + - 'dm_worker.rules.yml' +{{- end}} +{{- if .DMMasterAddrs}} + - 'dm_master.rules.yml' +{{- end}} {{- if .AlertmanagerAddrs}} alerting: @@ -262,7 +276,7 @@ scrape_configs: static_configs: - targets: {{- range .TiDBStatusAddrs}} - - '{{.}}' + - '{{.}}' {{- end}} labels: group: 'tidb' @@ -341,4 +355,24 @@ scrape_configs: regex: .* target_label: __address__ replacement: {{$addr}} -{{- end}} \ No newline at end of file +{{- end}} + +{{- if .DMMasterAddrs}} + - job_name: "dm_master" + honor_labels: true # don't overwrite job & instance labels + static_configs: + - targets: + {{- range .DMMasterAddrs}} + - '{{.}}' + {{- end}} +{{- end}} + +{{- if .DMWorkerAddrs}} + - job_name: "dm_worker" + honor_labels: true # don't overwrite job & instance labels + static_configs: + - targets: + {{- range .DMWorkerAddrs}} + - '{{.}}' + {{- end}} +{{- end}} diff --git a/templates/scripts/dm/run_grafana.sh.tpl b/templates/scripts/dm/run_grafana.sh.tpl deleted file mode 100644 index 341a56e49d..0000000000 --- a/templates/scripts/dm/run_grafana.sh.tpl +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -# WARNING: This file was auto-generated. Do not edit! -# All your edit might be overwritten! -DEPLOY_DIR={{.DeployDir}} -cd "${DEPLOY_DIR}" || exit 1 - -LANG=en_US.UTF-8 \ -{{- if .NumaNode}} -exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/bin/grafana-server \ -{{- else}} -exec bin/bin/grafana-server \ -{{- end}} - --homepath="{{.DeployDir}}/bin" \ - --config="{{.DeployDir}}/conf/grafana.ini" diff --git a/templates/scripts/dm/run_prometheus.sh.tpl b/templates/scripts/dm/run_prometheus.sh.tpl deleted file mode 100644 index a221eae406..0000000000 --- a/templates/scripts/dm/run_prometheus.sh.tpl +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -e - -DEPLOY_DIR={{.DeployDir}} -cd "${DEPLOY_DIR}" || exit 1 - -# WARNING: This file was auto-generated. Do not edit! -# All your edit might be overwritten! - - -exec > >(tee -i -a "{{.LogDir}}/prometheus.log") -exec 2>&1 - -{{- if .NumaNode}} -exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/prometheus/prometheus \ -{{- else}} -exec bin/prometheus/prometheus \ -{{- end}} - --config.file="{{.DeployDir}}/conf/prometheus.yml" \ - --web.listen-address=":{{.Port}}" \ - --web.external-url="http://{{.IP}}:{{.Port}}/" \ - --web.enable-admin-api \ - --log.level="info" \ - --storage.tsdb.path="{{.DataDir}}" \ - --storage.tsdb.retention="30d" diff --git a/tests/tiup-dm/script/util.sh b/tests/tiup-dm/script/util.sh index 9ddf37afa3..bc04fc5790 100755 --- a/tests/tiup-dm/script/util.sh +++ b/tests/tiup-dm/script/util.sh @@ -8,35 +8,35 @@ set -eu # PASS # coverage: 12.7% of statements in github.com/pingcap/tiup/components/dm/... function instance_num() { - name=$1 + name=$1 - count=$(tiup-dm display $name | grep "Total nodes" | awk -F ' ' '{print $3}') + count=$(tiup-dm display $name | grep "Total nodes" | awk -F ' ' '{print $3}') - echo $count + echo $count } # wait_instance_num_reach # wait the instance number of dm reach the target_num. 
# timeout 120 second function wait_instance_num_reach() { - name=$1 - target_num=$2 - - for ((i=0;i<120;i++)) - do - tiup-dm prune $name --yes - count=$(instance_num $name) - if [ "$count" == "$target_num" ]; then - echo "instance number reach $target_num" - return - else - sleep 1 - fi - - sleep 1 - done - - echo "fail to wait instance number reach $target_num, count $count, retry num: $i" - tiup-dm display $name - exit -1 + name=$1 + target_num=$2 + + for ((i=0;i<120;i++)) + do + tiup-dm prune $name --yes + count=$(instance_num $name) + if [ "$count" == "$target_num" ]; then + echo "instance number reach $target_num" + return + else + sleep 1 + fi + + sleep 1 + done + + echo "fail to wait instance number reach $target_num, count $count, retry num: $i" + tiup-dm display $name + exit -1 } diff --git a/tests/tiup-dm/test_import.sh b/tests/tiup-dm/test_import.sh index 032d7b90f7..8808f1fa91 100755 --- a/tests/tiup-dm/test_import.sh +++ b/tests/tiup-dm/test_import.sh @@ -10,8 +10,9 @@ function deploy_by_ansible() { apt-get -y install git curl sshpass python-pip sudo # step 2 - useradd -m -d /home/tidb tidb + id tidb || useradd -m -d /home/tidb tidb echo "tidb:tidb" | chpasswd + sed -i '/tidb/d' /etc/sudoers echo "tidb ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers # use the same key from root instead of create one. @@ -22,7 +23,7 @@ function deploy_by_ansible() { # step 3 su tidb <