From cad0c4ea1ca23123d4f8e4f6da2de1306bd2d8aa Mon Sep 17 00:00:00 2001 From: mahjonp Date: Fri, 6 Nov 2020 18:19:10 +0800 Subject: [PATCH 01/14] update pingcap/go-tpc version (#887) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9087d13096..bfeea173d8 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - github.com/pingcap/go-tpc v1.0.4-0.20200525052430-dc963cdeef62 + github.com/pingcap/go-tpc v1.0.4-0.20201106030831-2cc8c0ab3409 github.com/pingcap/go-ycsb v0.0.0-20200226103513-00ca633a87d8 github.com/pingcap/kvproto v0.0.0-20200810113304-6157337686b1 github.com/pingcap/tidb-insight v0.3.1 diff --git a/go.sum b/go.sum index 9764f649d4..e60e089def 100644 --- a/go.sum +++ b/go.sum @@ -613,8 +613,8 @@ github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce/go.mod h1:w4PEZ5 github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 h1:Pe2LbxRmbTfAoKJ65bZLmhahmvHm7n9DUxGRQT00208= github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= -github.com/pingcap/go-tpc v1.0.4-0.20200525052430-dc963cdeef62 h1:Ssr4JKS/0jxI6Ye6x4v5DNOD5fjXYniy1sxVCkqkL08= -github.com/pingcap/go-tpc v1.0.4-0.20200525052430-dc963cdeef62/go.mod h1:YToE6BW+r+aWksQm1kuFnzKgEzaTKsVIHD36rxVYaWc= +github.com/pingcap/go-tpc v1.0.4-0.20201106030831-2cc8c0ab3409 h1:iD5yl4iZ1RDg6PgWKyclHS3OWh5y5fApNrQl1pyRmTA= +github.com/pingcap/go-tpc v1.0.4-0.20201106030831-2cc8c0ab3409/go.mod h1:YToE6BW+r+aWksQm1kuFnzKgEzaTKsVIHD36rxVYaWc= github.com/pingcap/go-ycsb v0.0.0-20200226103513-00ca633a87d8 h1:Lem+5BTrGrzntgag8n38ZMwlYi9Wjzf5658p57mdO04= github.com/pingcap/go-ycsb v0.0.0-20200226103513-00ca633a87d8/go.mod h1:B9UJ3Lbpk4r+qFNDAeS2l6ORGkVaVwMPO1mSqDXiNQc= github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3 h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo= From 92d6fab9c4baed63fd1094ef98dfc4c0aa958d21 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Wed, 11 Nov 2020 12:36:43 +0800 Subject: [PATCH 02/14] Fix ti-spark scale-out (#901) --- cmd/mirror.go | 4 ++-- pkg/cluster/manager.go | 3 --- pkg/cluster/spec/bindversion.go | 2 ++ pkg/repository/v1_repository.go | 7 +++++++ 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/cmd/mirror.go b/cmd/mirror.go index eb024441fe..15721ac3e1 100644 --- a/cmd/mirror.go +++ b/cmd/mirror.go @@ -670,8 +670,8 @@ func newMirrorCloneCmd() *cobra.Command { } defer repo.Mirror().Close() - var versionMapper = func(ver string) string { - return spec.TiDBComponentVersion(ver, "") + var versionMapper = func(comp string) string { + return spec.TiDBComponentVersion(comp, "") } return repository.CloneMirror(repo, components, versionMapper, args[0], args[1:], options) diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index 20176eff64..2e0bb4be49 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -817,9 +817,6 @@ func (m *Manager) Upgrade(clusterName string, clusterVersion string, opt operato for _, comp := range topo.ComponentsByUpdateOrder() { for _, inst := range comp.Instances() { version := m.bindVersion(inst.ComponentName(), clusterVersion) - if version == "" { - return perrs.Errorf("unsupported component: %v", inst.ComponentName()) - } 
compInfo := componentInfo{ component: inst.ComponentName(), version: version, diff --git a/pkg/cluster/spec/bindversion.go b/pkg/cluster/spec/bindversion.go index 058b502d45..4a7692bb75 100644 --- a/pkg/cluster/spec/bindversion.go +++ b/pkg/cluster/spec/bindversion.go @@ -31,6 +31,8 @@ func TiDBComponentVersion(comp, version string) string { return "v0.7.0" case ComponentCheckCollector: return "v0.3.1" + case ComponentSpark, ComponentTiSpark: + return "" // empty version should be treate as the the last stable one default: return version } diff --git a/pkg/repository/v1_repository.go b/pkg/repository/v1_repository.go index ca9494b39b..14355fb313 100644 --- a/pkg/repository/v1_repository.go +++ b/pkg/repository/v1_repository.go @@ -731,6 +731,13 @@ func (r *V1Repository) ComponentVersion(id, version string, includeYanked bool) if v0manifest.Version(version).IsNightly() && manifest.Nightly != "" { version = manifest.Nightly } + if version == "" { + v, _, err := r.LatestStableVersion(id, includeYanked) + if err != nil { + return nil, err + } + version = v.String() + } vi := manifest.VersionItem(r.PlatformString(), version, includeYanked) if vi == nil { return nil, fmt.Errorf("version %s on %s for component %s not found", version, r.PlatformString(), id) From bb072ef732e5cd098882a0af1efad121530e23ce Mon Sep 17 00:00:00 2001 From: 9547 Date: Fri, 13 Nov 2020 16:46:47 +0800 Subject: [PATCH 03/14] cluster: enable service after scale out (#905) --- components/cluster/command/scale_out.go | 24 +----------------------- tests/tiup-cluster/script/scale_core.sh | 4 +++- 2 files changed, 4 insertions(+), 24 deletions(-) diff --git a/components/cluster/command/scale_out.go b/components/cluster/command/scale_out.go index 5a61ee2756..d504287a4b 100644 --- a/components/cluster/command/scale_out.go +++ b/components/cluster/command/scale_out.go @@ -14,14 +14,11 @@ package command import ( - "context" "io/ioutil" "path/filepath" "github.com/pingcap/tiup/pkg/cluster" "github.com/pingcap/tiup/pkg/cluster/executor" - operator "github.com/pingcap/tiup/pkg/cluster/operation" - "github.com/pingcap/tiup/pkg/cluster/report" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/utils" @@ -78,15 +75,6 @@ func newScaleOutCmd() *cobra.Command { return cmd } -// Deprecated -func convertStepDisplaysToTasks(t []*task.StepDisplay) []task.Task { - tasks := make([]task.Task, 0, len(t)) - for _, sd := range t { - tasks = append(tasks, sd) - } - return tasks -} - func final(builder *task.Builder, name string, meta spec.Metadata) { builder.UpdateTopology(name, tidbSpec.Path(name), @@ -96,15 +84,5 @@ func final(builder *task.Builder, name string, meta spec.Metadata) { } func postScaleOutHook(builder *task.Builder, newPart spec.Topology) { - nodeInfoTask := task.NewBuilder().Func("Check status", func(ctx *task.Context) error { - var err error - teleNodeInfos, err = operator.GetNodeInfo(context.Background(), ctx, newPart) - _ = err - // intend to never return error - return nil - }).BuildAsStep("Check status").SetHidden(true) - - if report.Enable() { - builder.Parallel(false, convertStepDisplaysToTasks([]*task.StepDisplay{nodeInfoTask})...) 
- } + postDeployHook(builder, newPart) } diff --git a/tests/tiup-cluster/script/scale_core.sh b/tests/tiup-cluster/script/scale_core.sh index ef51697588..8936aaccf8 100755 --- a/tests/tiup-cluster/script/scale_core.sh +++ b/tests/tiup-cluster/script/scale_core.sh @@ -46,6 +46,8 @@ function scale_core() { topo=./topo/full_scale_in_tidb.yaml sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo tiup-cluster $client --yes scale-out $name $topo + # after scale-out, ensure the service is enabled + tiup-cluster $client exec $name -N $ipprefix.101 --command "systemctl status tidb-4000 | grep Loaded |grep 'enabled; vendor'" # echo "start scale in tikv" # tiup-cluster --yes scale-in $name -N $ipprefix.103:20160 @@ -67,7 +69,7 @@ function scale_core() { topo=./topo/full_scale_in_pd.yaml sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo tiup-cluster $client --yes scale-out $name $topo - # after scalue-out, ensure this instance come back + # after scale-out, ensure this instance come back tiup-cluster $client exec $name -N $ipprefix.101 --command "grep -q $ipprefix.103:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh" echo "start scale in tidb" From cc41ad5af87bde821ecde48d63dd7c1c40d72ea2 Mon Sep 17 00:00:00 2001 From: 9547 Date: Fri, 13 Nov 2020 19:10:23 +0800 Subject: [PATCH 04/14] dry: merge dm's {alertmanager,grafana,prometheus} into cluster (#890) --- components/dm/ansible/import.go | 12 +- components/dm/ansible/import_test.go | 10 +- components/dm/command/scale_in.go | 2 +- components/dm/spec/alertmanager.go | 173 ----------- components/dm/spec/bindversion.go | 2 +- components/dm/spec/cluster.go | 6 +- components/dm/spec/grafana.go | 284 ------------------ components/dm/spec/grafana_test.go | 62 ---- components/dm/spec/logic.go | 50 ++- components/dm/spec/prometheus.go | 248 --------------- .../dm/spec/testdata/dashboards/tidb.json | 3 - components/dm/spec/topology_dm.go | 68 +++-- components/dm/spec/topology_dm_test.go | 42 +-- components/dm/task/update_dm_meta.go | 13 +- pkg/cluster/ansible/import.go | 4 +- pkg/cluster/ansible/import_test.go | 8 +- pkg/cluster/ansible/inventory.go | 22 +- pkg/cluster/ansible/service.go | 4 +- pkg/cluster/embed/autogen_pkger.go | 6 +- pkg/cluster/manager.go | 8 +- pkg/cluster/operation/destroy.go | 8 +- pkg/cluster/operation/scale_in.go | 6 +- pkg/cluster/spec/alertmanager.go | 40 +-- pkg/cluster/spec/bindversion.go | 2 +- pkg/cluster/spec/cdc.go | 19 +- pkg/cluster/spec/drainer.go | 19 +- pkg/cluster/spec/grafana.go | 34 ++- pkg/cluster/spec/grafana_test.go | 4 +- pkg/cluster/spec/instance.go | 2 +- pkg/cluster/spec/pd.go | 23 +- pkg/cluster/spec/prometheus.go | 153 +++++++--- pkg/cluster/spec/pump.go | 19 +- pkg/cluster/spec/spec.go | 57 +++- pkg/cluster/spec/spec_manager_test.go | 8 + pkg/cluster/spec/tidb.go | 19 +- pkg/cluster/spec/tiflash.go | 25 +- pkg/cluster/spec/tikv.go | 21 +- pkg/cluster/spec/tispark.go | 41 +-- pkg/cluster/spec/validate.go | 2 +- pkg/cluster/task/update_meta.go | 28 +- pkg/cluster/task/update_topology.go | 10 +- pkg/cluster/template/config/prometheus.go | 15 + templates/config/dm/prometheus.yml.tpl | 8 +- templates/config/prometheus.yml.tpl | 38 ++- templates/scripts/dm/run_grafana.sh.tpl | 16 - templates/scripts/dm/run_prometheus.sh.tpl | 25 -- tests/tiup-dm/script/util.sh | 46 +-- tests/tiup-dm/test_import.sh | 5 +- 48 files changed, 535 insertions(+), 1185 deletions(-) delete mode 100644 components/dm/spec/alertmanager.go delete mode 100644 components/dm/spec/grafana.go delete mode 100644 components/dm/spec/grafana_test.go 
delete mode 100644 components/dm/spec/prometheus.go delete mode 100644 components/dm/spec/testdata/dashboards/tidb.json delete mode 100644 templates/scripts/dm/run_grafana.sh.tpl delete mode 100644 templates/scripts/dm/run_prometheus.sh.tpl diff --git a/components/dm/ansible/import.go b/components/dm/ansible/import.go index e22e501561..b7553d5766 100644 --- a/components/dm/ansible/import.go +++ b/components/dm/ansible/import.go @@ -222,7 +222,7 @@ func (im *Importer) handleWorkerConfig(srv *spec.WorkerSpec, fname string) error // ScpSourceToMaster scp the source files to master, // and set V1SourcePath of the master spec. -func (im *Importer) ScpSourceToMaster(topo *spec.Topology) (err error) { +func (im *Importer) ScpSourceToMaster(topo *spec.Specification) (err error) { for i := 0; i < len(topo.Masters); i++ { master := &topo.Masters[i] target := filepath.Join(firstNonEmpty(master.DeployDir, topo.GlobalOptions.DeployDir), "v1source") @@ -293,7 +293,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } meta = &spec.Metadata{ - Topology: new(spec.Topology), + Topology: new(spec.Specification), } topo := meta.Topology @@ -480,7 +480,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } case "alertmanager_servers": for _, host := range group.Hosts { - srv := spec.AlertManagerSpec{ + srv := spec.AlertmanagerSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), DeployDir: firstNonEmpty(host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir), @@ -521,9 +521,9 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } } - srv.DeployDir = instancDeployDir(spec.ComponentAlertManager, srv.WebPort, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) + srv.DeployDir = instancDeployDir(spec.ComponentAlertmanager, srv.WebPort, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) - topo.Alertmanager = append(topo.Alertmanager, srv) + topo.Alertmanagers = append(topo.Alertmanagers, srv) } case "grafana_servers": for _, host := range group.Hosts { @@ -559,7 +559,7 @@ func (im *Importer) ImportFromAnsibleDir() (clusterName string, meta *spec.Metad } srv.DeployDir = instancDeployDir(spec.ComponentGrafana, srv.Port, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) - topo.Grafana = append(topo.Grafana, srv) + topo.Grafanas = append(topo.Grafanas, srv) } case "all", "ungrouped": // ignore intent diff --git a/components/dm/ansible/import_test.go b/components/dm/ansible/import_test.go index f560b4ae37..cef557dc86 100644 --- a/components/dm/ansible/import_test.go +++ b/components/dm/ansible/import_test.go @@ -188,9 +188,9 @@ func TestImportFromAnsible(t *testing.T) { assert.Equal(expectedWorker, worker) // check Alertmanager - assert.Len(topo.Alertmanager, 1) - aler := topo.Alertmanager[0] - expectedAlter := spec.AlertManagerSpec{ + assert.Len(topo.Alertmanagers, 1) + aler := topo.Alertmanagers[0] + expectedAlter := spec.AlertmanagerSpec{ Host: "172.19.0.101", SSHPort: 22, WebPort: 9093, @@ -201,8 +201,8 @@ func TestImportFromAnsible(t *testing.T) { assert.Equal(expectedAlter, aler) // Check Grafana - assert.Len(topo.Grafana, 1) - grafana := topo.Grafana[0] + assert.Len(topo.Grafanas, 1) + grafana := topo.Grafanas[0] expectedGrafana := spec.GrafanaSpec{ Host: "172.19.0.101", SSHPort: 22, diff --git a/components/dm/command/scale_in.go b/components/dm/command/scale_in.go index c1e772cbaf..cf66c65845 100644 --- a/components/dm/command/scale_in.go +++ b/components/dm/command/scale_in.go 
@@ -75,7 +75,7 @@ func newScaleInCmd() *cobra.Command { // ScaleInDMCluster scale in dm cluster. func ScaleInDMCluster( getter operator.ExecutorGetter, - topo *dm.Topology, + topo *dm.Specification, options operator.Options, ) error { // instances by uuid diff --git a/components/dm/spec/alertmanager.go b/components/dm/spec/alertmanager.go deleted file mode 100644 index 5fb124d764..0000000000 --- a/components/dm/spec/alertmanager.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - cspec "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// AlertManagerComponent represents Alertmanager component. -type AlertManagerComponent struct{ *Topology } - -// Name implements Component interface. -func (c *AlertManagerComponent) Name() string { - return ComponentAlertManager -} - -// Role implements Component interface. -func (c *AlertManagerComponent) Role() string { - return cspec.RoleMonitor -} - -// Instances implements Component interface. -func (c *AlertManagerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Alertmanager)) - for _, s := range c.Alertmanager { - ins = append(ins, &AlertManagerInstance{ - BaseInstance: cspec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.WebPort, - SSHP: s.SSHPort, - - Ports: []int{ - s.WebPort, - s.ClusterPort, - }, - Dirs: []string{ - s.DeployDir, - s.DataDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, - topo: c.Topology, - }) - } - return ins -} - -// AlertManagerInstance represent the alert manager instance -type AlertManagerInstance struct { - cspec.BaseInstance - topo *Topology -} - -var _ cluster.DeployerInstance = &AlertManagerInstance{} - -// Deploy implements DeployerInstance interface. 
-func (i *AlertManagerInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, clusterName string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Func("CopyConfig", func(ctx *task.Context) error { - tempDir, err := ioutil.TempDir("", "tiup-*") - if err != nil { - return errors.AddStack(err) - } - // transfer config - e := ctx.Get(i.GetHost()) - fp := filepath.Join(tempDir, fmt.Sprintf("alertmanager_%s.yml", i.GetHost())) - if err := config.NewAlertManagerConfig().ConfigToFile(fp); err != nil { - return err - } - dst := filepath.Join(deployDir, "conf", "alertmanager.yml") - err = e.Transfer(fp, dst, false) - if err != nil { - return errors.Annotatef(err, "failed to transfer %s to %s@%s", fp, i.GetHost(), dst) - } - return nil - }) - -} - -// InitConfig implement Instance interface -func (i *AlertManagerInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - enableTLS := i.topo.GlobalOptions.TLSEnabled - - // Transfer start script - spec := i.InstanceSpec.(AlertManagerSpec) - cfg := scripts.NewAlertManagerScript(spec.Host, paths.Deploy, paths.Data[0], paths.Log, enableTLS). - WithWebPort(spec.WebPort).WithClusterPort(spec.ClusterPort).WithNumaNode(spec.NumaNode). - AppendEndpoints(cspec.AlertManagerEndpoints(i.topo.Alertmanager, deployUser, enableTLS)) - - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_alertmanager_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_alertmanager.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - // If the user specific a local config file, we should overwrite the default one with it - if spec.ConfigFilePath != "" { - name := filepath.Base(spec.ConfigFilePath) - dst := filepath.Join(paths.Deploy, "conf", name) - if err := i.TransferLocalConfigFile(e, spec.ConfigFilePath, dst); err != nil { - return errors.Annotate(err, "transfer alertmanager config failed") - } - } - - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *AlertManagerInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - i.topo = topo.(*Topology) - return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} diff --git a/components/dm/spec/bindversion.go b/components/dm/spec/bindversion.go index 757954b81a..7bcee361c2 100644 --- a/components/dm/spec/bindversion.go +++ b/components/dm/spec/bindversion.go @@ -5,7 +5,7 @@ import "github.com/pingcap/tiup/pkg/cluster/spec" // DMComponentVersion maps the dm version to the third components binding version func DMComponentVersion(comp, version string) string { switch comp { - case spec.ComponentAlertManager: + case spec.ComponentAlertmanager: return "v0.17.0" case spec.ComponentGrafana, spec.ComponentPrometheus: return "v4.0.3" diff --git a/components/dm/spec/cluster.go b/components/dm/spec/cluster.go index f3bb49da1d..5b62ad2f66 100644 --- a/components/dm/spec/cluster.go +++ b/components/dm/spec/cluster.go @@ -29,7 +29,7 @@ type 
Metadata struct { Version string `yaml:"dm_version"` // the version of TiDB cluster //EnableFirewall bool `yaml:"firewall"` - Topology *Topology `yaml:"topology"` + Topology *Specification `yaml:"topology"` } var _ cspec.UpgradableMetadata = &Metadata{} @@ -51,7 +51,7 @@ func (m *Metadata) GetTopology() cspec.Topology { // SetTopology implements Metadata interface. func (m *Metadata) SetTopology(topo cspec.Topology) { - dmTopo, ok := topo.(*Topology) + dmTopo, ok := topo.(*Specification) if !ok { panic(fmt.Sprintln("wrong type: ", reflect.TypeOf(topo))) } @@ -72,7 +72,7 @@ func GetSpecManager() *cspec.SpecManager { if specManager == nil { specManager = cspec.NewSpec(filepath.Join(cspec.ProfileDir(), cspec.TiOpsClusterDir), func() cspec.Metadata { return &Metadata{ - Topology: new(Topology), + Topology: new(Specification), } }) } diff --git a/components/dm/spec/grafana.go b/components/dm/spec/grafana.go deleted file mode 100644 index 6c83c37227..0000000000 --- a/components/dm/spec/grafana.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "path/filepath" - "strings" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// GrafanaComponent represents Grafana component. -type GrafanaComponent struct{ *Topology } - -// Name implements Component interface. -func (c *GrafanaComponent) Name() string { - return ComponentGrafana -} - -// Role implements Component interface. -func (c *GrafanaComponent) Role() string { - return spec.RoleMonitor -} - -// Instances implements Component interface. 
-func (c *GrafanaComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Grafana)) - for _, s := range c.Grafana { - ins = append(ins, &GrafanaInstance{ - BaseInstance: spec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.Port, - SSHP: s.SSHPort, - - Ports: []int{ - s.Port, - }, - Dirs: []string{ - s.DeployDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, - topo: c.Topology, - }) - } - return ins -} - -// GrafanaInstance represent the grafana instance -type GrafanaInstance struct { - spec.BaseInstance - topo *Topology -} - -// InitConfig implement Instance interface -func (i *GrafanaInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if len(i.topo.Monitors) == 0 { - return errors.New("no prometheus found in topology") - } - - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - // transfer run script - tpl := filepath.Join("/templates", "scripts", "dm", "run_grafana.sh.tpl") - cfg := scripts.NewGrafanaScript(clusterName, paths.Deploy).WithTPLFile(tpl) - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_grafana_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_grafana.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - // transfer config - fp = filepath.Join(paths.Cache, fmt.Sprintf("grafana_%s.ini", i.GetHost())) - if err := config.NewGrafanaConfig(i.GetHost(), paths.Deploy).WithPort(uint64(i.GetPort())).ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(paths.Deploy, "conf", "grafana.ini") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if err := i.initDashboards(e, i.InstanceSpec.(GrafanaSpec), paths); err != nil { - return errors.Annotate(err, "initial dashboards") - } - - var dirs []string - - // provisioningDir Must same as in grafana.ini.tpl - provisioningDir := filepath.Join(paths.Deploy, "provisioning") - dirs = append(dirs, provisioningDir) - - datasourceDir := filepath.Join(provisioningDir, "datasources") - dirs = append(dirs, datasourceDir) - - dashboardDir := filepath.Join(provisioningDir, "dashboards") - dirs = append(dirs, dashboardDir) - - cmd := fmt.Sprintf("mkdir -p %s", strings.Join(dirs, " ")) - if _, _, err := e.Execute(cmd, false); err != nil { - return errors.AddStack(err) - } - - // transfer dashboard.yml - fp = filepath.Join(paths.Cache, fmt.Sprintf("dashboard_%s.yml", i.GetHost())) - if err := config.NewDashboardConfig(clusterName, paths.Deploy).ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(dashboardDir, "dashboard.yml") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - // transfer datasource.yml - fp = filepath.Join(paths.Cache, fmt.Sprintf("datasource_%s.yml", i.GetHost())) - if err := config.NewDatasourceConfig(clusterName, i.topo.Monitors[0].Host). - WithPort(uint64(i.topo.Monitors[0].Port)). 
- ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(datasourceDir, "datasource.yml") - return e.Transfer(fp, dst, false) -} - -func (i *GrafanaInstance) initDashboards(e executor.Executor, spec GrafanaSpec, paths meta.DirPaths) error { - dashboardsDir := filepath.Join(paths.Deploy, "dashboards") - // To make this step idempotent, we need cleanup old dashboards first - if _, _, err := e.Execute(fmt.Sprintf("rm -f %s/*.json", dashboardsDir), false); err != nil { - return err - } - - if spec.DashboardDir != "" { - return i.TransferLocalConfigDir(e, spec.DashboardDir, dashboardsDir, func(name string) bool { - return strings.HasSuffix(name, ".json") - }) - } - - // Use the default ones - cmd := fmt.Sprintf("cp %[1]s/bin/*.json %[1]s/dashboards/", paths.Deploy) - if _, _, err := e.Execute(cmd, false); err != nil { - return err - } - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *GrafanaInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - dmtopo := topo.(*Topology) - i.topo = dmtopo.Merge(i.topo) - return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} - -var _ cluster.DeployerInstance = &GrafanaInstance{} - -// Deploy implements DeployerInstance interface. -func (i *GrafanaInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, clusterName string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Shell( // rm the json file which relate to tidb cluster and useless. - i.GetHost(), - fmt.Sprintf("rm %s/*.json", filepath.Join(deployDir, "bin")), - false, /*sudo*/ - ).Func("Dashboards", func(ctx *task.Context) error { - e := ctx.Get(i.GetHost()) - - return i.installDashboards(e, deployDir, clusterName, clusterVersion) - }) -} - -func (i *GrafanaInstance) installDashboards(e executor.Executor, deployDir, clusterName, clusterVersion string) error { - tmp := filepath.Join(deployDir, "_tiup_tmp") - _, stderr, err := e.Execute(fmt.Sprintf("mkdir -p %s", tmp), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - srcPath := task.PackagePath(ComponentDMMaster, clusterVersion, i.OS(), i.Arch()) - dstPath := filepath.Join(tmp, filepath.Base(srcPath)) - err = e.Transfer(srcPath, dstPath, false) - if err != nil { - return errors.AddStack(err) - } - - cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // copy dm-master/scripts/*.json - targetDir := filepath.Join(deployDir, "dashboards") - _, stderr, err = e.Execute(fmt.Sprintf("mkdir -p %s", targetDir), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - cmd = fmt.Sprintf("cp %s/dm-master/scripts/*.json %s", tmp, targetDir) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - for _, cmd := range []string{ - `find %s -type f -exec sed -i "s/\${DS_.*-CLUSTER}/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/DS_.*-CLUSTER/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/test-cluster/%s/g" {} \;`, - `find %s -type f -exec sed -i "s/Test-Cluster/%s/g" {} \;`, - } { - cmd := fmt.Sprintf(cmd, targetDir, 
clusterName) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - } - - cmd = fmt.Sprintf("rm -rf %s", tmp) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // backup *.json for later reload (in case that the user change dashboard_dir) - cmd = fmt.Sprintf("cp %s/*.json %s", targetDir, filepath.Join(deployDir, "bin")) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - return nil -} diff --git a/components/dm/spec/grafana_test.go b/components/dm/spec/grafana_test.go deleted file mode 100644 index bb7efe67f7..0000000000 --- a/components/dm/spec/grafana_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "io/ioutil" - "os" - "os/user" - "path" - "path/filepath" - "testing" - - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/meta" - "github.com/stretchr/testify/assert" -) - -func TestLocalDashboards(t *testing.T) { - deployDir, err := ioutil.TempDir("", "tiup-*") - assert.Nil(t, err) - defer os.RemoveAll(deployDir) - localDir, err := filepath.Abs("./testdata/dashboards") - assert.Nil(t, err) - - topo := new(Topology) - topo.Grafana = append(topo.Grafana, GrafanaSpec{ - Host: "127.0.0.1", - Port: 3000, - DashboardDir: localDir, - }) - - comp := GrafanaComponent{topo} - ints := comp.Instances() - - assert.Equal(t, len(ints), 1) - grafanaInstance := ints[0].(*GrafanaInstance) - - user, err := user.Current() - assert.Nil(t, err) - e, err := executor.New(executor.SSHTypeNone, false, executor.SSHConfig{Host: "127.0.0.1", User: user.Username}) - assert.Nil(t, err) - err = grafanaInstance.initDashboards(e, topo.Grafana[0], meta.DirPaths{Deploy: deployDir}) - assert.Nil(t, err) - - assert.FileExists(t, path.Join(deployDir, "dashboards", "tidb.json")) - fs, err := ioutil.ReadDir(localDir) - assert.Nil(t, err) - for _, f := range fs { - assert.FileExists(t, path.Join(deployDir, "dashboards", f.Name())) - } -} diff --git a/components/dm/spec/logic.go b/components/dm/spec/logic.go index cc564dca3f..161321980a 100644 --- a/components/dm/spec/logic.go +++ b/components/dm/spec/logic.go @@ -32,7 +32,7 @@ const ( ComponentDMWorker = "dm-worker" ComponentPrometheus = spec.ComponentPrometheus ComponentGrafana = spec.ComponentGrafana - ComponentAlertManager = spec.ComponentAlertManager + ComponentAlertmanager = spec.ComponentAlertmanager ) type ( @@ -52,7 +52,7 @@ type Component = spec.Component type Instance = spec.Instance // DMMasterComponent represents TiDB component. -type DMMasterComponent struct{ *Topology } +type DMMasterComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMMasterComponent) Name() string { @@ -67,7 +67,7 @@ func (c *DMMasterComponent) Role() string { // Instances implements Component interface. 
func (c *DMMasterComponent) Instances() []Instance { ins := make([]Instance, 0) - for _, s := range c.Masters { + for _, s := range c.Topology.Masters { s := s ins = append(ins, &MasterInstance{ Name: s.Name, @@ -98,7 +98,7 @@ func (c *DMMasterComponent) Instances() []Instance { type MasterInstance struct { Name string spec.BaseInstance - topo *Topology + topo *Specification } // InitConfig implement Instance interface @@ -151,7 +151,7 @@ func (i *MasterInstance) ScaleConfig( return err } - c := topo.(*Topology) + c := topo.(*Specification) spec := i.InstanceSpec.(MasterSpec) cfg := scripts.NewDMMasterScaleScript( spec.Name, @@ -179,9 +179,7 @@ func (i *MasterInstance) ScaleConfig( } // DMWorkerComponent represents DM worker component. -type DMWorkerComponent struct { - *Topology -} +type DMWorkerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMWorkerComponent) Name() string { @@ -196,7 +194,7 @@ func (c *DMWorkerComponent) Role() string { // Instances implements Component interface. func (c *DMWorkerComponent) Instances() []Instance { ins := make([]Instance, 0) - for _, s := range c.Workers { + for _, s := range c.Topology.Workers { s := s ins = append(ins, &WorkerInstance{ Name: s.Name, @@ -227,7 +225,7 @@ func (c *DMWorkerComponent) Instances() []Instance { type WorkerInstance struct { Name string spec.BaseInstance - topo *Topology + topo *Specification } // InitConfig implement Instance interface @@ -280,22 +278,22 @@ func (i *WorkerInstance) ScaleConfig( defer func() { i.topo = s }() - i.topo = topo.(*Topology) + i.topo = topo.(*Specification) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } // GetGlobalOptions returns cluster topology -func (topo *Topology) GetGlobalOptions() spec.GlobalOptions { +func (topo *Specification) GetGlobalOptions() spec.GlobalOptions { return topo.GlobalOptions } // GetMonitoredOptions returns MonitoredOptions -func (topo *Topology) GetMonitoredOptions() *spec.MonitoredOptions { +func (topo *Specification) GetMonitoredOptions() *spec.MonitoredOptions { return nil } // ComponentsByStopOrder return component in the order need to stop. -func (topo *Topology) ComponentsByStopOrder() (comps []Component) { +func (topo *Specification) ComponentsByStopOrder() (comps []Component) { comps = topo.ComponentsByStartOrder() // revert order i := 0 @@ -309,36 +307,36 @@ func (topo *Topology) ComponentsByStopOrder() (comps []Component) { } // ComponentsByStartOrder return component in the order need to start. -func (topo *Topology) ComponentsByStartOrder() (comps []Component) { +func (topo *Specification) ComponentsByStartOrder() (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) - comps = append(comps, &MonitorComponent{topo}) - comps = append(comps, &GrafanaComponent{topo}) - comps = append(comps, &AlertManagerComponent{topo}) + comps = append(comps, &spec.MonitorComponent{Topology: topo}) + comps = append(comps, &spec.GrafanaComponent{Topology: topo}) + comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // ComponentsByUpdateOrder return component in the order need to be updated. 
-func (topo *Topology) ComponentsByUpdateOrder() (comps []Component) { +func (topo *Specification) ComponentsByUpdateOrder() (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) - comps = append(comps, &MonitorComponent{topo}) - comps = append(comps, &GrafanaComponent{topo}) - comps = append(comps, &AlertManagerComponent{topo}) + comps = append(comps, &spec.MonitorComponent{Topology: topo}) + comps = append(comps, &spec.GrafanaComponent{Topology: topo}) + comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // IterComponent iterates all components in component starting order -func (topo *Topology) IterComponent(fn func(comp Component)) { +func (topo *Specification) IterComponent(fn func(comp Component)) { for _, comp := range topo.ComponentsByStartOrder() { fn(comp) } } // IterInstance iterates all instances in component starting order -func (topo *Topology) IterInstance(fn func(instance Instance)) { +func (topo *Specification) IterInstance(fn func(instance Instance)) { for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { fn(inst) @@ -347,7 +345,7 @@ func (topo *Topology) IterInstance(fn func(instance Instance)) { } // IterHost iterates one instance for each host -func (topo *Topology) IterHost(fn func(instance Instance)) { +func (topo *Specification) IterHost(fn func(instance Instance)) { hostMap := make(map[string]bool) for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { @@ -362,7 +360,7 @@ func (topo *Topology) IterHost(fn func(instance Instance)) { } // Endpoints returns the PD endpoints configurations -func (topo *Topology) Endpoints(user string) []*scripts.DMMasterScript { +func (topo *Specification) Endpoints(user string) []*scripts.DMMasterScript { var ends []*scripts.DMMasterScript for _, s := range topo.Masters { deployDir := spec.Abs(user, s.DeployDir) diff --git a/components/dm/spec/prometheus.go b/components/dm/spec/prometheus.go deleted file mode 100644 index cc5e1c08fd..0000000000 --- a/components/dm/spec/prometheus.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "crypto/tls" - "fmt" - "path/filepath" - "strings" - - "github.com/pingcap/errors" - "github.com/pingcap/tiup/pkg/cluster" - "github.com/pingcap/tiup/pkg/cluster/executor" - "github.com/pingcap/tiup/pkg/cluster/spec" - "github.com/pingcap/tiup/pkg/cluster/task" - "github.com/pingcap/tiup/pkg/cluster/template/config/dm" - "github.com/pingcap/tiup/pkg/cluster/template/scripts" - "github.com/pingcap/tiup/pkg/meta" -) - -// MonitorComponent represents Monitor component. -type MonitorComponent struct{ *Topology } - -// Name implements Component interface. -func (c *MonitorComponent) Name() string { - return ComponentPrometheus -} - -// Role implements Component interface. -func (c *MonitorComponent) Role() string { - return spec.RoleMonitor -} - -// Instances implements Component interface. 
-func (c *MonitorComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Monitors)) - for _, s := range c.Monitors { - ins = append(ins, &MonitorInstance{spec.BaseInstance{ - InstanceSpec: s, - Name: c.Name(), - Host: s.Host, - Port: s.Port, - SSHP: s.SSHPort, - - Ports: []int{ - s.Port, - }, - Dirs: []string{ - s.DeployDir, - s.DataDir, - }, - StatusFn: func(_ *tls.Config, _ ...string) string { - return "-" - }, - }, c.Topology}) - } - return ins -} - -// MonitorInstance represent the monitor instance -type MonitorInstance struct { - spec.BaseInstance - topo *Topology -} - -// InitConfig implement Instance interface -func (i *MonitorInstance) InitConfig( - e executor.Executor, - clusterName, - clusterVersion, - deployUser string, - paths meta.DirPaths, -) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { - return err - } - - enableTLS := i.topo.GlobalOptions.TLSEnabled - - // transfer run script - spec := i.InstanceSpec.(PrometheusSpec) - cfg := scripts.NewPrometheusScript( - i.GetHost(), - paths.Deploy, - paths.Data[0], - paths.Log, - ).WithPort(spec.Port). - WithNumaNode(spec.NumaNode). - WithTPLFile(filepath.Join("/templates", "scripts", "dm", "run_prometheus.sh.tpl")) - - fp := filepath.Join(paths.Cache, fmt.Sprintf("run_prometheus_%s_%d.sh", i.GetHost(), i.GetPort())) - if err := cfg.ConfigToFile(fp); err != nil { - return err - } - - dst := filepath.Join(paths.Deploy, "scripts", "run_prometheus.sh") - if err := e.Transfer(fp, dst, false); err != nil { - return err - } - - if _, _, err := e.Execute("chmod +x "+dst, false); err != nil { - return err - } - - topo := i.topo - - // transfer config - fp = filepath.Join(paths.Cache, fmt.Sprintf("prometheus_%s_%d.yml", i.GetHost(), i.GetPort())) - cfig := dm.NewPrometheusConfig(clusterName, enableTLS) - - for _, master := range topo.Masters { - cfig.AddMasterAddrs(master.Host, uint64(master.Port)) - } - - for _, worker := range topo.Workers { - cfig.AddWorkerAddrs(worker.Host, uint64(worker.Port)) - } - - for _, alertmanager := range topo.Alertmanager { - cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) - } - - if err := i.initRules(e, spec, paths); err != nil { - return errors.AddStack(err) - } - - if err := cfig.ConfigToFile(fp); err != nil { - return err - } - dst = filepath.Join(paths.Deploy, "conf", "prometheus.yml") - return e.Transfer(fp, dst, false) -} - -func (i *MonitorInstance) initRules(e executor.Executor, spec PrometheusSpec, paths meta.DirPaths) error { - confDir := filepath.Join(paths.Deploy, "conf") - // To make this step idempotent, we need cleanup old rules first - if _, _, err := e.Execute(fmt.Sprintf("rm -f %s/*.rules.yml", confDir), false); err != nil { - return err - } - - // If the user specify a rule directory, we should use the rules specified - if spec.RuleDir != "" { - return i.TransferLocalConfigDir(e, spec.RuleDir, confDir, func(name string) bool { - return strings.HasSuffix(name, ".rules.yml") - }) - } - - // Use the default ones - cmd := fmt.Sprintf("cp %[1]s/bin/prometheus/*.rules.yml %[1]s/conf/", paths.Deploy) - if _, _, err := e.Execute(cmd, false); err != nil { - return err - } - return nil -} - -// ScaleConfig deploy temporary config on scaling -func (i *MonitorInstance) ScaleConfig( - e executor.Executor, - topo spec.Topology, - clusterName string, - clusterVersion string, - deployUser string, - paths meta.DirPaths, -) error { - s := i.topo - defer func() { i.topo = s }() - i.topo = topo.(*Topology) - return 
i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) -} - -var _ cluster.DeployerInstance = &MonitorInstance{} - -// Deploy implements DeployerInstance interface. -func (i *MonitorInstance) Deploy(t *task.Builder, srcPath string, deployDir string, version string, _ string, clusterVersion string) { - t.CopyComponent( - i.ComponentName(), - i.OS(), - i.Arch(), - version, - srcPath, - i.GetHost(), - deployDir, - ).Shell( // rm the rules file which relate to tidb cluster and useless. - i.GetHost(), - fmt.Sprintf("rm %s/*.rules.yml", filepath.Join(deployDir, "bin", "prometheus")), - false, /*sudo*/ - ).Func("CopyRulesYML", func(ctx *task.Context) error { - e := ctx.Get(i.GetHost()) - - return i.installRules(e, deployDir, clusterVersion) - }) -} - -func (i *MonitorInstance) installRules(e executor.Executor, deployDir, clusterVersion string) error { - tmp := filepath.Join(deployDir, "_tiup_tmp") - _, stderr, err := e.Execute(fmt.Sprintf("mkdir -p %s", tmp), false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - srcPath := task.PackagePath(ComponentDMMaster, clusterVersion, i.OS(), i.Arch()) - dstPath := filepath.Join(tmp, filepath.Base(srcPath)) - - err = e.Transfer(srcPath, dstPath, false) - if err != nil { - return errors.AddStack(err) - } - - cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // copy dm-master/conf/*.rules.yml - targetDir := filepath.Join(deployDir, "conf") - cmd = fmt.Sprintf("cp %s/dm-master/conf/*.rules.yml %s", tmp, targetDir) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - cmd = fmt.Sprintf("rm -rf %s", tmp) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - // backup *.rules.yml for later reload (in case that the user change rule_dir) - cmd = fmt.Sprintf("cp %s/*.rules.yml %s", targetDir, filepath.Join(deployDir, "bin", "prometheus")) - _, stderr, err = e.Execute(cmd, false) - if err != nil { - return errors.Annotatef(err, "stderr: %s", string(stderr)) - } - - return nil -} diff --git a/components/dm/spec/testdata/dashboards/tidb.json b/components/dm/spec/testdata/dashboards/tidb.json deleted file mode 100644 index 7b9f3dc3f7..0000000000 --- a/components/dm/spec/testdata/dashboards/tidb.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "desc": "this is a dummy test file" -} \ No newline at end of file diff --git a/components/dm/spec/topology_dm.go b/components/dm/spec/topology_dm.go index d0c92312bf..0f0f7eb5cc 100644 --- a/components/dm/spec/topology_dm.go +++ b/components/dm/spec/topology_dm.go @@ -70,8 +70,8 @@ type ( PrometheusSpec = spec.PrometheusSpec // GrafanaSpec is the spec of Grafana GrafanaSpec = spec.GrafanaSpec - // AlertManagerSpec is the spec of Alertmanager - AlertManagerSpec = spec.AlertManagerSpec + // AlertmanagerSpec is the spec of Alertmanager + AlertmanagerSpec = spec.AlertmanagerSpec // ResourceControl is the spec of ResourceControl ResourceControl = meta.ResourceControl ) @@ -83,23 +83,23 @@ type ( Worker map[string]interface{} `yaml:"worker"` } - // Topology represents the specification of topology.yaml - Topology struct { + // Specification represents the specification of topology.yaml + Specification struct { GlobalOptions GlobalOptions `yaml:"global,omitempty" validate:"global:editable"` 
// MonitoredOptions MonitoredOptions `yaml:"monitored,omitempty" validate:"monitored:editable"` - ServerConfigs DMServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` - Masters []MasterSpec `yaml:"master_servers"` - Workers []WorkerSpec `yaml:"worker_servers"` - Monitors []PrometheusSpec `yaml:"monitoring_servers"` - Grafana []GrafanaSpec `yaml:"grafana_servers,omitempty"` - Alertmanager []AlertManagerSpec `yaml:"alertmanager_servers,omitempty"` + ServerConfigs DMServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` + Masters []MasterSpec `yaml:"master_servers"` + Workers []WorkerSpec `yaml:"worker_servers"` + Monitors []spec.PrometheusSpec `yaml:"monitoring_servers"` + Grafanas []spec.GrafanaSpec `yaml:"grafana_servers,omitempty"` + Alertmanagers []spec.AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) // AllDMComponentNames contains the names of all dm components. // should include all components in ComponentsByStartOrder func AllDMComponentNames() (roles []string) { - tp := &Topology{} + tp := &Specification{} tp.IterComponent(func(c Component) { roles = append(roles, c.Name()) }) @@ -225,8 +225,8 @@ func (s WorkerSpec) IsImported() bool { } // UnmarshalYAML sets default values when unmarshaling the topology file -func (topo *Topology) UnmarshalYAML(unmarshal func(interface{}) error) error { - type topology Topology +func (topo *Specification) UnmarshalYAML(unmarshal func(interface{}) error) error { + type topology Specification if err := unmarshal((*topology)(topo)); err != nil { return err } @@ -244,7 +244,7 @@ func (topo *Topology) UnmarshalYAML(unmarshal func(interface{}) error) error { // platformConflictsDetect checks for conflicts in topology for different OS / Arch // for set to the same host / IP -func (topo *Topology) platformConflictsDetect() error { +func (topo *Specification) platformConflictsDetect() error { type ( conflict struct { os string @@ -305,7 +305,7 @@ func (topo *Topology) platformConflictsDetect() error { return nil } -func (topo *Topology) portConflictsDetect() error { +func (topo *Specification) portConflictsDetect() error { type ( usedPort struct { host string @@ -383,7 +383,7 @@ func (topo *Topology) portConflictsDetect() error { return nil } -func (topo *Topology) dirConflictsDetect() error { +func (topo *Specification) dirConflictsDetect() error { type ( usedDir struct { host string @@ -467,7 +467,7 @@ func (topo *Topology) dirConflictsDetect() error { // CountDir counts for dir paths used by any instance in the cluster with the same // prefix, useful to find potential path conflicts -func (topo *Topology) CountDir(targetHost, dirPrefix string) int { +func (topo *Specification) CountDir(targetHost, dirPrefix string) int { dirTypes := []string{ "DataDir", "DeployDir", @@ -532,7 +532,7 @@ func (topo *Topology) CountDir(targetHost, dirPrefix string) int { } // TLSConfig generates a tls.Config for the specification as needed -func (topo *Topology) TLSConfig(dir string) (*tls.Config, error) { +func (topo *Specification) TLSConfig(dir string) (*tls.Config, error) { if !topo.GlobalOptions.TLSEnabled { return nil, nil } @@ -541,7 +541,7 @@ func (topo *Topology) TLSConfig(dir string) (*tls.Config, error) { // Validate validates the topology specification and produce error if the // specification invalid (e.g: port conflicts or directory conflicts) -func (topo *Topology) Validate() error { +func (topo *Specification) Validate() error { if err := topo.platformConflictsDetect(); err != nil { return err 
} @@ -558,25 +558,28 @@ func (topo *Topology) Validate() error { } // BaseTopo implements Topology interface. -func (topo *Topology) BaseTopo() *spec.BaseTopo { +func (topo *Specification) BaseTopo() *spec.BaseTopo { return &spec.BaseTopo{ GlobalOptions: &topo.GlobalOptions, MonitoredOptions: topo.GetMonitoredOptions(), MasterList: topo.GetMasterList(), + Monitors: topo.Monitors, + Grafanas: topo.Grafanas, + Alertmanagers: topo.Alertmanagers, } } // NewPart implements ScaleOutTopology interface. -func (topo *Topology) NewPart() spec.Topology { - return &Topology{ +func (topo *Specification) NewPart() spec.Topology { + return &Specification{ GlobalOptions: topo.GlobalOptions, ServerConfigs: topo.ServerConfigs, } } // MergeTopo implements ScaleOutTopology interface. -func (topo *Topology) MergeTopo(rhs spec.Topology) spec.Topology { - other, ok := rhs.(*Topology) +func (topo *Specification) MergeTopo(rhs spec.Topology) spec.Topology { + other, ok := rhs.(*Specification) if !ok { panic("topo should be DM Topology") } @@ -585,7 +588,7 @@ func (topo *Topology) MergeTopo(rhs spec.Topology) spec.Topology { } // GetMasterList returns a list of Master API hosts of the current cluster -func (topo *Topology) GetMasterList() []string { +func (topo *Specification) GetMasterList() []string { var masterList []string for _, master := range topo.Masters { @@ -596,16 +599,17 @@ func (topo *Topology) GetMasterList() []string { } // Merge returns a new Topology which sum old ones -func (topo *Topology) Merge(that *Topology) *Topology { - return &Topology{ +func (topo *Specification) Merge(that spec.Topology) spec.Topology { + spec := that.(*Specification) + return &Specification{ GlobalOptions: topo.GlobalOptions, // MonitoredOptions: topo.MonitoredOptions, ServerConfigs: topo.ServerConfigs, - Masters: append(topo.Masters, that.Masters...), - Workers: append(topo.Workers, that.Workers...), - Monitors: append(topo.Monitors, that.Monitors...), - Grafana: append(topo.Grafana, that.Grafana...), - Alertmanager: append(topo.Alertmanager, that.Alertmanager...), + Masters: append(topo.Masters, spec.Masters...), + Workers: append(topo.Workers, spec.Workers...), + Monitors: append(topo.Monitors, spec.Monitors...), + Grafanas: append(topo.Grafanas, spec.Grafanas...), + Alertmanagers: append(topo.Alertmanagers, spec.Alertmanagers...), } } diff --git a/components/dm/spec/topology_dm_test.go b/components/dm/spec/topology_dm_test.go index fbfa04c528..bb92e63d98 100644 --- a/components/dm/spec/topology_dm_test.go +++ b/components/dm/spec/topology_dm_test.go @@ -32,14 +32,14 @@ var _ = Suite(&metaSuiteDM{}) func TestDefaultDataDir(t *testing.T) { // Test with without global DataDir. - topo := new(Topology) + topo := new(Specification) topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Workers = append(topo.Workers, WorkerSpec{Host: "1.1.2.1", Port: 2221}) data, err := yaml.Marshal(topo) assert.Nil(t, err) // Check default value. - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) assert.Equal(t, "data", topo.GlobalOptions.DataDir) @@ -49,7 +49,7 @@ func TestDefaultDataDir(t *testing.T) { // Can keep the default value. 
data, err = yaml.Marshal(topo) assert.Nil(t, err) - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) assert.Equal(t, "data", topo.GlobalOptions.DataDir) @@ -57,7 +57,7 @@ func TestDefaultDataDir(t *testing.T) { assert.Equal(t, "data", topo.Workers[0].DataDir) // Test with global DataDir. - topo = new(Topology) + topo = new(Specification) topo.GlobalOptions.DataDir = "/gloable_data" topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Masters = append(topo.Masters, MasterSpec{Host: "1.1.1.2", Port: 1112, DataDir: "/my_data"}) @@ -66,7 +66,7 @@ func TestDefaultDataDir(t *testing.T) { data, err = yaml.Marshal(topo) assert.Nil(t, err) - topo = new(Topology) + topo = new(Specification) err = yaml.Unmarshal(data, topo) assert.Nil(t, err) @@ -78,7 +78,7 @@ func TestDefaultDataDir(t *testing.T) { } func TestGlobalOptions(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -105,7 +105,7 @@ worker_servers: } func TestDirectoryConflicts(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -139,7 +139,7 @@ worker_servers: } func TestPortConflicts(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" @@ -159,7 +159,7 @@ worker_servers: func TestPlatformConflicts(t *testing.T) { // aarch64 and arm64 are equal - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: os: "linux" @@ -173,7 +173,7 @@ worker_servers: assert.Nil(t, err) // different arch defined for the same host - topo = Topology{} + topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" @@ -187,7 +187,7 @@ worker_servers: assert.Equal(t, "platform mismatch for '172.16.5.138' between 'master_servers:linux/arm64' and 'worker_servers:linux/amd64'", err.Error()) // different os defined for the same host - topo = Topology{} + topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" @@ -203,7 +203,7 @@ worker_servers: } func TestCountDir(t *testing.T) { - topo := Topology{} + topo := Specification{} err := yaml.Unmarshal([]byte(` global: @@ -296,8 +296,8 @@ func with2TempFile(content1, content2 string, fn func(string, string)) { }) } -func merge4test(base, scale string) (*Topology, error) { - baseTopo := Topology{} +func merge4test(base, scale string) (*Specification, error) { + baseTopo := Specification{} if err := spec.ParseTopologyYaml(base, &baseTopo); err != nil { return nil, err } @@ -312,7 +312,7 @@ func merge4test(base, scale string) (*Topology, error) { return nil, err } - return mergedTopo.(*Topology), nil + return mergedTopo.(*Specification), nil } func TestRelativePath(t *testing.T) { @@ -323,7 +323,7 @@ master_servers: worker_servers: - host: 172.16.5.140 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -339,7 +339,7 @@ master_servers: data_dir: my-data log_dir: my-log `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -356,7 +356,7 @@ global: master_servers: - host: 172.16.5.140 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -378,7 +378,7 @@ worker_servers: - 
host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -407,7 +407,7 @@ worker_servers: - host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) @@ -439,7 +439,7 @@ worker_servers: - host: 172.16.5.140 port: 20161 `, func(file string) { - topo := Topology{} + topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) assert.Nil(t, err) spec.ExpandRelativeDir(&topo) diff --git a/components/dm/task/update_dm_meta.go b/components/dm/task/update_dm_meta.go index 9e57c7cc17..c2456c4d34 100644 --- a/components/dm/task/update_dm_meta.go +++ b/components/dm/task/update_dm_meta.go @@ -19,6 +19,7 @@ import ( dmspec "github.com/pingcap/tiup/components/dm/spec" + "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" ) @@ -44,7 +45,7 @@ func (u *UpdateDMMeta) Execute(ctx *task.Context) error { // make a copy newMeta := &dmspec.Metadata{} *newMeta = *u.metadata - newMeta.Topology = &dmspec.Topology{ + newMeta.Topology = &dmspec.Specification{ GlobalOptions: u.metadata.Topology.GlobalOptions, // MonitoredOptions: u.metadata.Topology.MonitoredOptions, ServerConfigs: u.metadata.Topology.ServerConfigs, @@ -64,23 +65,23 @@ func (u *UpdateDMMeta) Execute(ctx *task.Context) error { } newMeta.Topology.Workers = append(newMeta.Topology.Workers, topo.Workers[i]) } - for i, instance := range (&dmspec.MonitorComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Monitors = append(newMeta.Topology.Monitors, topo.Monitors[i]) } - for i, instance := range (&dmspec.GrafanaComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Grafana = append(newMeta.Topology.Grafana, topo.Grafana[i]) + newMeta.Topology.Grafanas = append(newMeta.Topology.Grafanas, topo.Grafanas[i]) } - for i, instance := range (&dmspec.AlertManagerComponent{Topology: topo}).Instances() { + for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Alertmanager = append(newMeta.Topology.Alertmanager, topo.Alertmanager[i]) + newMeta.Topology.Alertmanagers = append(newMeta.Topology.Alertmanagers, topo.Alertmanagers[i]) } return dmspec.GetSpecManager().SaveMeta(u.cluster, newMeta) diff --git a/pkg/cluster/ansible/import.go b/pkg/cluster/ansible/import.go index 08d3588c84..af5005226e 100644 --- a/pkg/cluster/ansible/import.go +++ b/pkg/cluster/ansible/import.go @@ -65,8 +65,8 @@ func parseInventoryFile(invFile io.Reader) (string, *spec.ClusterMeta, *aini.Inv PumpServers: make([]spec.PumpSpec, 0), Drainers: make([]spec.DrainerSpec, 0), Monitors: make([]spec.PrometheusSpec, 0), - Grafana: make([]spec.GrafanaSpec, 0), - Alertmanager: make([]spec.AlertManagerSpec, 0), + Grafanas: make([]spec.GrafanaSpec, 0), + Alertmanagers: make([]spec.AlertmanagerSpec, 0), }, } clsName := "" diff --git a/pkg/cluster/ansible/import_test.go b/pkg/cluster/ansible/import_test.go index 600ab130de..018db032ca 100644 --- a/pkg/cluster/ansible/import_test.go +++ 
b/pkg/cluster/ansible/import_test.go @@ -140,10 +140,10 @@ func sortClusterMeta(clsMeta *spec.ClusterMeta) { sort.Slice(clsMeta.Topology.Monitors, func(i, j int) bool { return clsMeta.Topology.Monitors[i].Host < clsMeta.Topology.Monitors[j].Host }) - sort.Slice(clsMeta.Topology.Grafana, func(i, j int) bool { - return clsMeta.Topology.Grafana[i].Host < clsMeta.Topology.Grafana[j].Host + sort.Slice(clsMeta.Topology.Grafanas, func(i, j int) bool { + return clsMeta.Topology.Grafanas[i].Host < clsMeta.Topology.Grafanas[j].Host }) - sort.Slice(clsMeta.Topology.Alertmanager, func(i, j int) bool { - return clsMeta.Topology.Alertmanager[i].Host < clsMeta.Topology.Alertmanager[j].Host + sort.Slice(clsMeta.Topology.Alertmanagers, func(i, j int) bool { + return clsMeta.Topology.Alertmanagers[i].Host < clsMeta.Topology.Alertmanagers[j].Host }) } diff --git a/pkg/cluster/ansible/inventory.go b/pkg/cluster/ansible/inventory.go index 0f47101815..e3338b5df1 100644 --- a/pkg/cluster/ansible/inventory.go +++ b/pkg/cluster/ansible/inventory.go @@ -110,21 +110,21 @@ func ParseAndImportInventory(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, } clsMeta.Topology.Monitors[i] = ins.(spec.PrometheusSpec) } - for i := 0; i < len(clsMeta.Topology.Alertmanager); i++ { - s := clsMeta.Topology.Alertmanager[i] + for i := 0; i < len(clsMeta.Topology.Alertmanagers); i++ { + s := clsMeta.Topology.Alertmanagers[i] ins, err := parseDirs(clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } - clsMeta.Topology.Alertmanager[i] = ins.(spec.AlertManagerSpec) + clsMeta.Topology.Alertmanagers[i] = ins.(spec.AlertmanagerSpec) } - for i := 0; i < len(clsMeta.Topology.Grafana); i++ { - s := clsMeta.Topology.Grafana[i] + for i := 0; i < len(clsMeta.Topology.Grafanas); i++ { + s := clsMeta.Topology.Grafanas[i] ins, err := parseDirs(clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } - clsMeta.Topology.Grafana[i] = ins.(spec.GrafanaSpec) + clsMeta.Topology.Grafanas[i] = ins.(spec.GrafanaSpec) } // TODO: get values from templates of roles to overwrite defaults @@ -429,7 +429,7 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini if host == "" { host = srv.Name } - tmpIns := spec.AlertManagerSpec{ + tmpIns := spec.AlertmanagerSpec{ Host: host, SSHPort: getHostPort(srv, ansCfg), Imported: true, @@ -452,9 +452,9 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini log.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) - clsMeta.Topology.Alertmanager = append(clsMeta.Topology.Alertmanager, tmpIns) + clsMeta.Topology.Alertmanagers = append(clsMeta.Topology.Alertmanagers, tmpIns) } - log.Infof("Imported %d Alertmanager node(s).", len(clsMeta.Topology.Alertmanager)) + log.Infof("Imported %d Alertmanager node(s).", len(clsMeta.Topology.Alertmanagers)) } // grafana_servers @@ -485,9 +485,9 @@ func parseGroupVars(dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini log.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) - clsMeta.Topology.Grafana = append(clsMeta.Topology.Grafana, tmpIns) + clsMeta.Topology.Grafanas = append(clsMeta.Topology.Grafanas, tmpIns) } - log.Infof("Imported %d Grafana node(s).", len(clsMeta.Topology.Alertmanager)) + log.Infof("Imported %d Grafana node(s).", len(clsMeta.Topology.Alertmanagers)) } // kafka_exporter_servers diff --git a/pkg/cluster/ansible/service.go b/pkg/cluster/ansible/service.go index b59bfec22c..d985b914e1 100644 --- 
a/pkg/cluster/ansible/service.go +++ b/pkg/cluster/ansible/service.go @@ -214,9 +214,9 @@ func parseDirs(user string, ins spec.InstanceSpec, sshTimeout uint64, sshType ex } } return newIns, nil - case spec.ComponentAlertManager: + case spec.ComponentAlertmanager: // parse dirs - newIns := ins.(spec.AlertManagerSpec) + newIns := ins.(spec.AlertmanagerSpec) for _, line := range strings.Split(string(stdout), "\n") { if strings.HasPrefix(line, "DEPLOY_DIR=") { newIns.DeployDir = strings.TrimPrefix(line, "DEPLOY_DIR=") diff --git a/pkg/cluster/embed/autogen_pkger.go b/pkg/cluster/embed/autogen_pkger.go index 079c83dc0a..e574df3297 100644 --- a/pkg/cluster/embed/autogen_pkger.go +++ b/pkg/cluster/embed/autogen_pkger.go @@ -20,13 +20,11 @@ func init() { autogenFiles["/templates/config/blackbox.yml"] = "bW9kdWxlczoKICAgIGh0dHBfMnh4OgogICAgICBwcm9iZXI6IGh0dHAKICAgICAgaHR0cDoKICAgICAgICBtZXRob2Q6IEdFVAogICAgaHR0cF9wb3N0XzJ4eDoKICAgICAgcHJvYmVyOiBodHRwCiAgICAgIGh0dHA6CiAgICAgICAgbWV0aG9kOiBQT1NUCiAgICB0Y3BfY29ubmVjdDoKICAgICAgcHJvYmVyOiB0Y3AKICAgIHBvcDNzX2Jhbm5lcjoKICAgICAgcHJvYmVyOiB0Y3AKICAgICAgdGNwOgogICAgICAgIHF1ZXJ5X3Jlc3BvbnNlOgogICAgICAgIC0gZXhwZWN0OiAiXitPSyIKICAgICAgICB0bHM6IHRydWUKICAgICAgICB0bHNfY29uZmlnOgogICAgICAgICAgaW5zZWN1cmVfc2tpcF92ZXJpZnk6IGZhbHNlCiAgICBzc2hfYmFubmVyOgogICAgICBwcm9iZXI6IHRjcAogICAgICB0Y3A6CiAgICAgICAgcXVlcnlfcmVzcG9uc2U6CiAgICAgICAgLSBleHBlY3Q6ICJeU1NILTIuMC0iCiAgICBpcmNfYmFubmVyOgogICAgICBwcm9iZXI6IHRjcAogICAgICB0Y3A6CiAgICAgICAgcXVlcnlfcmVzcG9uc2U6CiAgICAgICAgLSBzZW5kOiAiTklDSyBwcm9iZXIiCiAgICAgICAgLSBzZW5kOiAiVVNFUiBwcm9iZXIgcHJvYmVyIHByb2JlciA6cHJvYmVyIgogICAgICAgIC0gZXhwZWN0OiAiUElORyA6KFteIF0rKSIKICAgICAgICAgIHNlbmQ6ICJQT05HICR7MX0iCiAgICAgICAgLSBleHBlY3Q6ICJeOlteIF0rIDAwMSIKICAgIGljbXA6CiAgICAgIHByb2JlcjogaWNtcAogICAgICB0aW1lb3V0OiA1cwogICAgICBpY21wOgogICAgICAgIHByZWZlcnJlZF9pcF9wcm90b2NvbDogImlwNCI=" autogenFiles["/templates/config/dashboard.yml.tpl"] = "YXBpVmVyc2lvbjogMQpwcm92aWRlcnM6CiAgLSBuYW1lOiB7ey5DbHVzdGVyTmFtZX19CiAgICBmb2xkZXI6IHt7LkNsdXN0ZXJOYW1lfX0KICAgIHR5cGU6IGZpbGUKICAgIGRpc2FibGVEZWxldGlvbjogZmFsc2UKICAgIGVkaXRhYmxlOiB0cnVlCiAgICB1cGRhdGVJbnRlcnZhbFNlY29uZHM6IDMwCiAgICBvcHRpb25zOgogICAgICBwYXRoOiB7ey5EZXBsb3lEaXJ9fS9kYXNoYm9hcmRz" autogenFiles["/templates/config/datasource.yml.tpl"] = "YXBpVmVyc2lvbjogMQpkZWxldGVEYXRhc291cmNlczoKICAtIG5hbWU6IHt7LkNsdXN0ZXJOYW1lfX0KZGF0YXNvdXJjZXM6CiAgLSBuYW1lOiB7ey5DbHVzdGVyTmFtZX19CiAgICB0eXBlOiBwcm9tZXRoZXVzCiAgICBhY2Nlc3M6IHByb3h5CiAgICB1cmw6IGh0dHA6Ly97ey5JUH19Ont7LlBvcnR9fQogICAgd2l0aENyZWRlbnRpYWxzOiBmYWxzZQogICAgaXNEZWZhdWx0OiBmYWxzZQogICAgdGxzQXV0aDogZmFsc2UKICAgIHRsc0F1dGhXaXRoQ0FDZXJ0OiBmYWxzZQogICAgdmVyc2lvbjogMQogICAgZWRpdGFibGU6IHRydWU=" - autogenFiles["/templates/config/dm/prometheus.yml.tpl"] = 
"LS0tCmdsb2JhbDoKICBzY3JhcGVfaW50ZXJ2YWw6ICAgICAxNXMgIyBCeSBkZWZhdWx0LCBzY3JhcGUgdGFyZ2V0cyBldmVyeSAxNSBzZWNvbmRzLgogIGV2YWx1YXRpb25faW50ZXJ2YWw6IDE1cyAjIEJ5IGRlZmF1bHQsIHNjcmFwZSB0YXJnZXRzIGV2ZXJ5IDE1IHNlY29uZHMuCiAgIyBzY3JhcGVfdGltZW91dCBpcyBzZXQgdG8gdGhlIGdsb2JhbCBkZWZhdWx0ICgxMHMpLgogIGV4dGVybmFsX2xhYmVsczoKICAgIGNsdXN0ZXI6ICd7ey5DbHVzdGVyTmFtZX19JwogICAgbW9uaXRvcjogInByb21ldGhldXMiCgojIExvYWQgYW5kIGV2YWx1YXRlIHJ1bGVzIGluIHRoaXMgZmlsZSBldmVyeSAnZXZhbHVhdGlvbl9pbnRlcnZhbCcgc2Vjb25kcy4KcnVsZV9maWxlczoKICAtICdkbV93b3JrZXIucnVsZXMueW1sJwogIC0gJ2RtX21hc3Rlci5ydWxlcy55bWwnCgp7ey0gaWYgLkFsZXJ0bWFuYWdlckFkZHJzfX0KYWxlcnRpbmc6CiBhbGVydG1hbmFnZXJzOgogLSBzdGF0aWNfY29uZmlnczoKICAgLSB0YXJnZXRzOgp7ey0gcmFuZ2UgLkFsZXJ0bWFuYWdlckFkZHJzfX0KICAgICAtICd7ey59fScKe3stIGVuZH19Cnt7LSBlbmR9fQoKc2NyYXBlX2NvbmZpZ3M6Cnt7LSBpZiAuTWFzdGVyQWRkcnN9fQogIC0gam9iX25hbWU6ICJkbV9tYXN0ZXIiCiAgICBob25vcl9sYWJlbHM6IHRydWUgIyBkb24ndCBvdmVyd3JpdGUgam9iICYgaW5zdGFuY2UgbGFiZWxzCiAgICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgIHt7LSByYW5nZSAuTWFzdGVyQWRkcnN9fQogICAgICAgLSAne3sufX0nCiAgICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7ey0gaWYgLldvcmtlckFkZHJzfX0KICAtIGpvYl9uYW1lOiAiZG1fd29ya2VyIgogICAgaG9ub3JfbGFiZWxzOiB0cnVlICMgZG9uJ3Qgb3ZlcndyaXRlIGpvYiAmIGluc3RhbmNlIGxhYmVscwogICAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICB7ey0gcmFuZ2UgLldvcmtlckFkZHJzfX0KICAgICAgIC0gJ3t7Ln19JwogICAge3stIGVuZH19Cnt7LSBlbmR9fQo=" + autogenFiles["/templates/config/dm/prometheus.yml.tpl"] = "LS0tCmdsb2JhbDoKICBzY3JhcGVfaW50ZXJ2YWw6ICAgICAxNXMgIyBCeSBkZWZhdWx0LCBzY3JhcGUgdGFyZ2V0cyBldmVyeSAxNSBzZWNvbmRzLgogIGV2YWx1YXRpb25faW50ZXJ2YWw6IDE1cyAjIEJ5IGRlZmF1bHQsIHNjcmFwZSB0YXJnZXRzIGV2ZXJ5IDE1IHNlY29uZHMuCiAgIyBzY3JhcGVfdGltZW91dCBpcyBzZXQgdG8gdGhlIGdsb2JhbCBkZWZhdWx0ICgxMHMpLgogIGV4dGVybmFsX2xhYmVsczoKICAgIGNsdXN0ZXI6ICd7ey5DbHVzdGVyTmFtZX19JwogICAgbW9uaXRvcjogInByb21ldGhldXMiCgojIExvYWQgYW5kIGV2YWx1YXRlIHJ1bGVzIGluIHRoaXMgZmlsZSBldmVyeSAnZXZhbHVhdGlvbl9pbnRlcnZhbCcgc2Vjb25kcy4KcnVsZV9maWxlczoKICAtICdkbV93b3JrZXIucnVsZXMueW1sJwogIC0gJ2RtX21hc3Rlci5ydWxlcy55bWwnCgp7ey0gaWYgLkFsZXJ0bWFuYWdlckFkZHJzfX0KYWxlcnRpbmc6CiAgYWxlcnRtYW5hZ2VyczoKICAtIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgp7ey0gcmFuZ2UgLkFsZXJ0bWFuYWdlckFkZHJzfX0KICAgIC0gJ3t7Ln19Jwp7ey0gZW5kfX0Ke3stIGVuZH19CgpzY3JhcGVfY29uZmlnczoKe3stIGlmIC5NYXN0ZXJBZGRyc319CiAgLSBqb2JfbmFtZTogImRtX21hc3RlciIKICAgIGhvbm9yX2xhYmVsczogdHJ1ZSAjIGRvbid0IG92ZXJ3cml0ZSBqb2IgJiBpbnN0YW5jZSBsYWJlbHMKICAgIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAge3stIHJhbmdlIC5NYXN0ZXJBZGRyc319CiAgICAgICAtICd7ey59fScKICAgIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuV29ya2VyQWRkcnN9fQogIC0gam9iX25hbWU6ICJkbV93b3JrZXIiCiAgICBob25vcl9sYWJlbHM6IHRydWUgIyBkb24ndCBvdmVyd3JpdGUgam9iICYgaW5zdGFuY2UgbGFiZWxzCiAgICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgIHt7LSByYW5nZSAuV29ya2VyQWRkcnN9fQogICAgICAgLSAne3sufX0nCiAgICB7ey0gZW5kfX0Ke3stIGVuZH19Cg==" autogenFiles["/templates/config/grafana.ini.tpl"] = "##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change

# possible values : production, development
; app_mode = production

# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
; instance_name = ${HOSTNAME}

#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
data = {{.DeployDir}}/data
#
# Directory where grafana can store logs
#
logs = {{.DeployDir}}/logs
#
# Directory where grafana will automatically scan and look for plugins
#
plugins = {{.DeployDir}}/plugins
#
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = {{.DeployDir}}/provisioning

#
#################################### Server ####################################
[server]
# Protocol (http or https)
;protocol = http

# The ip address to bind to, empty will bind to all interfaces
;http_addr =

# The http port  to use
http_port = {{.Port}}

# The public facing domain name used to access grafana from a browser
domain = {{.IP}}

# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false

# The full public facing url
;root_url = %(protocol)s://%(domain)s:%(http_port)s/

# Log web requests
;router_logging = false

# the path relative working path
;static_root_path = public

# enable gzip
;enable_gzip = false

# https certs & key file
;cert_file =
;cert_key =

#################################### Database ####################################
[database]
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
;password =

# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable

# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file

# Provider config options
# memory: not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions

# Session cookie name
;cookie_name = grafana_sess

# If you use session in https only, default is false
;cookie_secure = false

# Session life time, default is 86400
;session_life_time = 86400

#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true

# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.net to get latest versions
check_for_updates = true

# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =

#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin

# default admin password, can be changed before first start of grafana,  or in profile settings
;admin_password = admin

# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm

# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember

# disable gravatar profile images
;disable_gravatar = false

# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =

[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io

#################################### Users ####################################
[users]
# disable user signup / registration
;allow_sign_up = true

# Allow non admin users to create organizations
;allow_org_create = true

# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true

# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer

# Background text for the user field on the login page
;login_hint = email or username

# Default UI theme ("dark" or "light")
;default_theme = dark

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false

# specify organization name that should be used for unauthenticated users
;org_name = Main Org.

# specify role for unauthenticated users
;org_role = Viewer

#################################### Basic Auth ##########################
[auth.basic]
;enabled = true

#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml

#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost

[emails]
;welcome_email_on_sign_up = false

#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and  file
# Use space to separate multiple modes, e.g. "console file"
mode = file

# Either "trace", "debug", "info", "warn", "error", "critical", default is "info"
;level = info

# For "console" mode only
[log.console]
;level =

# log line format, valid options are text, console and json
;format = console

# For "file" mode only
[log.file]
level = info

# log line format, valid options are text, console and json
format = text

# This enables automated log rotate(switch of following options), default is true
;log_rotate = true

# Max line number of single file, default is 1000000
;max_lines = 1000000

# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28

# Segment log daily, default is true
;daily_rotate = true

# Expired days of log file(delete after max days), default is 7
;max_days = 7

[log.syslog]
;level =

# log line format, valid options are text, console and json
;format = text

# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =

# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =

# Syslog tag. By default, the process' argv[0] is used.
;tag =


#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
;exchange = grafana_events

;#################################### Dashboard JSON files ##########################
[dashboards.json]
enabled = false
path = {{.DeployDir}}/dashboards

#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
[metrics]
# Disable / Enable internal metrics
;enabled           = true

# Publish interval
;interval_seconds  = 10

# Send internal metrics to Graphite
; [metrics.graphite]
; address = localhost:2003
; prefix = prod.grafana.%(instance_name)s.

#################################### Internal Grafana Metrics ##########################
# Url used to import dashboards directly from Grafana.net
[grafana_net]
url = https://grafana.net" - autogenFiles["/templates/config/prometheus.yml.tpl"] = "---
global:
  scrape_interval:     15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).
  external_labels:
    cluster: '{{.ClusterName}}'
    monitor: "prometheus"

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - 'node.rules.yml'
  - 'blacker.rules.yml'
  - 'bypass.rules.yml'
  - 'pd.rules.yml'
  - 'tidb.rules.yml'
  - 'tikv.rules.yml'
  - 'tikv.accelerate.rules.yml'
{{- if .TiFlashStatusAddrs}}
  - 'tiflash.rules.yml'
{{- end}}
{{- if .PumpAddrs}}
  - 'binlog.rules.yml'
{{- end}}
{{- if .CDCAddrs}}
  - 'ticdc.rules.yml'
{{- end}}
{{- if .KafkaAddrs}}
  - 'kafka.rules.yml'
{{- end}}
{{- if .LightningAddrs}}
  - 'lightning.rules.yml'
{{- end}}

{{- if .AlertmanagerAddrs}}
alerting:
 alertmanagers:
 - static_configs:
   - targets:
{{- range .AlertmanagerAddrs}}
     - '{{.}}'
{{- end}}
{{- end}}

scrape_configs:
{{- if .PushgatewayAddr}}
  - job_name: 'overwritten-cluster'
    scrape_interval: 15s
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
      - targets: ['{{.PushgatewayAddr}}']

  - job_name: "blackbox_exporter_http"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
    - targets:
      - 'http://{{.PushgatewayAddr}}/metrics'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .LightningAddrs}}
  - job_name: "lightning"
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
      - targets: ['{{index .LightningAddrs 0}}']
{{- end}}
  - job_name: "overwritten-nodes"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
{{- range .NodeExporterAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tidb"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiDBStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tikv"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiKVStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "pd"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .PDAddrs}}
      - '{{.}}'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - job_name: "tiflash"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
    {{- range .TiFlashLearnerStatusAddrs}}
       - '{{.}}'
    {{- end}}
{{- end}}
{{- if .PumpAddrs}}
{{- if .KafkaExporterAddr}}
  - job_name: 'kafka_exporter'
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
      - '{{.KafkaExporterAddr}}'
{{- end}}
  - job_name: 'pump'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .PumpAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: 'drainer'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: "port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
{{- if .KafkaAddrs}}
    - targets:
    {{- range .KafkaAddrs}}
        - '{{.}}'
    {{- end}}
      labels:
        group: 'kafka'
{{- end}}
{{- if .ZookeeperAddrs}}
    - targets:
    {{- range .ZookeeperAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'zookeeper'
{{- end}}
    - targets:
{{- range .PumpAddrs}}
      - '{{.}}'
{{- end}}
      labels:
        group: 'pump'
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'drainer'
{{- if .KafkaExporterAddr}}
    - targets:
      - '{{.KafkaExporterAddr}}'
      labels:
        group: 'kafka_exporter'
{{- end}}
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .CDCAddrs}}
  - job_name: "ticdc"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .CDCAddrs}}
      - '{{.}}'
{{- end}}
{{- end}}
  - job_name: "tidb_port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
    - targets:
    {{- range .TiDBStatusAddrs}}
      - '{{.}}' 
    {{- end}}
      labels:
        group: 'tidb'
    - targets:
    {{- range .TiKVStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tikv'
    - targets:
    {{- range .PDAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'pd'
{{- if .TiFlashStatusAddrs}}
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
      labels:
        group: 'tiflash'
{{- end}}
{{- if .PushgatewayAddr}}
    - targets:
      - '{{.PushgatewayAddr}}'
      labels:
        group: 'pushgateway'
{{- end}}
{{- if .GrafanaAddr}}
    - targets:
      - '{{.GrafanaAddr}}'
      labels:
        group: 'grafana'
{{- end}}
    - targets:
    {{- range .NodeExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'node_exporter'
    - targets:
    {{- range .BlackboxExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'blackbox_exporter'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- range $addr := .BlackboxExporterAddrs}}
  - job_name: "blackbox_exporter_{{$addr}}_icmp"
    scrape_interval: 6s
    metrics_path: /probe
    params:
      module: [icmp]
    static_configs:
    - targets:
    {{- range $.MonitoredServers}}
      - '{{.}}'
    {{- end}}
    relabel_configs:
      - source_labels: [__address__]
        regex: (.*)(:80)?
        target_label: __param_target
        replacement: ${1}
      - source_labels: [__param_target]
        regex: (.*)
        target_label: ping
        replacement: ${1}
      - source_labels: []
        regex: .*
        target_label: __address__
        replacement: {{$addr}}
{{- end}}" + autogenFiles["/templates/config/prometheus.yml.tpl"] = "---
global:
  scrape_interval:     15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).
  external_labels:
    cluster: '{{.ClusterName}}'
    monitor: "prometheus"

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
{{- if .MonitoredServers}}
  - 'node.rules.yml'
  - 'blacker.rules.yml'
  - 'bypass.rules.yml'
{{- end}}
{{- if .PDAddrs}}
  - 'pd.rules.yml'
{{- end}}
{{- if .TiDBStatusAddrs}}
  - 'tidb.rules.yml'
{{- end}}
{{- if .TiKVStatusAddrs}}
  - 'tikv.rules.yml'
  - 'tikv.accelerate.rules.yml'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - 'tiflash.rules.yml'
{{- end}}
{{- if .PumpAddrs}}
  - 'binlog.rules.yml'
{{- end}}
{{- if .CDCAddrs}}
  - 'ticdc.rules.yml'
{{- end}}
{{- if .KafkaAddrs}}
  - 'kafka.rules.yml'
{{- end}}
{{- if .LightningAddrs}}
  - 'lightning.rules.yml'
{{- end}}
{{- if .DMWorkerAddrs}}
  - 'dm_worker.rules.yml'
{{- end}}
{{- if .DMMasterAddrs}}
  - 'dm_master.rules.yml'
{{- end}}

{{- if .AlertmanagerAddrs}}
alerting:
 alertmanagers:
 - static_configs:
   - targets:
{{- range .AlertmanagerAddrs}}
     - '{{.}}'
{{- end}}
{{- end}}

scrape_configs:
{{- if .PushgatewayAddr}}
  - job_name: 'overwritten-cluster'
    scrape_interval: 15s
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
      - targets: ['{{.PushgatewayAddr}}']

  - job_name: "blackbox_exporter_http"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
    - targets:
      - 'http://{{.PushgatewayAddr}}/metrics'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .LightningAddrs}}
  - job_name: "lightning"
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
      - targets: ['{{index .LightningAddrs 0}}']
{{- end}}
  - job_name: "overwritten-nodes"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
{{- range .NodeExporterAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tidb"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiDBStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "tikv"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .TiKVStatusAddrs}}
      - '{{.}}'
{{- end}}
  - job_name: "pd"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .PDAddrs}}
      - '{{.}}'
{{- end}}
{{- if .TiFlashStatusAddrs}}
  - job_name: "tiflash"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
    {{- range .TiFlashLearnerStatusAddrs}}
       - '{{.}}'
    {{- end}}
{{- end}}
{{- if .PumpAddrs}}
{{- if .KafkaExporterAddr}}
  - job_name: 'kafka_exporter'
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
      - '{{.KafkaExporterAddr}}'
{{- end}}
  - job_name: 'pump'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .PumpAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: 'drainer'
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
  - job_name: "port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
{{- if .KafkaAddrs}}
    - targets:
    {{- range .KafkaAddrs}}
        - '{{.}}'
    {{- end}}
      labels:
        group: 'kafka'
{{- end}}
{{- if .ZookeeperAddrs}}
    - targets:
    {{- range .ZookeeperAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'zookeeper'
{{- end}}
    - targets:
{{- range .PumpAddrs}}
      - '{{.}}'
{{- end}}
      labels:
        group: 'pump'
    - targets:
    {{- range .DrainerAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'drainer'
{{- if .KafkaExporterAddr}}
    - targets:
      - '{{.KafkaExporterAddr}}'
      labels:
        group: 'kafka_exporter'
{{- end}}
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- end}}
{{- if .CDCAddrs}}
  - job_name: "ticdc"
    honor_labels: true # don't overwrite job & instance labels
{{- if .TLSEnabled}}
    scheme: https
    tls_config:
      insecure_skip_verify: false
      ca_file: ../tls/ca.crt
      cert_file: ../tls/prometheus.crt
      key_file: ../tls/prometheus.pem
{{- end}}
    static_configs:
    - targets:
{{- range .CDCAddrs}}
      - '{{.}}'
{{- end}}
{{- end}}
  - job_name: "tidb_port_probe"
    scrape_interval: 30s
    metrics_path: /probe
    params:
      module: [tcp_connect]
    static_configs:
    - targets:
    {{- range .TiDBStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tidb'
    - targets:
    {{- range .TiKVStatusAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'tikv'
    - targets:
    {{- range .PDAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'pd'
{{- if .TiFlashStatusAddrs}}
    - targets:
    {{- range .TiFlashStatusAddrs}}
       - '{{.}}'
    {{- end}}
      labels:
        group: 'tiflash'
{{- end}}
{{- if .PushgatewayAddr}}
    - targets:
      - '{{.PushgatewayAddr}}'
      labels:
        group: 'pushgateway'
{{- end}}
{{- if .GrafanaAddr}}
    - targets:
      - '{{.GrafanaAddr}}'
      labels:
        group: 'grafana'
{{- end}}
    - targets:
    {{- range .NodeExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'node_exporter'
    - targets:
    {{- range .BlackboxExporterAddrs}}
      - '{{.}}'
    {{- end}}
      labels:
        group: 'blackbox_exporter'
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{.BlackboxAddr}}
{{- range $addr := .BlackboxExporterAddrs}}
  - job_name: "blackbox_exporter_{{$addr}}_icmp"
    scrape_interval: 6s
    metrics_path: /probe
    params:
      module: [icmp]
    static_configs:
    - targets:
    {{- range $.MonitoredServers}}
      - '{{.}}'
    {{- end}}
    relabel_configs:
      - source_labels: [__address__]
        regex: (.*)(:80)?
        target_label: __param_target
        replacement: ${1}
      - source_labels: [__param_target]
        regex: (.*)
        target_label: ping
        replacement: ${1}
      - source_labels: []
        regex: .*
        target_label: __address__
        replacement: {{$addr}}
{{- end}}

{{- if .DMMasterAddrs}}
  - job_name: "dm_master"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
    {{- range .DMMasterAddrs}}
      - '{{.}}'
    {{- end}}
{{- end}}

{{- if .DMWorkerAddrs}}
  - job_name: "dm_worker"
    honor_labels: true # don't overwrite job & instance labels
    static_configs:
    - targets:
    {{- range .DMWorkerAddrs}}
      - '{{.}}'
    {{- end}}
{{- end}}
" autogenFiles["/templates/config/spark-defaults.conf.tpl"] = "IwojIExpY2Vuc2VkIHRvIHRoZSBBcGFjaGUgU29mdHdhcmUgRm91bmRhdGlvbiAoQVNGKSB1bmRlciBvbmUgb3IgbW9yZQojIGNvbnRyaWJ1dG9yIGxpY2Vuc2UgYWdyZWVtZW50cy4gIFNlZSB0aGUgTk9USUNFIGZpbGUgZGlzdHJpYnV0ZWQgd2l0aAojIHRoaXMgd29yayBmb3IgYWRkaXRpb25hbCBpbmZvcm1hdGlvbiByZWdhcmRpbmcgY29weXJpZ2h0IG93bmVyc2hpcC4KIyBUaGUgQVNGIGxpY2Vuc2VzIHRoaXMgZmlsZSB0byBZb3UgdW5kZXIgdGhlIEFwYWNoZSBMaWNlbnNlLCBWZXJzaW9uIDIuMAojICh0aGUgIkxpY2Vuc2UiKTsgeW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoCiMgdGhlIExpY2Vuc2UuICBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgIGh0dHA6Ly93d3cuYXBhY2hlLm9yZy9saWNlbnNlcy9MSUNFTlNFLTIuMAojCiMgVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQojIGRpc3RyaWJ1dGVkIHVuZGVyIHRoZSBMaWNlbnNlIGlzIGRpc3RyaWJ1dGVkIG9uIGFuICJBUyBJUyIgQkFTSVMsCiMgV0lUSE9VVCBXQVJSQU5USUVTIE9SIENPTkRJVElPTlMgT0YgQU5ZIEtJTkQsIGVpdGhlciBleHByZXNzIG9yIGltcGxpZWQuCiMgU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAojIGxpbWl0YXRpb25zIHVuZGVyIHRoZSBMaWNlbnNlLgojCgojIERlZmF1bHQgc3lzdGVtIHByb3BlcnRpZXMgaW5jbHVkZWQgd2hlbiBydW5uaW5nIHNwYXJrLXN1Ym1pdC4KIyBUaGlzIGlzIHVzZWZ1bCBmb3Igc2V0dGluZyBkZWZhdWx0IGVudmlyb25tZW50YWwgc2V0dGluZ3MuCgojIEV4YW1wbGU6CiNzcGFyay5ldmVudExvZy5kaXI6ICJoZGZzOi8vbmFtZW5vZGU6ODAyMS9kaXJlY3RvcnkiCiMgc3BhcmsuZXhlY3V0b3IuZXh0cmFKYXZhT3B0aW9ucyAgLVhYOitQcmludEdDRGV0YWlscyAtRGtleT12YWx1ZSAtRG51bWJlcnM9Im9uZSB0d28gdGhyZWUiCgp7ey0gZGVmaW5lICJQRExpc3QifX0KICB7ey0gcmFuZ2UgJGlkeCwgJHBkIDo9IC59fQogICAge3stIGlmIGVxICRpZHggMH19CiAgICAgIHt7LSAkcGR9fQogICAge3stIGVsc2UgLX19CiAgICAgICx7eyRwZH19CiAgICB7ey0gZW5kfX0KICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7eyByYW5nZSAkaywgJHYgOj0gLkN1c3RvbUZpZWxkc319Cnt7ICRrIH19ICAge3sgJHYgfX0Ke3stIGVuZCB9fQpzcGFyay5zcWwuZXh0ZW5zaW9ucyAgIG9yZy5hcGFjaGUuc3Bhcmsuc3FsLlRpRXh0ZW5zaW9ucwoKe3stIGlmIC5UaVNwYXJrTWFzdGVyc319CnNwYXJrLm1hc3RlciAgIHNwYXJrOi8ve3suVGlTcGFya01hc3RlcnN9fQp7ey0gZW5kfX0KCnNwYXJrLnRpc3BhcmsucGQuYWRkcmVzc2VzIHt7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319Cg==" autogenFiles["/templates/config/spark-log4j.properties.tpl"] = 
"IwojIExpY2Vuc2VkIHRvIHRoZSBBcGFjaGUgU29mdHdhcmUgRm91bmRhdGlvbiAoQVNGKSB1bmRlciBvbmUgb3IgbW9yZQojIGNvbnRyaWJ1dG9yIGxpY2Vuc2UgYWdyZWVtZW50cy4gIFNlZSB0aGUgTk9USUNFIGZpbGUgZGlzdHJpYnV0ZWQgd2l0aAojIHRoaXMgd29yayBmb3IgYWRkaXRpb25hbCBpbmZvcm1hdGlvbiByZWdhcmRpbmcgY29weXJpZ2h0IG93bmVyc2hpcC4KIyBUaGUgQVNGIGxpY2Vuc2VzIHRoaXMgZmlsZSB0byBZb3UgdW5kZXIgdGhlIEFwYWNoZSBMaWNlbnNlLCBWZXJzaW9uIDIuMAojICh0aGUgIkxpY2Vuc2UiKTsgeW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoCiMgdGhlIExpY2Vuc2UuICBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgIGh0dHA6Ly93d3cuYXBhY2hlLm9yZy9saWNlbnNlcy9MSUNFTlNFLTIuMAojCiMgVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQojIGRpc3RyaWJ1dGVkIHVuZGVyIHRoZSBMaWNlbnNlIGlzIGRpc3RyaWJ1dGVkIG9uIGFuICJBUyBJUyIgQkFTSVMsCiMgV0lUSE9VVCBXQVJSQU5USUVTIE9SIENPTkRJVElPTlMgT0YgQU5ZIEtJTkQsIGVpdGhlciBleHByZXNzIG9yIGltcGxpZWQuCiMgU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAojIGxpbWl0YXRpb25zIHVuZGVyIHRoZSBMaWNlbnNlLgojCgojIFNldCBldmVyeXRoaW5nIHRvIGJlIGxvZ2dlZCB0byB0aGUgY29uc29sZQpsb2c0ai5yb290Q2F0ZWdvcnk9SU5GTywgY29uc29sZQpsb2c0ai5hcHBlbmRlci5jb25zb2xlPW9yZy5hcGFjaGUubG9nNGouQ29uc29sZUFwcGVuZGVyCmxvZzRqLmFwcGVuZGVyLmNvbnNvbGUudGFyZ2V0PVN5c3RlbS5lcnIKbG9nNGouYXBwZW5kZXIuY29uc29sZS5sYXlvdXQ9b3JnLmFwYWNoZS5sb2c0ai5QYXR0ZXJuTGF5b3V0CmxvZzRqLmFwcGVuZGVyLmNvbnNvbGUubGF5b3V0LkNvbnZlcnNpb25QYXR0ZXJuPSVke3l5L01NL2RkIEhIOm1tOnNzfSAlcCAlY3sxfTogJW0lbgoKIyBTZXQgdGhlIGRlZmF1bHQgc3Bhcmstc2hlbGwgbG9nIGxldmVsIHRvIFdBUk4uIFdoZW4gcnVubmluZyB0aGUgc3Bhcmstc2hlbGwsIHRoZQojIGxvZyBsZXZlbCBmb3IgdGhpcyBjbGFzcyBpcyB1c2VkIHRvIG92ZXJ3cml0ZSB0aGUgcm9vdCBsb2dnZXIncyBsb2cgbGV2ZWwsIHNvIHRoYXQKIyB0aGUgdXNlciBjYW4gaGF2ZSBkaWZmZXJlbnQgZGVmYXVsdHMgZm9yIHRoZSBzaGVsbCBhbmQgcmVndWxhciBTcGFyayBhcHBzLgpsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5zcGFyay5yZXBsLk1haW49V0FSTgoKIyBTZXR0aW5ncyB0byBxdWlldCB0aGlyZCBwYXJ0eSBsb2dzIHRoYXQgYXJlIHRvbyB2ZXJib3NlCmxvZzRqLmxvZ2dlci5vcmcuc3BhcmtfcHJvamVjdC5qZXR0eT1XQVJOCmxvZzRqLmxvZ2dlci5vcmcuc3BhcmtfcHJvamVjdC5qZXR0eS51dGlsLmNvbXBvbmVudC5BYnN0cmFjdExpZmVDeWNsZT1FUlJPUgpsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5zcGFyay5yZXBsLlNwYXJrSU1haW4kZXhwclR5cGVyPUlORk8KbG9nNGoubG9nZ2VyLm9yZy5hcGFjaGUuc3BhcmsucmVwbC5TcGFya0lMb29wJFNwYXJrSUxvb3BJbnRlcnByZXRlcj1JTkZPCmxvZzRqLmxvZ2dlci5vcmcuYXBhY2hlLnBhcnF1ZXQ9RVJST1IKbG9nNGoubG9nZ2VyLnBhcnF1ZXQ9RVJST1IKCiMgU1BBUkstOTE4MzogU2V0dGluZ3MgdG8gYXZvaWQgYW5ub3lpbmcgbWVzc2FnZXMgd2hlbiBsb29raW5nIHVwIG5vbmV4aXN0ZW50IFVERnMgaW4gU3BhcmtTUUwgd2l0aCBIaXZlIHN1cHBvcnQKbG9nNGoubG9nZ2VyLm9yZy5hcGFjaGUuaGFkb29wLmhpdmUubWV0YXN0b3JlLlJldHJ5aW5nSE1TSGFuZGxlcj1GQVRBTApsb2c0ai5sb2dnZXIub3JnLmFwYWNoZS5oYWRvb3AuaGl2ZS5xbC5leGVjLkZ1bmN0aW9uUmVnaXN0cnk9RVJST1IKCiMgdGlzcGFyayBkaXNhYmxlICJXQVJOIE9iamVjdFN0b3JlOjU2OCAtIEZhaWxlZCB0byBnZXQgZGF0YWJhc2UiCmxvZzRqLmxvZ2dlci5vcmcuYXBhY2hlLmhhZG9vcC5oaXZlLm1ldGFzdG9yZS5PYmplY3RTdG9yZT1FUlJPUgo=" - autogenFiles["/templates/scripts/dm/run_grafana.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKTEFORz1lbl9VUy5VVEYtOCBcCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9iaW4vZ3JhZmFuYS1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2Jpbi9ncmFmYW5hLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1ob21lcGF0aD0ie3suRGVwbG95RGlyfX0vYmluIiBcCiAgICAtLWNvbmZpZz0ie3suRGVwbG95RGlyfX0vY29uZi9ncmFmYW5hLmluaSIK" - 
autogenFiles["/templates/scripts/dm/run_prometheus.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCgpleGVjID4gPih0ZWUgLWkgLWEgInt7LkxvZ0Rpcn19L3Byb21ldGhldXMubG9nIikKZXhlYyAyPiYxCgp7ey0gaWYgLk51bWFOb2RlfX0KZXhlYyBudW1hY3RsIC0tY3B1bm9kZWJpbmQ9e3suTnVtYU5vZGV9fSAtLW1lbWJpbmQ9e3suTnVtYU5vZGV9fSBiaW4vcHJvbWV0aGV1cy9wcm9tZXRoZXVzIFwKe3stIGVsc2V9fQpleGVjIGJpbi9wcm9tZXRoZXVzL3Byb21ldGhldXMgXAp7ey0gZW5kfX0KICAgIC0tY29uZmlnLmZpbGU9Int7LkRlcGxveURpcn19L2NvbmYvcHJvbWV0aGV1cy55bWwiIFwKICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSI6e3suUG9ydH19IiBcCiAgICAtLXdlYi5leHRlcm5hbC11cmw9Imh0dHA6Ly97ey5JUH19Ont7LlBvcnR9fS8iIFwKICAgIC0td2ViLmVuYWJsZS1hZG1pbi1hcGkgXAogICAgLS1sb2cubGV2ZWw9ImluZm8iIFwKICAgIC0tc3RvcmFnZS50c2RiLnBhdGg9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1zdG9yYWdlLnRzZGIucmV0ZW50aW9uPSIzMGQiCg==" autogenFiles["/templates/scripts/run_alertmanager.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCmV4ZWMgPiA+KHRlZSAtaSAtYSAie3suTG9nRGlyfX0vYWxlcnRtYW5hZ2VyLmxvZyIpCmV4ZWMgMj4mMQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2FsZXJ0bWFuYWdlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vYWxlcnRtYW5hZ2VyL2FsZXJ0bWFuYWdlciBcCnt7LSBlbmR9fQogICAgLS1jb25maWcuZmlsZT0iY29uZi9hbGVydG1hbmFnZXIueW1sIiBcCiAgICAtLXN0b3JhZ2UucGF0aD0ie3suRGF0YURpcn19IiBcCiAgICAtLWRhdGEucmV0ZW50aW9uPTEyMGggXAogICAgLS1sb2cubGV2ZWw9ImluZm8iIFwKICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSJ7ey5JUH19Ont7LldlYlBvcnR9fSIgXAp7ey0gaWYgLkVuZFBvaW50c319Cnt7LSByYW5nZSAkaWR4LCAkYW0gOj0gLkVuZFBvaW50c319CiAgICAtLWNsdXN0ZXIucGVlcj0ie3skYW0uSVB9fTp7eyRhbS5DbHVzdGVyUG9ydH19IiBcCnt7LSBlbmR9fQp7ey0gZW5kfX0KICAgIC0tY2x1c3Rlci5saXN0ZW4tYWRkcmVzcz0ie3suSVB9fTp7ey5DbHVzdGVyUG9ydH19Igo=" autogenFiles["/templates/scripts/run_blackbox_exporter.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKZXhlYyA+ID4odGVlIC1pIC1hICJ7ey5Mb2dEaXJ9fS9ibGFja2JveF9leHBvcnRlci5sb2ciKQpleGVjIDI+JjEKCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9ibGFja2JveF9leHBvcnRlci9ibGFja2JveF9leHBvcnRlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vYmxhY2tib3hfZXhwb3J0ZXIvYmxhY2tib3hfZXhwb3J0ZXIgXAp7ey0gZW5kfX0KICAgIC0td2ViLmxpc3Rlbi1hZGRyZXNzPSI6e3suUG9ydH19IiBcCiAgICAtLWxvZy5sZXZlbD0iaW5mbyIgXAogICAgLS1jb25maWcuZmlsZT0iY29uZi9ibGFja2JveC55bWwiCg==" autogenFiles["/templates/scripts/run_cdc.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKe3stIGRlZmluZSAiUERMaXN0In19CiAge3stIHJhbmdlICRpZHgsICRwZCA6PSAufX0KICAgIHt7LSBpZiBlcSAkaWR4IDB9fQogICAgICB7ey0gJHBkLlNjaGVtZX19Oi8ve3skcGQuSVB9fTp7eyRwZC5DbGllbnRQb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZW5kfX0KICB7ey0gZW5kfX0Ke3stIGVuZH19Cgp7ey0gaWYgLk51bWFOb2RlfX0KZXhlYyBudW1hY3RsIC0tY3B1bm9kZWJpbmQ9e3suTnVtYU5vZGV9fSAtLW1lbWJpbmQ9e3suTnVtYU5vZGV9fSBiaW4vY2RjIHNlcnZlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vY2RjIHNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1hZGRyICIwLjAuMC4wOnt7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkciAie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQgInt7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319IiBcCnt7LSBpZiAuVExTRW5hYmxlZH19CiAgICAtLWNhIHRscy9jYS5jcnQgXAogICAgLS1jZXJ0IHRscy9jZGMuY3J0IFwKICAgIC0ta2V5IHRscy9jZGMucGVtIFwKe3stIGVuZH19Cnt7LSBpZiAuR0NUVEx9fQogICAgLS1nYy10dGwge3suR0NUVEx9fSBcCnt7LSBlbmR9fQp7ey0gaWYgLlRafX0KICAgIC0tdHogInt7LlRafX0iIFwKe3stIGVuZH19CiAgICAtLWxvZy1maWxlICJ7ey5Mb2dEaXJ9fS9jZGMubG9nIiAyPj4gInt7LkxvZ0Rpcn19L2NkY19zdGRlcnIubG9nIgo=" diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index 2e0bb4be49..9e950b579a 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -716,7 +716,7 @@ func (m *Manager) Reload(clusterName string, opt operator.Options, skipRestart b tb := task.NewBuilder().UserSSH(inst.GetHost(), inst.GetSSHPort(), base.User, opt.SSHTimeout, opt.SSHType, topo.BaseTopo().GlobalOptions.SSHType) if inst.IsImported() { switch compName := inst.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, inst.OS(), inst.Arch(), version). CopyComponent(compName, inst.OS(), inst.Arch(), version, "", inst.GetHost(), deployDir) @@ -842,7 +842,7 @@ func (m *Manager) Upgrade(clusterName string, clusterVersion string, opt operato tb := task.NewBuilder() if inst.IsImported() { switch inst.ComponentName() { - case spec.ComponentPrometheus, spec.ComponentGrafana, spec.ComponentAlertManager: + case spec.ComponentPrometheus, spec.ComponentGrafana, spec.ComponentAlertmanager: tb.CopyComponent( inst.ComponentName(), inst.OS(), @@ -1394,7 +1394,7 @@ func (m *Manager) ScaleIn( tb := task.NewBuilder() if instance.IsImported() { switch compName := instance.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, instance.OS(), instance.Arch(), version). CopyComponent( @@ -2078,7 +2078,7 @@ func buildScaleOutTask( tb := task.NewBuilder() if inst.IsImported() { switch compName := inst.ComponentName(); compName { - case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertManager: + case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: version := m.bindVersion(compName, base.Version) tb.Download(compName, inst.OS(), inst.Arch(), version). 
CopyComponent(compName, inst.OS(), inst.Arch(), version, "", inst.GetHost(), deployDir) diff --git a/pkg/cluster/operation/destroy.go b/pkg/cluster/operation/destroy.go index 8cf99b89ac..0387b86df3 100644 --- a/pkg/cluster/operation/destroy.go +++ b/pkg/cluster/operation/destroy.go @@ -490,7 +490,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.TiKVComponent{Specification: cluster}).Instances() + instances := (&spec.TiKVComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } @@ -520,7 +520,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.TiFlashComponent{Specification: cluster}).Instances() + instances := (&spec.TiFlashComponent{Topology: cluster}).Instances() id = s.Host + ":" + strconv.Itoa(s.GetMainPort()) if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err @@ -550,7 +550,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.PumpComponent{Specification: cluster}).Instances() + instances := (&spec.PumpComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } @@ -579,7 +579,7 @@ func DestroyClusterTombstone( continue } - instances := (&spec.DrainerComponent{Specification: cluster}).Instances() + instances := (&spec.DrainerComponent{Topology: cluster}).Instances() if err := maybeDestroyMonitor(instances, id); err != nil { return nil, err } diff --git a/pkg/cluster/operation/scale_in.go b/pkg/cluster/operation/scale_in.go index a2003efd76..b084bb4f4c 100644 --- a/pkg/cluster/operation/scale_in.go +++ b/pkg/cluster/operation/scale_in.go @@ -118,7 +118,7 @@ func ScaleInCluster( } var pdEndpoint []string - for _, instance := range (&spec.PDComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.PDComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { pdEndpoint = append(pdEndpoint, Addr(instance)) } @@ -176,7 +176,7 @@ func ScaleInCluster( // TODO if binlog is switch on, cannot delete all pump servers. 
var tiflashInstances []spec.Instance - for _, instance := range (&spec.TiFlashComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.TiFlashComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tiflashInstances = append(tiflashInstances, instance) } @@ -184,7 +184,7 @@ func ScaleInCluster( if len(tiflashInstances) > 0 { var tikvInstances []spec.Instance - for _, instance := range (&spec.TiKVComponent{Specification: cluster}).Instances() { + for _, instance := range (&spec.TiKVComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tikvInstances = append(tikvInstances, instance) } diff --git a/pkg/cluster/spec/alertmanager.go b/pkg/cluster/spec/alertmanager.go index 0793ad1387..2005e72ace 100644 --- a/pkg/cluster/spec/alertmanager.go +++ b/pkg/cluster/spec/alertmanager.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tiup/pkg/meta" ) -// AlertManagerSpec represents the AlertManager topology specification in topology.yaml -type AlertManagerSpec struct { +// AlertmanagerSpec represents the AlertManager topology specification in topology.yaml +type AlertmanagerSpec struct { Host string `yaml:"host"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` @@ -42,31 +42,31 @@ type AlertManagerSpec struct { } // Role returns the component role of the instance -func (s AlertManagerSpec) Role() string { - return ComponentAlertManager +func (s AlertmanagerSpec) Role() string { + return ComponentAlertmanager } // SSH returns the host and SSH port of the instance -func (s AlertManagerSpec) SSH() (string, int) { +func (s AlertmanagerSpec) SSH() (string, int) { return s.Host, s.SSHPort } // GetMainPort returns the main port of the instance -func (s AlertManagerSpec) GetMainPort() int { +func (s AlertmanagerSpec) GetMainPort() int { return s.WebPort } // IsImported returns if the node is imported from TiDB-Ansible -func (s AlertManagerSpec) IsImported() bool { +func (s AlertmanagerSpec) IsImported() bool { return s.Imported } // AlertManagerComponent represents Alertmanager component. -type AlertManagerComponent struct{ *Specification } +type AlertManagerComponent struct{ Topology } // Name implements Component interface. func (c *AlertManagerComponent) Name() string { - return ComponentAlertManager + return ComponentAlertmanager } // Role implements Component interface. @@ -76,8 +76,11 @@ func (c *AlertManagerComponent) Role() string { // Instances implements Component interface. 
func (c *AlertManagerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Alertmanager)) - for _, s := range c.Alertmanager { + alertmanagers := c.Topology.BaseTopo().Alertmanagers + + ins := make([]Instance, 0, len(alertmanagers)) + + for _, s := range alertmanagers { ins = append(ins, &AlertManagerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -98,7 +101,7 @@ func (c *AlertManagerComponent) Instances() []Instance { return "-" }, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -107,7 +110,7 @@ func (c *AlertManagerComponent) Instances() []Instance { // AlertManagerInstance represent the alert manager instance type AlertManagerInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -118,16 +121,19 @@ func (i *AlertManagerInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + alertmanagers := i.topo.BaseTopo().Alertmanagers + + enableTLS := gOpts.TLSEnabled // Transfer start script - spec := i.InstanceSpec.(AlertManagerSpec) + spec := i.InstanceSpec.(AlertmanagerSpec) cfg := scripts.NewAlertManagerScript(spec.Host, paths.Deploy, paths.Data[0], paths.Log, enableTLS). WithWebPort(spec.WebPort).WithClusterPort(spec.ClusterPort).WithNumaNode(spec.NumaNode). - AppendEndpoints(AlertManagerEndpoints(i.topo.Alertmanager, deployUser, enableTLS)) + AppendEndpoints(AlertManagerEndpoints(alertmanagers, deployUser, enableTLS)) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_alertmanager_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { diff --git a/pkg/cluster/spec/bindversion.go b/pkg/cluster/spec/bindversion.go index 4a7692bb75..e91203cf5c 100644 --- a/pkg/cluster/spec/bindversion.go +++ b/pkg/cluster/spec/bindversion.go @@ -21,7 +21,7 @@ import ( // TiDBComponentVersion maps the TiDB version to the third components binding version func TiDBComponentVersion(comp, version string) string { switch comp { - case ComponentAlertManager: + case ComponentAlertmanager: return "v0.17.0" case ComponentBlackboxExporter: return "v0.12.0" diff --git a/pkg/cluster/spec/cdc.go b/pkg/cluster/spec/cdc.go index 262ffdd4fe..9beac6c93b 100644 --- a/pkg/cluster/spec/cdc.go +++ b/pkg/cluster/spec/cdc.go @@ -62,7 +62,7 @@ func (s CDCSpec) IsImported() bool { } // CDCComponent represents CDC component. -type CDCComponent struct{ *Specification } +type CDCComponent struct{ Topology *Specification } // Name implements Component interface. func (c *CDCComponent) Name() string { @@ -76,8 +76,8 @@ func (c *CDCComponent) Role() string { // Instances implements Component interface. 
func (c *CDCComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.CDCServers)) - for _, s := range c.CDCServers { + ins := make([]Instance, 0, len(c.Topology.CDCServers)) + for _, s := range c.Topology.CDCServers { s := s ins = append(ins, &CDCInstance{BaseInstance{ InstanceSpec: s, @@ -100,7 +100,7 @@ func (c *CDCComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -108,7 +108,7 @@ func (c *CDCComponent) Instances() []Instance { // CDCInstance represent the CDC instance. type CDCInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -137,11 +137,12 @@ func (i *CDCInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(CDCSpec) cfg := scripts.NewCDCScript( i.GetHost(), @@ -150,7 +151,7 @@ func (i *CDCInstance) InitConfig( enableTLS, spec.GCTTL, spec.TZ, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_cdc_%s_%d.sh", i.GetHost(), i.GetPort())) @@ -168,5 +169,5 @@ func (i *CDCInstance) InitConfig( specConfig := spec.Config - return i.MergeServerConfig(e, i.topo.ServerConfigs.CDC, specConfig, paths) + return i.MergeServerConfig(e, topo.ServerConfigs.CDC, specConfig, paths) } diff --git a/pkg/cluster/spec/drainer.go b/pkg/cluster/spec/drainer.go index ff0b948713..dc22666ab9 100644 --- a/pkg/cluster/spec/drainer.go +++ b/pkg/cluster/spec/drainer.go @@ -64,7 +64,7 @@ func (s DrainerSpec) IsImported() bool { } // DrainerComponent represents Drainer component. -type DrainerComponent struct{ *Specification } +type DrainerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DrainerComponent) Name() string { @@ -78,8 +78,8 @@ func (c *DrainerComponent) Role() string { // Instances implements Component interface. func (c *DrainerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Drainers)) - for _, s := range c.Drainers { + ins := make([]Instance, 0, len(c.Topology.Drainers)) + for _, s := range c.Topology.Drainers { s := s ins = append(ins, &DrainerInstance{BaseInstance{ InstanceSpec: s, @@ -103,7 +103,7 @@ func (c *DrainerComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -111,7 +111,7 @@ func (c *DrainerComponent) Instances() []Instance { // DrainerInstance represent the Drainer instance. 
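Cluster-only components (CDC, Drainer, Pump, PD, TiDB, TiKV, TiFlash) keep storing the Topology interface on the instance but narrow it back to *Specification inside InitConfig, since fields such as ServerConfigs exist only on the cluster spec. A minimal sketch of that pattern, using the comma-ok form rather than the direct assertion the patch uses; the helper is illustrative only:

// drainerServerConfig is an illustrative helper (not in the patch): the
// generic Topology held by the instance is narrowed to *Specification
// before cluster-only data such as ServerConfigs is read.
func drainerServerConfig(topo Topology) (map[string]interface{}, error) {
	cluster, ok := topo.(*Specification)
	if !ok {
		return nil, fmt.Errorf("expected a tidb cluster specification, got %T", topo)
	}
	return cluster.ServerConfigs.Drainer, nil
}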
type DrainerInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -140,11 +140,12 @@ func (i *DrainerInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(DrainerSpec) cfg := scripts.NewDrainerScript( i.GetHost()+":"+strconv.Itoa(i.GetPort()), @@ -152,7 +153,7 @@ func (i *DrainerInstance) InitConfig( paths.Deploy, paths.Data[0], paths.Log, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...) cfg.WithCommitTs(spec.CommitTS) @@ -170,7 +171,7 @@ func (i *DrainerInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.Drainer + globalConfig := topo.ServerConfigs.Drainer // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/grafana.go b/pkg/cluster/spec/grafana.go index eff06eb9a8..79dd98d69f 100644 --- a/pkg/cluster/spec/grafana.go +++ b/pkg/cluster/spec/grafana.go @@ -18,6 +18,7 @@ import ( "fmt" "path" "path/filepath" + "reflect" "strings" "github.com/pingcap/errors" @@ -61,7 +62,7 @@ func (s GrafanaSpec) IsImported() bool { } // GrafanaComponent represents Grafana component. -type GrafanaComponent struct{ *Specification } +type GrafanaComponent struct{ Topology } // Name implements Component interface. func (c *GrafanaComponent) Name() string { @@ -75,8 +76,10 @@ func (c *GrafanaComponent) Role() string { // Instances implements Component interface. 
func (c *GrafanaComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Grafana)) - for _, s := range c.Grafana { + servers := c.BaseTopo().Grafanas + ins := make([]Instance, 0, len(servers)) + + for _, s := range servers { ins = append(ins, &GrafanaInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -95,7 +98,7 @@ func (c *GrafanaComponent) Instances() []Instance { return "-" }, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -104,7 +107,7 @@ func (c *GrafanaComponent) Instances() []Instance { // GrafanaInstance represent the grafana instance type GrafanaInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -115,7 +118,8 @@ func (i *GrafanaInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } @@ -160,13 +164,22 @@ func (i *GrafanaInstance) InitConfig( return err } + topo := reflect.ValueOf(i.topo) + if topo.Kind() == reflect.Ptr { + topo = topo.Elem() + } + val := topo.FieldByName("Monitors") + if (val == reflect.Value{}) { + return errors.Errorf("field Monitors not found in topology: %v", topo) + } + monitors := val.Interface().([]PrometheusSpec) // transfer datasource.yml - if len(i.topo.Monitors) == 0 { + if len(monitors) == 0 { return errors.New("no prometheus found in topology") } fp = filepath.Join(paths.Cache, fmt.Sprintf("datasource_%s.yml", i.GetHost())) - if err := config.NewDatasourceConfig(clusterName, i.topo.Monitors[0].Host). - WithPort(uint64(i.topo.Monitors[0].Port)). + if err := config.NewDatasourceConfig(clusterName, monitors[0].Host). + WithPort(uint64(monitors[0].Port)). 
ConfigToFile(fp); err != nil { return err } @@ -221,7 +234,6 @@ func (i *GrafanaInstance) ScaleConfig( ) error { s := i.topo defer func() { i.topo = s }() - cluster := mustBeClusterTopo(topo) - i.topo = cluster.Merge(i.topo) + i.topo = topo.Merge(i.topo) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } diff --git a/pkg/cluster/spec/grafana_test.go b/pkg/cluster/spec/grafana_test.go index 1dff7ac73b..1222dcd3bd 100644 --- a/pkg/cluster/spec/grafana_test.go +++ b/pkg/cluster/spec/grafana_test.go @@ -35,7 +35,7 @@ func TestLocalDashboards(t *testing.T) { assert.Nil(t, err) topo := new(Specification) - topo.Grafana = append(topo.Grafana, GrafanaSpec{ + topo.Grafanas = append(topo.Grafanas, GrafanaSpec{ Host: "127.0.0.1", Port: 3000, DashboardDir: localDir, @@ -53,7 +53,7 @@ func TestLocalDashboards(t *testing.T) { assert.Nil(t, err) clusterName := "tiup-test-cluster-" + uuid.New().String() - err = grafanaInstance.initDashboards(e, topo.Grafana[0], meta.DirPaths{Deploy: deployDir}, clusterName) + err = grafanaInstance.initDashboards(e, topo.Grafanas[0], meta.DirPaths{Deploy: deployDir}, clusterName) assert.Nil(t, err) assert.FileExists(t, path.Join(deployDir, "dashboards", "tidb.json")) diff --git a/pkg/cluster/spec/instance.go b/pkg/cluster/spec/instance.go index 40b2534c3d..85b6b2113d 100644 --- a/pkg/cluster/spec/instance.go +++ b/pkg/cluster/spec/instance.go @@ -44,7 +44,7 @@ const ( ComponentCDC = "cdc" ComponentTiSpark = "tispark" ComponentSpark = "spark" - ComponentAlertManager = "alertmanager" + ComponentAlertmanager = "alertmanager" ComponentPrometheus = "prometheus" ComponentPushwaygate = "pushgateway" ComponentBlackboxExporter = "blackbox_exporter" diff --git a/pkg/cluster/spec/pd.go b/pkg/cluster/spec/pd.go index edb9cf1fe1..f95506393f 100644 --- a/pkg/cluster/spec/pd.go +++ b/pkg/cluster/spec/pd.go @@ -106,7 +106,7 @@ func (s PDSpec) IsImported() bool { } // PDComponent represents PD component. -type PDComponent struct{ *Specification } +type PDComponent struct{ Topology *Specification } // Name implements Component interface. func (c *PDComponent) Name() string { @@ -120,8 +120,8 @@ func (c *PDComponent) Role() string { // Instances implements Component interface. func (c *PDComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.PDServers)) - for _, s := range c.PDServers { + ins := make([]Instance, 0, len(c.Topology.PDServers)) + for _, s := range c.Topology.PDServers { s := s ins = append(ins, &PDInstance{ Name: s.Name, @@ -143,7 +143,7 @@ func (c *PDComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -153,7 +153,7 @@ func (c *PDComponent) Instances() []Instance { type PDInstance struct { Name string BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -164,11 +164,12 @@ func (i *PDInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(PDSpec) cfg := scripts.NewPDScript( spec.Name, @@ -178,7 +179,7 @@ func (i *PDInstance) InitConfig( paths.Log, ).WithClientPort(spec.ClientPort). WithPeerPort(spec.PeerPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). 
+ AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) scheme := "http" @@ -200,15 +201,15 @@ func (i *PDInstance) InitConfig( } // Set the PD metrics storage address - if semver.Compare(clusterVersion, "v3.1.0") >= 0 && len(i.topo.Monitors) > 0 { + if semver.Compare(clusterVersion, "v3.1.0") >= 0 && len(topo.Monitors) > 0 { if spec.Config == nil { spec.Config = map[string]interface{}{} } - prom := i.topo.Monitors[0] + prom := topo.Monitors[0] spec.Config["pd-server.metric-storage"] = fmt.Sprintf("%s://%s:%d", scheme, prom.Host, prom.Port) } - globalConfig := i.topo.ServerConfigs.PD + globalConfig := topo.ServerConfigs.PD // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/prometheus.go b/pkg/cluster/spec/prometheus.go index aaa79ffb3b..6274ddaad9 100644 --- a/pkg/cluster/spec/prometheus.go +++ b/pkg/cluster/spec/prometheus.go @@ -18,6 +18,7 @@ import ( "fmt" "path" "path/filepath" + "reflect" "strings" "github.com/pingcap/errors" @@ -66,7 +67,7 @@ func (s PrometheusSpec) IsImported() bool { } // MonitorComponent represents Monitor component. -type MonitorComponent struct{ *Specification } +type MonitorComponent struct{ Topology } // Name implements Component interface. func (c *MonitorComponent) Name() string { @@ -80,8 +81,10 @@ func (c *MonitorComponent) Role() string { // Instances implements Component interface. func (c *MonitorComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.Monitors)) - for _, s := range c.Monitors { + servers := c.BaseTopo().Monitors + ins := make([]Instance, 0, len(servers)) + + for _, s := range servers { ins = append(ins, &MonitorInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), @@ -99,7 +102,7 @@ func (c *MonitorComponent) Instances() []Instance { StatusFn: func(_ *tls.Config, _ ...string) string { return "-" }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -107,7 +110,7 @@ func (c *MonitorComponent) Instances() []Instance { // MonitorInstance represent the monitor instance type MonitorInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -118,11 +121,12 @@ func (i *MonitorInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + gOpts := *i.topo.BaseTopo().GlobalOptions + if err := i.BaseInstance.InitConfig(e, gOpts, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := gOpts.TLSEnabled // transfer run script spec := i.InstanceSpec.(PrometheusSpec) cfg := scripts.NewPrometheusScript( @@ -147,54 +151,103 @@ func (i *MonitorInstance) InitConfig( return err } - topo := i.topo + topoHasField := func(field string) (reflect.Value, bool) { + return findSliceField(i.topo, field) + } + monitoredOptions := i.topo.GetMonitoredOptions() // transfer config fp = filepath.Join(paths.Cache, fmt.Sprintf("prometheus_%s_%d.yml", i.GetHost(), i.GetPort())) cfig := config.NewPrometheusConfig(clusterName, enableTLS) - cfig.AddBlackbox(i.GetHost(), uint64(topo.MonitoredOptions.BlackboxExporterPort)) + if monitoredOptions != nil { + cfig.AddBlackbox(i.GetHost(), uint64(monitoredOptions.BlackboxExporterPort)) + } uniqueHosts := set.NewStringSet() - for _, pd := range topo.PDServers { - uniqueHosts.Insert(pd.Host) - cfig.AddPD(pd.Host, uint64(pd.ClientPort)) - } - for _, kv := range topo.TiKVServers { - 
uniqueHosts.Insert(kv.Host) - cfig.AddTiKV(kv.Host, uint64(kv.StatusPort)) - } - for _, db := range topo.TiDBServers { - uniqueHosts.Insert(db.Host) - cfig.AddTiDB(db.Host, uint64(db.StatusPort)) - } - for _, flash := range topo.TiFlashServers { - uniqueHosts.Insert(flash.Host) - cfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort)) - cfig.AddTiFlash(flash.Host, uint64(flash.StatusPort)) - } - for _, pump := range topo.PumpServers { - uniqueHosts.Insert(pump.Host) - cfig.AddPump(pump.Host, uint64(pump.Port)) - } - for _, drainer := range topo.Drainers { - uniqueHosts.Insert(drainer.Host) - cfig.AddDrainer(drainer.Host, uint64(drainer.Port)) - } - for _, cdc := range topo.CDCServers { - uniqueHosts.Insert(cdc.Host) - cfig.AddCDC(cdc.Host, uint64(cdc.Port)) - } - for _, grafana := range topo.Grafana { - uniqueHosts.Insert(grafana.Host) - cfig.AddGrafana(grafana.Host, uint64(grafana.Port)) - } - for _, alertmanager := range topo.Alertmanager { - uniqueHosts.Insert(alertmanager.Host) - cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) - } - for host := range uniqueHosts { - cfig.AddNodeExpoertor(host, uint64(topo.MonitoredOptions.NodeExporterPort)) - cfig.AddBlackboxExporter(host, uint64(topo.MonitoredOptions.BlackboxExporterPort)) - cfig.AddMonitoredServer(host) + + if servers, found := topoHasField("PDServers"); found { + for i := 0; i < servers.Len(); i++ { + pd := servers.Index(i).Interface().(PDSpec) + uniqueHosts.Insert(pd.Host) + cfig.AddPD(pd.Host, uint64(pd.ClientPort)) + } + } + if servers, found := topoHasField("TiKVServers"); found { + for i := 0; i < servers.Len(); i++ { + kv := servers.Index(i).Interface().(TiKVSpec) + uniqueHosts.Insert(kv.Host) + cfig.AddTiKV(kv.Host, uint64(kv.StatusPort)) + } + } + if servers, found := topoHasField("TiDBServers"); found { + for i := 0; i < servers.Len(); i++ { + db := servers.Index(i).Interface().(TiDBSpec) + uniqueHosts.Insert(db.Host) + cfig.AddTiDB(db.Host, uint64(db.StatusPort)) + } + } + if servers, found := topoHasField("TiFlashServers"); found { + for i := 0; i < servers.Len(); i++ { + flash := servers.Index(i).Interface().(TiFlashSpec) + cfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort)) + cfig.AddTiFlash(flash.Host, uint64(flash.StatusPort)) + } + } + if servers, found := topoHasField("PumpServers"); found { + for i := 0; i < servers.Len(); i++ { + pump := servers.Index(i).Interface().(PumpSpec) + uniqueHosts.Insert(pump.Host) + cfig.AddPump(pump.Host, uint64(pump.Port)) + } + } + if servers, found := topoHasField("Trainers"); found { + for i := 0; i < servers.Len(); i++ { + drainer := servers.Index(i).Interface().(DrainerSpec) + uniqueHosts.Insert(drainer.Host) + cfig.AddDrainer(drainer.Host, uint64(drainer.Port)) + } + } + if servers, found := topoHasField("CDCServers"); found { + for i := 0; i < servers.Len(); i++ { + cdc := servers.Index(i).Interface().(CDCSpec) + uniqueHosts.Insert(cdc.Host) + cfig.AddCDC(cdc.Host, uint64(cdc.Port)) + } + } + if servers, found := topoHasField("Grafana"); found { + for i := 0; i < servers.Len(); i++ { + grafana := servers.Index(i).Interface().(GrafanaSpec) + uniqueHosts.Insert(grafana.Host) + cfig.AddGrafana(grafana.Host, uint64(grafana.Port)) + } + } + if servers, found := topoHasField("Alertmanager"); found { + for i := 0; i < servers.Len(); i++ { + alertmanager := servers.Index(i).Interface().(AlertmanagerSpec) + uniqueHosts.Insert(alertmanager.Host) + cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) + } + } + if 
servers, found := topoHasField("Masters"); found { + for i := 0; i < servers.Len(); i++ { + master := servers.Index(i) + host, port := master.FieldByName("Host").String(), master.FieldByName("Port").Int() + cfig.AddDMMaster(host, uint64(port)) + } + } + + if servers, found := topoHasField("Workers"); found { + for i := 0; i < servers.Len(); i++ { + master := servers.Index(i) + host, port := master.FieldByName("Host").String(), master.FieldByName("Port").Int() + cfig.AddDMWorker(host, uint64(port)) + } + } + if monitoredOptions != nil { + for host := range uniqueHosts { + cfig.AddNodeExpoertor(host, uint64(monitoredOptions.NodeExporterPort)) + cfig.AddBlackboxExporter(host, uint64(monitoredOptions.BlackboxExporterPort)) + cfig.AddMonitoredServer(host) + } } if err := i.initRules(e, spec, paths); err != nil { diff --git a/pkg/cluster/spec/pump.go b/pkg/cluster/spec/pump.go index b46255b862..0a7e82cffc 100644 --- a/pkg/cluster/spec/pump.go +++ b/pkg/cluster/spec/pump.go @@ -63,7 +63,7 @@ func (s PumpSpec) IsImported() bool { } // PumpComponent represents Pump component. -type PumpComponent struct{ *Specification } +type PumpComponent struct{ Topology *Specification } // Name implements Component interface. func (c *PumpComponent) Name() string { @@ -77,8 +77,8 @@ func (c *PumpComponent) Role() string { // Instances implements Component interface. func (c *PumpComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.PumpServers)) - for _, s := range c.PumpServers { + ins := make([]Instance, 0, len(c.Topology.PumpServers)) + for _, s := range c.Topology.PumpServers { s := s ins = append(ins, &PumpInstance{BaseInstance{ InstanceSpec: s, @@ -102,7 +102,7 @@ func (c *PumpComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.Port) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -110,7 +110,7 @@ func (c *PumpComponent) Instances() []Instance { // PumpInstance represent the Pump instance. type PumpInstance struct { BaseInstance - topo *Specification + topo Topology } // ScaleConfig deploy temporary config on scaling @@ -139,11 +139,12 @@ func (i *PumpInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(PumpSpec) cfg := scripts.NewPumpScript( i.GetHost()+":"+strconv.Itoa(i.GetPort()), @@ -151,7 +152,7 @@ func (i *PumpInstance) InitConfig( paths.Deploy, paths.Data[0], paths.Log, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(i.topo.Endpoints(deployUser)...) + ).WithPort(spec.Port).WithNumaNode(spec.NumaNode).AppendEndpoints(topo.Endpoints(deployUser)...) 
fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pump_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -166,7 +167,7 @@ func (i *PumpInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.Pump + globalConfig := topo.ServerConfigs.Pump // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/spec.go b/pkg/cluster/spec/spec.go index ec7452d84a..d0a700b33c 100644 --- a/pkg/cluster/spec/spec.go +++ b/pkg/cluster/spec/spec.go @@ -104,8 +104,8 @@ type ( TiSparkMasters []TiSparkMasterSpec `yaml:"tispark_masters,omitempty"` TiSparkWorkers []TiSparkWorkerSpec `yaml:"tispark_workers,omitempty"` Monitors []PrometheusSpec `yaml:"monitoring_servers"` - Grafana []GrafanaSpec `yaml:"grafana_servers,omitempty"` - Alertmanager []AlertManagerSpec `yaml:"alertmanager_servers,omitempty"` + Grafanas []GrafanaSpec `yaml:"grafana_servers,omitempty"` + Alertmanagers []AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) @@ -114,6 +114,10 @@ type BaseTopo struct { GlobalOptions *GlobalOptions MonitoredOptions *MonitoredOptions MasterList []string + + Monitors []PrometheusSpec + Grafanas []GrafanaSpec + Alertmanagers []AlertmanagerSpec } // Topology represents specification of the cluster. @@ -132,6 +136,7 @@ type Topology interface { // count how many time a path is used by instances in cluster CountDir(host string, dir string) int TLSConfig(dir string) (*tls.Config, error) + Merge(that Topology) Topology ScaleOutTopology } @@ -206,6 +211,9 @@ func (s *Specification) BaseTopo() *BaseTopo { GlobalOptions: &s.GlobalOptions, MonitoredOptions: s.GetMonitoredOptions(), MasterList: s.GetPDList(), + Monitors: s.Monitors, + Grafanas: s.Grafanas, + Alertmanagers: s.Alertmanagers, } } @@ -313,6 +321,22 @@ func findField(v reflect.Value, fieldName string) (int, bool) { return -1, false } +func findSliceField(v Topology, fieldName string) (reflect.Value, bool) { + topo := reflect.ValueOf(v) + if topo.Kind() == reflect.Ptr { + topo = topo.Elem() + } + + j, found := findField(topo, fieldName) + if found { + val := topo.Field(j) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + return val, true + } + } + return reflect.Value{}, false +} + // GetPDList returns a list of PD API hosts of the current cluster func (s *Specification) GetPDList() []string { var pdList []string @@ -333,23 +357,24 @@ func (s *Specification) GetEtcdClient(tlsCfg *tls.Config) (*clientv3.Client, err } // Merge returns a new Specification which sum old ones -func (s *Specification) Merge(that *Specification) *Specification { +func (s *Specification) Merge(that Topology) Topology { + spec := that.(*Specification) return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, - TiDBServers: append(s.TiDBServers, that.TiDBServers...), - TiKVServers: append(s.TiKVServers, that.TiKVServers...), - PDServers: append(s.PDServers, that.PDServers...), - TiFlashServers: append(s.TiFlashServers, that.TiFlashServers...), - PumpServers: append(s.PumpServers, that.PumpServers...), - Drainers: append(s.Drainers, that.Drainers...), - CDCServers: append(s.CDCServers, that.CDCServers...), - TiSparkMasters: append(s.TiSparkMasters, that.TiSparkMasters...), - TiSparkWorkers: append(s.TiSparkWorkers, that.TiSparkWorkers...), - Monitors: append(s.Monitors, that.Monitors...), - Grafana: append(s.Grafana, that.Grafana...), - Alertmanager: append(s.Alertmanager, 
that.Alertmanager...), + TiDBServers: append(s.TiDBServers, spec.TiDBServers...), + TiKVServers: append(s.TiKVServers, spec.TiKVServers...), + PDServers: append(s.PDServers, spec.PDServers...), + TiFlashServers: append(s.TiFlashServers, spec.TiFlashServers...), + PumpServers: append(s.PumpServers, spec.PumpServers...), + Drainers: append(s.Drainers, spec.Drainers...), + CDCServers: append(s.CDCServers, spec.CDCServers...), + TiSparkMasters: append(s.TiSparkMasters, spec.TiSparkMasters...), + TiSparkWorkers: append(s.TiSparkWorkers, spec.TiSparkWorkers...), + Monitors: append(s.Monitors, spec.Monitors...), + Grafanas: append(s.Grafanas, spec.Grafanas...), + Alertmanagers: append(s.Alertmanagers, spec.Alertmanagers...), } } @@ -639,7 +664,7 @@ func (s *Specification) Endpoints(user string) []*scripts.PDScript { } // AlertManagerEndpoints returns the AlertManager endpoints configurations -func AlertManagerEndpoints(alertmanager []AlertManagerSpec, user string, enableTLS bool) []*scripts.AlertManagerScript { +func AlertManagerEndpoints(alertmanager []AlertmanagerSpec, user string, enableTLS bool) []*scripts.AlertManagerScript { var ends []*scripts.AlertManagerScript for _, spec := range alertmanager { deployDir := Abs(user, spec.DeployDir) diff --git a/pkg/cluster/spec/spec_manager_test.go b/pkg/cluster/spec/spec_manager_test.go index 6f52ef995c..1c8d958810 100644 --- a/pkg/cluster/spec/spec_manager_test.go +++ b/pkg/cluster/spec/spec_manager_test.go @@ -45,6 +45,10 @@ func (m *TestMetadata) GetBaseMeta() *BaseMeta { return &m.BaseMeta } +func (t *TestTopology) Merge(topo Topology) Topology { + panic("not support") +} + func (m *TestMetadata) SetTopology(topo Topology) { testTopo, ok := topo.(*TestTopology) if !ok { @@ -97,6 +101,10 @@ func (t *TestTopology) GetMonitoredOptions() *MonitoredOptions { return nil } +func (t *TestTopology) GetGlobalOptions() GlobalOptions { + return GlobalOptions{} +} + func (t *TestTopology) CountDir(host string, dir string) int { return 0 } diff --git a/pkg/cluster/spec/tidb.go b/pkg/cluster/spec/tidb.go index 263f140685..64168d456c 100644 --- a/pkg/cluster/spec/tidb.go +++ b/pkg/cluster/spec/tidb.go @@ -62,7 +62,7 @@ func (s TiDBSpec) IsImported() bool { } // TiDBComponent represents TiDB component. -type TiDBComponent struct{ *Specification } +type TiDBComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiDBComponent) Name() string { @@ -76,8 +76,8 @@ func (c *TiDBComponent) Role() string { // Instances implements Component interface. 
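findSliceField is what lets the Prometheus config generation earlier in this patch walk an arbitrary topology (tidb or dm) by field name instead of depending on *Specification: it resolves a named slice field via reflection, and the caller type-asserts each element. A minimal usage sketch mirroring that pattern; the helper below is illustrative and not part of the patch:

// pdScrapeTargets is an illustrative helper (not in the patch) following the
// prometheus.go pattern: look up the slice field by name, then assert each
// element to its concrete spec type to read host and port.
func pdScrapeTargets(topo Topology) []string {
	targets := make([]string, 0)
	if servers, found := findSliceField(topo, "PDServers"); found {
		for i := 0; i < servers.Len(); i++ {
			pd := servers.Index(i).Interface().(PDSpec)
			targets = append(targets, fmt.Sprintf("%s:%d", pd.Host, pd.ClientPort))
		}
	}
	return targets
}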
func (c *TiDBComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiDBServers)) - for _, s := range c.TiDBServers { + ins := make([]Instance, 0, len(c.Topology.TiDBServers)) + for _, s := range c.Topology.TiDBServers { s := s ins = append(ins, &TiDBInstance{BaseInstance{ InstanceSpec: s, @@ -102,7 +102,7 @@ func (c *TiDBComponent) Instances() []Instance { url := fmt.Sprintf("%s://%s:%d/status", scheme, s.Host, s.StatusPort) return statusByURL(url, tlsCfg) }, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -110,7 +110,7 @@ func (c *TiDBComponent) Instances() []Instance { // TiDBInstance represent the TiDB instance type TiDBInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -121,11 +121,12 @@ func (i *TiDBInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(TiDBSpec) cfg := scripts.NewTiDBScript( i.GetHost(), @@ -133,7 +134,7 @@ func (i *TiDBInstance) InitConfig( paths.Log, ).WithPort(spec.Port).WithNumaNode(spec.NumaNode). WithStatusPort(spec.StatusPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). + AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -148,7 +149,7 @@ func (i *TiDBInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.TiDB + globalConfig := topo.ServerConfigs.TiDB // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/tiflash.go b/pkg/cluster/spec/tiflash.go index 9066999beb..0decb8c4a0 100644 --- a/pkg/cluster/spec/tiflash.go +++ b/pkg/cluster/spec/tiflash.go @@ -88,7 +88,7 @@ func (s TiFlashSpec) IsImported() bool { } // TiFlashComponent represents TiFlash component. -type TiFlashComponent struct{ *Specification } +type TiFlashComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiFlashComponent) Name() string { @@ -102,8 +102,8 @@ func (c *TiFlashComponent) Role() string { // Instances implements Component interface. 
func (c *TiFlashComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiFlashServers)) - for _, s := range c.TiFlashServers { + ins := make([]Instance, 0, len(c.Topology.TiFlashServers)) + for _, s := range c.Topology.TiFlashServers { ins = append(ins, &TiFlashInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), @@ -124,7 +124,7 @@ func (c *TiFlashComponent) Instances() []Instance { s.DataDir, }, StatusFn: s.Status, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -132,7 +132,7 @@ func (c *TiFlashComponent) Instances() []Instance { // TiFlashInstance represent the TiFlash instance type TiFlashInstance struct { BaseInstance - topo *Specification + topo Topology } // GetServicePort returns the service port of TiFlash @@ -146,7 +146,7 @@ func (i *TiFlashInstance) checkIncorrectKey(key string) error { if dir, ok := i.InstanceSpec.(TiFlashSpec).Config[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "host") } - if dir, ok := i.topo.ServerConfigs.TiFlash[key].(string); ok && dir != "" { + if dir, ok := i.topo.(*Specification).ServerConfigs.TiFlash[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "server_configs") } return nil @@ -298,14 +298,15 @@ func (i *TiFlashInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } spec := i.InstanceSpec.(TiFlashSpec) tidbStatusAddrs := []string{} - for _, tidb := range i.topo.TiDBServers { + for _, tidb := range topo.TiDBServers { tidbStatusAddrs = append(tidbStatusAddrs, fmt.Sprintf("%s:%d", tidb.Host, uint64(tidb.StatusPort))) } tidbStatusStr := strings.Join(tidbStatusAddrs, ",") @@ -327,7 +328,7 @@ func (i *TiFlashInstance) InitConfig( WithStatusPort(spec.StatusPort). WithTmpDir(spec.TmpDir). WithNumaNode(spec.NumaNode). - AppendEndpoints(i.topo.Endpoints(deployUser)...) + AppendEndpoints(topo.Endpoints(deployUser)...) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tiflash_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -343,7 +344,7 @@ func (i *TiFlashInstance) InitConfig( return err } - conf, err := i.InitTiFlashLearnerConfig(cfg, clusterVersion, i.topo.ServerConfigs.TiFlashLearner) + conf, err := i.InitTiFlashLearnerConfig(cfg, clusterVersion, topo.ServerConfigs.TiFlashLearner) if err != nil { return err } @@ -375,7 +376,7 @@ func (i *TiFlashInstance) InitConfig( return err } - conf, err = i.InitTiFlashConfig(cfg, i.topo.ServerConfigs.TiFlash) + conf, err = i.InitTiFlashConfig(cfg, topo.ServerConfigs.TiFlash) if err != nil { return err } @@ -428,7 +429,7 @@ type replicateConfig struct { func (i *TiFlashInstance) getEndpoints() []string { var endpoints []string - for _, pd := range i.topo.PDServers { + for _, pd := range i.topo.(*Specification).PDServers { endpoints = append(endpoints, fmt.Sprintf("%s:%d", pd.Host, uint64(pd.ClientPort))) } return endpoints diff --git a/pkg/cluster/spec/tikv.go b/pkg/cluster/spec/tikv.go index 71c3b676db..e11251aa8f 100644 --- a/pkg/cluster/spec/tikv.go +++ b/pkg/cluster/spec/tikv.go @@ -143,9 +143,7 @@ func (s TiKVSpec) Labels() (map[string]string, error) { } // TiKVComponent represents TiKV component. -type TiKVComponent struct { - *Specification -} +type TiKVComponent struct{ Topology *Specification } // Name implements Component interface. 
func (c *TiKVComponent) Name() string { @@ -159,8 +157,8 @@ func (c *TiKVComponent) Role() string { // Instances implements Component interface. func (c *TiKVComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiKVServers)) - for _, s := range c.TiKVServers { + ins := make([]Instance, 0, len(c.Topology.TiKVServers)) + for _, s := range c.Topology.TiKVServers { s := s ins = append(ins, &TiKVInstance{BaseInstance{ InstanceSpec: s, @@ -179,7 +177,7 @@ func (c *TiKVComponent) Instances() []Instance { s.DataDir, }, StatusFn: s.Status, - }, c.Specification}) + }, c.Topology}) } return ins } @@ -187,7 +185,7 @@ func (c *TiKVComponent) Instances() []Instance { // TiKVInstance represent the TiDB instance type TiKVInstance struct { BaseInstance - topo *Specification + topo Topology } // InitConfig implement Instance interface @@ -198,11 +196,12 @@ func (i *TiKVInstance) InitConfig( deployUser string, paths meta.DirPaths, ) error { - if err := i.BaseInstance.InitConfig(e, i.topo.GlobalOptions, deployUser, paths); err != nil { + topo := i.topo.(*Specification) + if err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil { return err } - enableTLS := i.topo.GlobalOptions.TLSEnabled + enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(TiKVSpec) cfg := scripts.NewTiKVScript( i.GetHost(), @@ -212,7 +211,7 @@ func (i *TiKVInstance) InitConfig( ).WithPort(spec.Port). WithNumaNode(spec.NumaNode). WithStatusPort(spec.StatusPort). - AppendEndpoints(i.topo.Endpoints(deployUser)...). + AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tikv_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -228,7 +227,7 @@ func (i *TiKVInstance) InitConfig( return err } - globalConfig := i.topo.ServerConfigs.TiKV + globalConfig := topo.ServerConfigs.TiKV // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( diff --git a/pkg/cluster/spec/tispark.go b/pkg/cluster/spec/tispark.go index 1c546dfdce..49dee7d893 100644 --- a/pkg/cluster/spec/tispark.go +++ b/pkg/cluster/spec/tispark.go @@ -121,7 +121,7 @@ func (s TiSparkWorkerSpec) Status(tlsCfg *tls.Config, pdList ...string) string { } // TiSparkMasterComponent represents TiSpark master component. -type TiSparkMasterComponent struct{ *Specification } +type TiSparkMasterComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiSparkMasterComponent) Name() string { @@ -135,8 +135,8 @@ func (c *TiSparkMasterComponent) Role() string { // Instances implements Component interface. 
func (c *TiSparkMasterComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiSparkMasters)) - for _, s := range c.TiSparkMasters { + ins := make([]Instance, 0, len(c.Topology.TiSparkMasters)) + for _, s := range c.Topology.TiSparkMasters { ins = append(ins, &TiSparkMasterInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -154,7 +154,7 @@ func (c *TiSparkMasterComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -163,7 +163,7 @@ func (c *TiSparkMasterComponent) Instances() []Instance { // TiSparkMasterInstance represent the TiSpark master instance type TiSparkMasterInstance struct { BaseInstance - topo *Specification + topo Topology } // GetCustomFields get custom spark configs of the instance @@ -201,6 +201,7 @@ func (i *TiSparkMasterInstance) InitConfig( comp := i.Role() host := i.GetHost() port := i.GetPort() + topo := i.topo.(*Specification) sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome()) @@ -219,11 +220,11 @@ func (i *TiSparkMasterInstance) InitConfig( // transfer default config pdList := make([]string, 0) - for _, pd := range i.topo.Endpoints(deployUser) { + for _, pd := range topo.Endpoints(deployUser) { pdList = append(pdList, fmt.Sprintf("%s:%d", pd.IP, pd.ClientPort)) } masterList := make([]string, 0) - for _, master := range i.topo.TiSparkMasters { + for _, master := range topo.TiSparkMasters { masterList = append(masterList, fmt.Sprintf("%s:%d", master.Host, master.Port)) } @@ -285,7 +286,7 @@ func (i *TiSparkMasterInstance) ScaleConfig( } // TiSparkWorkerComponent represents TiSpark slave component. -type TiSparkWorkerComponent struct{ *Specification } +type TiSparkWorkerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiSparkWorkerComponent) Name() string { @@ -299,8 +300,8 @@ func (c *TiSparkWorkerComponent) Role() string { // Instances implements Component interface. 
func (c *TiSparkWorkerComponent) Instances() []Instance { - ins := make([]Instance, 0, len(c.TiSparkWorkers)) - for _, s := range c.TiSparkWorkers { + ins := make([]Instance, 0, len(c.Topology.TiSparkWorkers)) + for _, s := range c.Topology.TiSparkWorkers { ins = append(ins, &TiSparkWorkerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, @@ -318,7 +319,7 @@ func (c *TiSparkWorkerComponent) Instances() []Instance { }, StatusFn: s.Status, }, - topo: c.Specification, + topo: c.Topology, }) } return ins @@ -327,7 +328,7 @@ func (c *TiSparkWorkerComponent) Instances() []Instance { // TiSparkWorkerInstance represent the TiSpark slave instance type TiSparkWorkerInstance struct { BaseInstance - topo *Specification + topo Topology } // GetJavaHome returns the java_home value in spec @@ -347,6 +348,7 @@ func (i *TiSparkWorkerInstance) InitConfig( comp := i.Role() host := i.GetHost() port := i.GetPort() + topo := i.topo.(*Specification) sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome()) @@ -365,16 +367,16 @@ func (i *TiSparkWorkerInstance) InitConfig( // transfer default config pdList := make([]string, 0) - for _, pd := range i.topo.Endpoints(deployUser) { + for _, pd := range topo.Endpoints(deployUser) { pdList = append(pdList, fmt.Sprintf("%s:%d", pd.IP, pd.ClientPort)) } masterList := make([]string, 0) - for _, master := range i.topo.TiSparkMasters { + for _, master := range topo.TiSparkMasters { masterList = append(masterList, fmt.Sprintf("%s:%d", master.Host, master.Port)) } cfg := config.NewTiSparkConfig(pdList).WithMasters(strings.Join(masterList, ",")). - WithCustomFields(i.topo.TiSparkMasters[0].SparkConfigs) + WithCustomFields(topo.TiSparkMasters[0].SparkConfigs) // transfer spark-defaults.conf fp := filepath.Join(paths.Cache, fmt.Sprintf("spark-defaults-%s-%d.conf", host, port)) if err := cfg.ConfigToFile(fp); err != nil { @@ -387,10 +389,10 @@ func (i *TiSparkWorkerInstance) InitConfig( env := scripts.NewTiSparkEnv(host). WithLocalIP(i.GetListenHost()). - WithMaster(i.topo.TiSparkMasters[0].Host). - WithMasterPorts(i.topo.TiSparkMasters[0].Port, i.topo.TiSparkMasters[0].WebPort). + WithMaster(topo.TiSparkMasters[0].Host). + WithMasterPorts(topo.TiSparkMasters[0].Port, topo.TiSparkMasters[0].WebPort). WithWorkerPorts(i.Ports[0], i.Ports[1]). 
- WithCustomEnv(i.topo.TiSparkMasters[0].SparkEnvs) + WithCustomEnv(topo.TiSparkMasters[0].SparkEnvs) // transfer spark-env.sh file fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-env-%s-%d.sh", host, port)) if err := env.ScriptToFile(fp); err != nil { @@ -440,7 +442,6 @@ func (i *TiSparkWorkerInstance) ScaleConfig( ) error { s := i.topo defer func() { i.topo = s }() - cluster := mustBeClusterTopo(topo) - i.topo = cluster.Merge(i.topo) + i.topo = topo.Merge(i.topo) return i.InitConfig(e, clusterName, clusterVersion, deployUser, paths) } diff --git a/pkg/cluster/spec/validate.go b/pkg/cluster/spec/validate.go index 2f423df854..fdc1461fe5 100644 --- a/pkg/cluster/spec/validate.go +++ b/pkg/cluster/spec/validate.go @@ -785,7 +785,7 @@ func (s *Specification) validateTLSEnabled() error { ComponentDrainer, ComponentCDC, ComponentPrometheus, - ComponentAlertManager, + ComponentAlertmanager, ComponentGrafana: default: return errors.Errorf("component %s is not supported in TLS enabled cluster", c.Name()) diff --git a/pkg/cluster/task/update_meta.go b/pkg/cluster/task/update_meta.go index 798c340949..8e6c1f5caa 100644 --- a/pkg/cluster/task/update_meta.go +++ b/pkg/cluster/task/update_meta.go @@ -41,77 +41,77 @@ func (u *UpdateMeta) Execute(ctx *Context) error { deleted := set.NewStringSet(u.deletedNodesID...) topo := u.metadata.Topology - for i, instance := range (&spec.TiDBComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiDBServers = append(newMeta.Topology.TiDBServers, topo.TiDBServers[i]) } - for i, instance := range (&spec.TiKVComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiKVComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiKVServers = append(newMeta.Topology.TiKVServers, topo.TiKVServers[i]) } - for i, instance := range (&spec.PDComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.PDComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.PDServers = append(newMeta.Topology.PDServers, topo.PDServers[i]) } - for i, instance := range (&spec.TiFlashComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiFlashComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiFlashServers = append(newMeta.Topology.TiFlashServers, topo.TiFlashServers[i]) } - for i, instance := range (&spec.PumpComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.PumpComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.PumpServers = append(newMeta.Topology.PumpServers, topo.PumpServers[i]) } - for i, instance := range (&spec.DrainerComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.DrainerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Drainers = append(newMeta.Topology.Drainers, topo.Drainers[i]) } - for i, instance := range (&spec.CDCComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.CDCComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.CDCServers = append(newMeta.Topology.CDCServers, topo.CDCServers[i]) } - for i, instance := range (&spec.TiSparkWorkerComponent{Specification: topo}).Instances() { + for i, instance 
:= range (&spec.TiSparkWorkerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiSparkWorkers = append(newMeta.Topology.TiSparkWorkers, topo.TiSparkWorkers[i]) } - for i, instance := range (&spec.TiSparkMasterComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.TiSparkMasterComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.TiSparkMasters = append(newMeta.Topology.TiSparkMasters, topo.TiSparkMasters[i]) } - for i, instance := range (&spec.MonitorComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } newMeta.Topology.Monitors = append(newMeta.Topology.Monitors, topo.Monitors[i]) } - for i, instance := range (&spec.GrafanaComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Grafana = append(newMeta.Topology.Grafana, topo.Grafana[i]) + newMeta.Topology.Grafanas = append(newMeta.Topology.Grafanas, topo.Grafanas[i]) } - for i, instance := range (&spec.AlertManagerComponent{Specification: topo}).Instances() { + for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } - newMeta.Topology.Alertmanager = append(newMeta.Topology.Alertmanager, topo.Alertmanager[i]) + newMeta.Topology.Alertmanagers = append(newMeta.Topology.Alertmanagers, topo.Alertmanagers[i]) } return spec.SaveClusterMeta(u.cluster, newMeta) } diff --git a/pkg/cluster/task/update_topology.go b/pkg/cluster/task/update_topology.go index 96178728ac..c83ebccd4b 100644 --- a/pkg/cluster/task/update_topology.go +++ b/pkg/cluster/task/update_topology.go @@ -46,11 +46,11 @@ func (u *UpdateTopology) Execute(ctx *Context) error { var ops []clientv3.Op var instances []spec.Instance - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.MonitorComponent{Specification: topo}).Instances(), "prometheus") - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.GrafanaComponent{Specification: topo}).Instances(), "grafana") - ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.AlertManagerComponent{Specification: topo}).Instances(), "alertmanager") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.MonitorComponent{Topology: topo}).Instances(), "prometheus") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.GrafanaComponent{Topology: topo}).Instances(), "grafana") + ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.AlertManagerComponent{Topology: topo}).Instances(), "alertmanager") - for _, instance := range (&spec.TiDBComponent{Specification: topo}).Instances() { + for _, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { ops = append(ops, clientv3.OpDelete(fmt.Sprintf("/topology/tidb/%s:%d", instance.GetHost(), instance.GetPort()), clientv3.WithPrefix())) } @@ -98,7 +98,7 @@ func updateInstancesAndOps(ops []clientv3.Op, destInstances []spec.Instance, del // for update it's topology. 
func updateTopologyOp(instance spec.Instance) (*clientv3.Op, error) { switch instance.ComponentName() { - case spec.ComponentAlertManager, spec.ComponentPrometheus, spec.ComponentGrafana: + case spec.ComponentAlertmanager, spec.ComponentPrometheus, spec.ComponentGrafana: topology := componentTopology{ IP: instance.GetHost(), Port: instance.GetPort(), diff --git a/pkg/cluster/template/config/prometheus.go b/pkg/cluster/template/config/prometheus.go index 98df697cbc..c62f56acd3 100644 --- a/pkg/cluster/template/config/prometheus.go +++ b/pkg/cluster/template/config/prometheus.go @@ -46,6 +46,9 @@ type PrometheusConfig struct { BlackboxAddr string KafkaExporterAddr string GrafanaAddr string + + DMMasterAddrs []string + DMWorkerAddrs []string } // NewPrometheusConfig returns a PrometheusConfig @@ -170,6 +173,18 @@ func (c *PrometheusConfig) AddGrafana(ip string, port uint64) *PrometheusConfig return c } +// AddDMMaster add an dm-master address +func (c *PrometheusConfig) AddDMMaster(ip string, port uint64) *PrometheusConfig { + c.DMMasterAddrs = append(c.DMMasterAddrs, fmt.Sprintf("%s:%d", ip, port)) + return c +} + +// AddDMWorker add an dm-worker address +func (c *PrometheusConfig) AddDMWorker(ip string, port uint64) *PrometheusConfig { + c.DMWorkerAddrs = append(c.DMWorkerAddrs, fmt.Sprintf("%s:%d", ip, port)) + return c +} + // Config generate the config file data. func (c *PrometheusConfig) Config() ([]byte, error) { fp := path.Join("/templates", "config", "prometheus.yml.tpl") diff --git a/templates/config/dm/prometheus.yml.tpl b/templates/config/dm/prometheus.yml.tpl index d5681b5efa..fcb2c9ca2f 100644 --- a/templates/config/dm/prometheus.yml.tpl +++ b/templates/config/dm/prometheus.yml.tpl @@ -14,11 +14,11 @@ rule_files: {{- if .AlertmanagerAddrs}} alerting: - alertmanagers: - - static_configs: - - targets: + alertmanagers: + - static_configs: + - targets: {{- range .AlertmanagerAddrs}} - - '{{.}}' + - '{{.}}' {{- end}} {{- end}} diff --git a/templates/config/prometheus.yml.tpl b/templates/config/prometheus.yml.tpl index a1bee0523b..43af42822a 100644 --- a/templates/config/prometheus.yml.tpl +++ b/templates/config/prometheus.yml.tpl @@ -9,13 +9,21 @@ global: # Load and evaluate rules in this file every 'evaluation_interval' seconds. 
rule_files: +{{- if .MonitoredServers}} - 'node.rules.yml' - 'blacker.rules.yml' - 'bypass.rules.yml' +{{- end}} +{{- if .PDAddrs}} - 'pd.rules.yml' +{{- end}} +{{- if .TiDBStatusAddrs}} - 'tidb.rules.yml' +{{- end}} +{{- if .TiKVStatusAddrs}} - 'tikv.rules.yml' - 'tikv.accelerate.rules.yml' +{{- end}} {{- if .TiFlashStatusAddrs}} - 'tiflash.rules.yml' {{- end}} @@ -31,6 +39,12 @@ rule_files: {{- if .LightningAddrs}} - 'lightning.rules.yml' {{- end}} +{{- if .DMWorkerAddrs}} + - 'dm_worker.rules.yml' +{{- end}} +{{- if .DMMasterAddrs}} + - 'dm_master.rules.yml' +{{- end}} {{- if .AlertmanagerAddrs}} alerting: @@ -262,7 +276,7 @@ scrape_configs: static_configs: - targets: {{- range .TiDBStatusAddrs}} - - '{{.}}' + - '{{.}}' {{- end}} labels: group: 'tidb' @@ -341,4 +355,24 @@ scrape_configs: regex: .* target_label: __address__ replacement: {{$addr}} -{{- end}} \ No newline at end of file +{{- end}} + +{{- if .DMMasterAddrs}} + - job_name: "dm_master" + honor_labels: true # don't overwrite job & instance labels + static_configs: + - targets: + {{- range .DMMasterAddrs}} + - '{{.}}' + {{- end}} +{{- end}} + +{{- if .DMWorkerAddrs}} + - job_name: "dm_worker" + honor_labels: true # don't overwrite job & instance labels + static_configs: + - targets: + {{- range .DMWorkerAddrs}} + - '{{.}}' + {{- end}} +{{- end}} diff --git a/templates/scripts/dm/run_grafana.sh.tpl b/templates/scripts/dm/run_grafana.sh.tpl deleted file mode 100644 index 341a56e49d..0000000000 --- a/templates/scripts/dm/run_grafana.sh.tpl +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -# WARNING: This file was auto-generated. Do not edit! -# All your edit might be overwritten! -DEPLOY_DIR={{.DeployDir}} -cd "${DEPLOY_DIR}" || exit 1 - -LANG=en_US.UTF-8 \ -{{- if .NumaNode}} -exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/bin/grafana-server \ -{{- else}} -exec bin/bin/grafana-server \ -{{- end}} - --homepath="{{.DeployDir}}/bin" \ - --config="{{.DeployDir}}/conf/grafana.ini" diff --git a/templates/scripts/dm/run_prometheus.sh.tpl b/templates/scripts/dm/run_prometheus.sh.tpl deleted file mode 100644 index a221eae406..0000000000 --- a/templates/scripts/dm/run_prometheus.sh.tpl +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -e - -DEPLOY_DIR={{.DeployDir}} -cd "${DEPLOY_DIR}" || exit 1 - -# WARNING: This file was auto-generated. Do not edit! -# All your edit might be overwritten! - - -exec > >(tee -i -a "{{.LogDir}}/prometheus.log") -exec 2>&1 - -{{- if .NumaNode}} -exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/prometheus/prometheus \ -{{- else}} -exec bin/prometheus/prometheus \ -{{- end}} - --config.file="{{.DeployDir}}/conf/prometheus.yml" \ - --web.listen-address=":{{.Port}}" \ - --web.external-url="http://{{.IP}}:{{.Port}}/" \ - --web.enable-admin-api \ - --log.level="info" \ - --storage.tsdb.path="{{.DataDir}}" \ - --storage.tsdb.retention="30d" diff --git a/tests/tiup-dm/script/util.sh b/tests/tiup-dm/script/util.sh index 9ddf37afa3..bc04fc5790 100755 --- a/tests/tiup-dm/script/util.sh +++ b/tests/tiup-dm/script/util.sh @@ -8,35 +8,35 @@ set -eu # PASS # coverage: 12.7% of statements in github.com/pingcap/tiup/components/dm/... function instance_num() { - name=$1 + name=$1 - count=$(tiup-dm display $name | grep "Total nodes" | awk -F ' ' '{print $3}') + count=$(tiup-dm display $name | grep "Total nodes" | awk -F ' ' '{print $3}') - echo $count + echo $count } # wait_instance_num_reach # wait the instance number of dm reach the target_num. 
# timeout 120 second function wait_instance_num_reach() { - name=$1 - target_num=$2 - - for ((i=0;i<120;i++)) - do - tiup-dm prune $name --yes - count=$(instance_num $name) - if [ "$count" == "$target_num" ]; then - echo "instance number reach $target_num" - return - else - sleep 1 - fi - - sleep 1 - done - - echo "fail to wait instance number reach $target_num, count $count, retry num: $i" - tiup-dm display $name - exit -1 + name=$1 + target_num=$2 + + for ((i=0;i<120;i++)) + do + tiup-dm prune $name --yes + count=$(instance_num $name) + if [ "$count" == "$target_num" ]; then + echo "instance number reach $target_num" + return + else + sleep 1 + fi + + sleep 1 + done + + echo "fail to wait instance number reach $target_num, count $count, retry num: $i" + tiup-dm display $name + exit -1 } diff --git a/tests/tiup-dm/test_import.sh b/tests/tiup-dm/test_import.sh index 032d7b90f7..8808f1fa91 100755 --- a/tests/tiup-dm/test_import.sh +++ b/tests/tiup-dm/test_import.sh @@ -10,8 +10,9 @@ function deploy_by_ansible() { apt-get -y install git curl sshpass python-pip sudo # step 2 - useradd -m -d /home/tidb tidb + id tidb || useradd -m -d /home/tidb tidb echo "tidb:tidb" | chpasswd + sed -i '/tidb/d' /etc/sudoers echo "tidb ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers # use the same key from root instead of create one. @@ -22,7 +23,7 @@ function deploy_by_ansible() { # step 3 su tidb < Date: Mon, 16 Nov 2020 18:06:01 +0800 Subject: [PATCH 05/14] Fix tiflash not start with newest nightly PD (#902) --- components/playground/instance/tiflash.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/components/playground/instance/tiflash.go b/components/playground/instance/tiflash.go index 959827693b..8e8c314076 100644 --- a/components/playground/instance/tiflash.go +++ b/components/playground/instance/tiflash.go @@ -77,7 +77,8 @@ type scheduleConfig struct { } type replicateMaxReplicaConfig struct { - MaxReplicas int `json:"max-replicas"` + MaxReplicas int `json:"max-replicas"` + EnablePlacementRules string `json:"enable-placement-rules"` } type replicateEnablePlacementRulesConfig struct { @@ -128,7 +129,8 @@ func (inst *TiFlashInstance) Start(ctx context.Context, version v0manifest.Versi } // Update maxReplicas before placement rules so that it would not be overwritten maxReplicas, err := json.Marshal(replicateMaxReplicaConfig{ - MaxReplicas: 1, + MaxReplicas: 1, + EnablePlacementRules: "false", }) if err != nil { return err From eacd119de4ddd5cccdd4e68d13304de0da41c3ec Mon Sep 17 00:00:00 2001 From: Zhi Qi <30543181+LittleFall@users.noreply.github.com> Date: Tue, 17 Nov 2020 10:19:51 +0800 Subject: [PATCH 06/14] set max_memory_usage from 10GB to 0(unlimited). 
(#907) --- components/playground/instance/tiflash_config.go | 2 +- pkg/cluster/spec/tiflash.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/components/playground/instance/tiflash_config.go b/components/playground/instance/tiflash_config.go index b643931e69..0051a650a2 100644 --- a/components/playground/instance/tiflash_config.go +++ b/components/playground/instance/tiflash_config.go @@ -54,7 +54,7 @@ size = "1000M" [profiles] [profiles.default] load_balancing = "random" -max_memory_usage = 10000000000 +max_memory_usage = 0 use_uncompressed_cache = 0 [profiles.readonly] readonly = 1 diff --git a/pkg/cluster/spec/tiflash.go b/pkg/cluster/spec/tiflash.go index 0decb8c4a0..da271826cf 100644 --- a/pkg/cluster/spec/tiflash.go +++ b/pkg/cluster/spec/tiflash.go @@ -229,7 +229,7 @@ server_configs: users.readonly.quota: "default" users.readonly.networks.ip: "::/0" profiles.default.load_balancing: "random" - profiles.default.max_memory_usage: 10000000000 + profiles.default.max_memory_usage: 0 profiles.default.use_uncompressed_cache: 0 profiles.readonly.readonly: 1 `, cfg.DataDir, cfg.LogDir, cfg.TCPPort, cfg.HTTPPort, cfg.TiDBStatusAddrs, cfg.IP, cfg.FlashServicePort, From cd971a510119f28b093ace6316f00ebd9cacfa2b Mon Sep 17 00:00:00 2001 From: yuzhibotao <52073478+yuzhibotao@users.noreply.github.com> Date: Tue, 17 Nov 2020 11:48:18 +0800 Subject: [PATCH 07/14] Import configuration when migrating a cluster from Ansible (#766) Co-authored-by: SIGSEGV Co-authored-by: Lonng Co-authored-by: Allen Zhong --- components/cluster/command/import.go | 5 + go.mod | 1 + go.sum | 2 + pkg/cluster/ansible/import.go | 165 ++++++++++++++++++++++++- pkg/cluster/ansible/import_test.go | 157 ++++++++++++++++++++--- pkg/cluster/spec/server_config.go | 66 +++++++--- pkg/cluster/spec/server_config_test.go | 82 ++++++++++-- pkg/cluster/spec/spec_test.go | 49 +++++++- pkg/cluster/spec/tikv.go | 12 +- pkg/set/any_set.go | 90 ++++++++++++++ pkg/set/any_set_test.go | 32 +++++ pkg/set/string_set_test.go | 6 +- 12 files changed, 610 insertions(+), 57 deletions(-) create mode 100644 pkg/set/any_set.go create mode 100644 pkg/set/any_set_test.go diff --git a/components/cluster/command/import.go b/components/cluster/command/import.go index 4063fd6cb4..58e8463479 100644 --- a/components/cluster/command/import.go +++ b/components/cluster/command/import.go @@ -128,6 +128,11 @@ func newImportCmd() *cobra.Command { return err } + // copy config detail to meta file + if err = ansible.LoadConfig(clsName, clsMeta); err != nil { + return err + } + if err = spec.SaveClusterMeta(clsName, clsMeta); err != nil { return err } diff --git a/go.mod b/go.mod index bfeea173d8..7ac8dd8635 100644 --- a/go.mod +++ b/go.mod @@ -79,6 +79,7 @@ require ( gopkg.in/mattn/go-runewidth.v0 v0.0.4 // indirect gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 honnef.co/go/tools v0.0.1-2020.1.4 // indirect sigs.k8s.io/yaml v1.2.0 // indirect software.sslmate.com/src/go-pkcs12 v0.0.0-20200619203921-c9ed90bd32dc diff --git a/go.sum b/go.sum index e60e089def..71f33b3a6f 100644 --- a/go.sum +++ b/go.sum @@ -1194,6 +1194,8 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 
v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/cluster/ansible/import.go b/pkg/cluster/ansible/import.go index af5005226e..b44780d1b7 100644 --- a/pkg/cluster/ansible/import.go +++ b/pkg/cluster/ansible/import.go @@ -18,11 +18,14 @@ import ( "io" "os" "path/filepath" + "reflect" "strconv" + "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/logger/log" + "github.com/pingcap/tiup/pkg/set" "github.com/relex/aini" ) @@ -81,11 +84,20 @@ func parseInventoryFile(invFile io.Reader) (string, *spec.ClusterMeta, *aini.Inv clsMeta.Topology.GlobalOptions.DeployDir = grp.Vars["deploy_dir"] // deploy_dir and data_dir of monitored need to be set, otherwise they will be // subdirs of deploy_dir in global options - clsMeta.Topology.MonitoredOptions.DeployDir = clsMeta.Topology.GlobalOptions.DeployDir - clsMeta.Topology.MonitoredOptions.DataDir = filepath.Join( - clsMeta.Topology.MonitoredOptions.DeployDir, - "data", - ) + allSame := uniqueVar("deploy_dir", inventory.Groups["monitored_servers"].Hosts) + if len(allSame) == 1 { + clsMeta.Topology.MonitoredOptions.DeployDir = allSame[0] + clsMeta.Topology.MonitoredOptions.DataDir = filepath.Join( + clsMeta.Topology.MonitoredOptions.DeployDir, + "data", + ) + } else { + clsMeta.Topology.MonitoredOptions.DeployDir = clsMeta.Topology.GlobalOptions.DeployDir + clsMeta.Topology.MonitoredOptions.DataDir = filepath.Join( + clsMeta.Topology.MonitoredOptions.DeployDir, + "data", + ) + } if grp.Vars["process_supervision"] != "systemd" { return "", nil, inventory, errors.New("only support cluster deployed with systemd") @@ -100,6 +112,7 @@ func parseInventoryFile(invFile io.Reader) (string, *spec.ClusterMeta, *aini.Inv } else { return "", nil, inventory, errors.New("no available host in the inventory file") } + return clsName, clsMeta, inventory, err } @@ -113,3 +126,145 @@ func SSHKeyPath() string { return fmt.Sprintf("%s/.ssh/id_rsa", homeDir) } + +func uniqueVar(key string, hosts map[string]*aini.Host) []string { + vars := set.NewStringSet() + for _, h := range hosts { + vars.Insert(h.Vars[key]) + } + return vars.Slice() +} + +// parse config files +func parseConfigFile(cfgfile string) (map[string]interface{}, error) { + srvConfigs := make(map[string]interface{}) + if _, err := toml.DecodeFile(cfgfile, &srvConfigs); err != nil { + return nil, errors.Annotate(err, "decode toml file") + } + return spec.FlattenMap(srvConfigs), nil +} + +func diffConfigs(configs []map[string]interface{}) (global map[string]interface{}, locals []map[string]interface{}) { + global = make(map[string]interface{}) + keySet := set.NewStringSet() + + // parse all configs from file + for _, config := range configs { + locals = append(locals, config) + for k := range config { + keySet.Insert(k) + } + } + + // summary global config + for k := range keySet { + valSet := set.NewAnySet(reflect.DeepEqual) + for _, config := range locals { + 
valSet.Insert(config[k]) + } + if len(valSet.Slice()) > 1 { + // this key can't be put into global + continue + } + global[k] = valSet.Slice()[0] + } + + // delete global config from local + for _, config := range locals { + for k := range global { + delete(config, k) + } + } + + return +} + +// LoadConfig files to clusterMeta, include tidbservers, tikvservers, pdservers pumpservers and drainerservers +func LoadConfig(clsName string, cls *spec.ClusterMeta) error { + // deal with tidb config + configs := []map[string]interface{}{} + for _, srv := range cls.Topology.TiDBServers { + prefixkey := spec.ComponentTiDB + fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) + if config, err := parseConfigFile(fname); err == nil { + configs = append(configs, config) + } else { + return err + } + } + global, locals := diffConfigs(configs) + cls.Topology.ServerConfigs.TiDB = global + for i, local := range locals { + cls.Topology.TiDBServers[i].Config = local + } + + // deal with tikv config + configs = []map[string]interface{}{} + for _, srv := range cls.Topology.TiKVServers { + prefixkey := spec.ComponentTiKV + fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) + if config, err := parseConfigFile(fname); err == nil { + configs = append(configs, config) + } else { + return err + } + } + global, locals = diffConfigs(configs) + cls.Topology.ServerConfigs.TiKV = global + for i, local := range locals { + cls.Topology.TiKVServers[i].Config = local + } + + // deal with pd config + configs = []map[string]interface{}{} + for _, srv := range cls.Topology.PDServers { + prefixkey := spec.ComponentPD + fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.ClientPort)) + if config, err := parseConfigFile(fname); err == nil { + configs = append(configs, config) + } else { + return err + } + } + global, locals = diffConfigs(configs) + cls.Topology.ServerConfigs.PD = global + for i, local := range locals { + cls.Topology.PDServers[i].Config = local + } + + // deal with pump config + configs = []map[string]interface{}{} + for _, srv := range cls.Topology.PumpServers { + prefixkey := spec.ComponentPump + fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) + if config, err := parseConfigFile(fname); err == nil { + configs = append(configs, config) + } else { + return err + } + } + global, locals = diffConfigs(configs) + cls.Topology.ServerConfigs.Pump = global + for i, local := range locals { + cls.Topology.PumpServers[i].Config = local + } + + // deal with drainer config + configs = []map[string]interface{}{} + for _, srv := range cls.Topology.Drainers { + prefixkey := spec.ComponentDrainer + fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) + if config, err := parseConfigFile(fname); err == nil { + configs = append(configs, config) + } else { + return err + } + } + global, locals = diffConfigs(configs) + cls.Topology.ServerConfigs.Drainer = global + for i, local := range locals { + cls.Topology.Drainers[i].Config = local + } + + return nil +} diff --git a/pkg/cluster/ansible/import_test.go b/pkg/cluster/ansible/import_test.go index 018db032ca..a334c3cb0f 100644 --- a/pkg/cluster/ansible/import_test.go +++ b/pkg/cluster/ansible/import_test.go @@ -19,12 
+19,13 @@ import ( "os" "path/filepath" "sort" + "strings" "testing" "github.com/creasty/defaults" . "github.com/pingcap/check" "github.com/pingcap/tiup/pkg/cluster/spec" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) type ansSuite struct { @@ -36,6 +37,67 @@ func TestAnsible(t *testing.T) { TestingT(t) } +func (s *ansSuite) TestMonitoredDeployDir(c *C) { + r := strings.NewReader(` +[monitored_servers] +172.16.10.1 +172.16.10.2 +172.16.10.3 + +[all:vars] +process_supervision = systemd + `) + + _, clsMeta, _, err := parseInventoryFile(r) + c.Assert(err, IsNil) + c.Assert(clsMeta.Topology.MonitoredOptions.DeployDir, Equals, "") + + r = strings.NewReader(` +[monitored_servers] +172.16.10.1 +172.16.10.2 +172.16.10.3 + +[all:vars] +deploy_dir = /data1/deploy +process_supervision = systemd + `) + + _, clsMeta, _, err = parseInventoryFile(r) + c.Assert(err, IsNil) + c.Assert(clsMeta.Topology.MonitoredOptions.DeployDir, Equals, "/data1/deploy") + + r = strings.NewReader(` +[monitored_servers] +172.16.10.1 deploy_dir=/data/deploy +172.16.10.2 deploy_dir=/data/deploy +172.16.10.3 deploy_dir=/data/deploy + +[all:vars] +deploy_dir = /data1/deploy +process_supervision = systemd + `) + + _, clsMeta, _, err = parseInventoryFile(r) + c.Assert(err, IsNil) + c.Assert(clsMeta.Topology.MonitoredOptions.DeployDir, Equals, "/data/deploy") + + r = strings.NewReader(` +[monitored_servers] +172.16.10.1 deploy_dir=/data/deploy1 +172.16.10.2 deploy_dir=/data/deploy2 +172.16.10.3 deploy_dir=/data/deploy3 + +[all:vars] +deploy_dir = /data1/deploy +process_supervision = systemd + `) + + _, clsMeta, _, err = parseInventoryFile(r) + c.Assert(err, IsNil) + c.Assert(clsMeta.Topology.MonitoredOptions.DeployDir, Equals, "/data1/deploy") +} + func (s *ansSuite) TestParseInventoryFile(c *C) { dir := "test-data" invData, err := os.Open(filepath.Join(dir, "inventory.ini")) @@ -50,21 +112,21 @@ func (s *ansSuite) TestParseInventoryFile(c *C) { c.Assert(clsMeta.User, Equals, "tiops") expected := []byte(`global: - user: tiops - deploy_dir: /home/tiopsimport/ansible-deploy + user: tiops + deploy_dir: /home/tiopsimport/ansible-deploy monitored: - deploy_dir: /home/tiopsimport/ansible-deploy - data_dir: /home/tiopsimport/ansible-deploy/data + deploy_dir: /home/tiopsimport/ansible-deploy + data_dir: /home/tiopsimport/ansible-deploy/data server_configs: - tidb: - binlog.enable: true - tikv: {} - pd: {} - tiflash: {} - tiflash-learner: {} - pump: {} - drainer: {} - cdc: {} + tidb: + binlog.enable: true + tikv: {} + pd: {} + tiflash: {} + tiflash-learner: {} + pump: {} + drainer: {} + cdc: {} tidb_servers: [] tikv_servers: [] tiflash_servers: [] @@ -73,7 +135,6 @@ monitoring_servers: [] `) topo, err := yaml.Marshal(clsMeta.Topology) - fmt.Printf("Got initial topo:\n%s\n", topo) c.Assert(err, IsNil) c.Assert(topo, DeepEquals, expected) } @@ -108,10 +169,8 @@ func (s *ansSuite) TestParseGroupVars(c *C) { sortClusterMeta(&metaFull) sortClusterMeta(&expected) - actual, err := yaml.Marshal(metaFull) + _, err = yaml.Marshal(metaFull) c.Assert(err, IsNil) - fmt.Printf("Got initial meta:\n%s\n", actual) - c.Assert(metaFull, DeepEquals, expected) } @@ -147,3 +206,65 @@ func sortClusterMeta(clsMeta *spec.ClusterMeta) { return clsMeta.Topology.Alertmanagers[i].Host < clsMeta.Topology.Alertmanagers[j].Host }) } + +func withTempFile(content string, fn func(string)) { + file, err := ioutil.TempFile("/tmp", "topology-test") + if err != nil { + panic(fmt.Sprintf("create temp file: %s", err)) + } + defer os.Remove(file.Name()) + + _, err = 
file.WriteString(content) + if err != nil { + panic(fmt.Sprintf("write temp file: %s", err)) + } + file.Close() + + fn(file.Name()) +} + +func (s *ansSuite) TestParseConfig(c *C) { + // base test + withTempFile(` +a = true + +[b] +c = 1 +d = "\"" +`, func(file string) { + m, err := parseConfigFile(file) + c.Assert(err, IsNil) + c.Assert(m["x"], IsNil) + c.Assert(m["a"], Equals, true) + c.Assert(m["b.c"], Equals, int64(1)) + c.Assert(m["b.d"], Equals, "\"") + }) +} + +func (s *ansSuite) TestDiffConfig(c *C) { + global, locals := diffConfigs([]map[string]interface{}{ + { + "a": true, + "b": 1, + "foo.bar": 1, + }, + { + "a": true, + "b": 2, + "foo.bar": 1, + }, + { + "a": true, + "b": 3, + "foo.bar": 1, + }, + }) + + c.Assert(global["a"], NotNil) + c.Assert(global["b"], IsNil) + c.Assert(global["a"], Equals, true) + c.Assert(global["foo.bar"], Equals, 1) + c.Assert(locals[0]["b"], Equals, 1) + c.Assert(locals[1]["b"], Equals, 2) + c.Assert(locals[2]["b"], Equals, 3) +} diff --git a/pkg/cluster/spec/server_config.go b/pkg/cluster/spec/server_config.go index 0d1de10da3..f657d59eda 100644 --- a/pkg/cluster/spec/server_config.go +++ b/pkg/cluster/spec/server_config.go @@ -70,12 +70,12 @@ func strKeyMap(val interface{}) interface{} { return val } -func flattenKey(key string, val interface{}) (string, interface{}) { +func foldKey(key string, val interface{}) (string, interface{}) { parts := strings.SplitN(key, ".", 2) if len(parts) == 1 { return key, strKeyMap(val) } - subKey, subVal := flattenKey(parts[1], val) + subKey, subVal := foldKey(parts[1], val) return parts[0], map[string]interface{}{ subKey: strKeyMap(subVal), } @@ -99,19 +99,52 @@ func patch(origin map[string]interface{}, key string, val interface{}) { } } -func flattenMap(ms map[string]interface{}) map[string]interface{} { +// FoldMap convert single layer map to multi-layer +func FoldMap(ms map[string]interface{}) map[string]interface{} { + // we flatten map first to deal with the case like: + // a.b: + // c.d: xxx + ms = FlattenMap(ms) result := map[string]interface{}{} for k, v := range ms { - key, val := flattenKey(k, v) + key, val := foldKey(k, v) patch(result, key, val) } return result } +// FlattenMap convert mutil-layer map to single layer +func FlattenMap(ms map[string]interface{}) map[string]interface{} { + result := map[string]interface{}{} + for k, v := range ms { + var sub map[string]interface{} + + if m, ok := v.(map[string]interface{}); ok { + sub = FlattenMap(m) + } else if m, ok := v.(map[interface{}]interface{}); ok { + fixM := map[string]interface{}{} + for k, v := range m { + if sk, ok := k.(string); ok { + fixM[sk] = v + } + } + sub = FlattenMap(fixM) + } else { + result[k] = v + continue + } + + for sk, sv := range sub { + result[k+"."+sk] = sv + } + } + return result +} + func merge(orig map[string]interface{}, overwrites ...map[string]interface{}) (map[string]interface{}, error) { - lhs := flattenMap(orig) + lhs := FoldMap(orig) for _, overwrite := range overwrites { - rhs := flattenMap(overwrite) + rhs := FoldMap(overwrite) for k, v := range rhs { patch(lhs, k, v) } @@ -124,6 +157,7 @@ func GetValueFromPath(m map[string]interface{}, p string) interface{} { ss := strings.Split(p, ".") searchMap := make(map[interface{}]interface{}) + m = FoldMap(m) for k, v := range m { searchMap[k] = v } @@ -140,19 +174,15 @@ func searchValue(m map[interface{}]interface{}, ss []string) interface{} { return m[ss[0]] } - if m[strings.Join(ss, ".")] != nil { - return m[strings.Join(ss, ".")] - } - - for i := l - 1; i > 0; i-- { - 
key := strings.Join(ss[:i], ".") - if m[key] == nil { - continue + key := ss[0] + if pm, ok := m[key].(map[interface{}]interface{}); ok { + return searchValue(pm, ss[1:]) + } else if pm, ok := m[key].(map[string]interface{}); ok { + searchMap := make(map[interface{}]interface{}) + for k, v := range pm { + searchMap[k] = v } - if pm, ok := m[key].(map[interface{}]interface{}); ok { - return searchValue(pm, ss[i:]) - } - return nil + return searchValue(searchMap, ss[1:]) } return nil diff --git a/pkg/cluster/spec/server_config_test.go b/pkg/cluster/spec/server_config_test.go index ab390934ea..a6dbd5613b 100644 --- a/pkg/cluster/spec/server_config_test.go +++ b/pkg/cluster/spec/server_config_test.go @@ -41,15 +41,13 @@ func (s *configSuite) TestGetValueFromPath(c *check.C) { server_configs: tidb: a.b.c.d: 1 - a: - b: - c: - d: 2 a.b: c.e: 3 a.b.c: f: 4 - h.i.j.k: [1, 2, 3] + h.i.j.k: [1, 2, 4] + e: + f: true `) topo := new(Specification) @@ -58,7 +56,77 @@ server_configs: c.Assert(err, check.IsNil) c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.d"), check.Equals, 1) - c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.e"), check.Equals, nil) + c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.e"), check.Equals, 3) c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.f"), check.Equals, 4) - c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "h.i.j.k"), check.DeepEquals, []interface{}{1, 2, 3}) + c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "h.i.j.k"), check.DeepEquals, []interface{}{1, 2, 4}) + c.Assert(GetValueFromPath(topo.ServerConfigs.TiDB, "e.f"), check.Equals, true) +} + +func (s *configSuite) TestFlattenMap(c *check.C) { + var ( + m map[string]interface{} + r map[string]interface{} + ) + + m = map[string]interface{}{ + "a": 1, + "b": map[string]interface{}{ + "c": 2, + }, + "d.e": 3, + "f.g": map[string]interface{}{ + "h": 4, + "i": 5, + }, + "j": []int{6, 7}, + } + r = FlattenMap(m) + c.Assert(r["a"], check.Equals, 1) + c.Assert(r["b.c"], check.Equals, 2) + c.Assert(r["d.e"], check.Equals, 3) + c.Assert(r["f.g.h"], check.Equals, 4) + c.Assert(r["f.g.i"], check.Equals, 5) + c.Assert(r["j"], check.DeepEquals, []int{6, 7}) +} + +func (s *configSuite) TestFoldMap(c *check.C) { + var ( + m map[string]interface{} + r map[string]interface{} + ) + + m = map[string]interface{}{ + "a": 1, + "b.c": 2, + "b.d": 3, + "e.f": map[string]interface{}{ + "g.h": 4, + }, + "i": map[string]interface{}{ + "j.k": 5, + "l": 6, + }, + } + + r = FoldMap(m) + c.Assert(r, check.DeepEquals, map[string]interface{}{ + "a": 1, + "b": map[string]interface{}{ + "c": 2, + "d": 3, + }, + "e": map[string]interface{}{ + "f": map[string]interface{}{ + "g": map[string]interface{}{ + "h": 4, + }, + }, + }, + "i": map[string]interface{}{ + "j": map[string]interface{}{ + "k": 5, + }, + "l": 6, + }, + }) } diff --git a/pkg/cluster/spec/spec_test.go b/pkg/cluster/spec/spec_test.go index e40f9c2f67..eb91fbb7a2 100644 --- a/pkg/cluster/spec/spec_test.go +++ b/pkg/cluster/spec/spec_test.go @@ -171,7 +171,7 @@ tidb_servers: }, }, } - got := flattenMap(topo.ServerConfigs.TiDB) + got := FoldMap(topo.ServerConfigs.TiDB) c.Assert(got, DeepEquals, expected) buf := &bytes.Buffer{} err = toml.NewEncoder(buf).Encode(expected) @@ -199,7 +199,7 @@ tidb_servers: }, }, } - got = flattenMap(topo.TiDBServers[0].Config) + got = FoldMap(topo.TiDBServers[0].Config) c.Assert(err, IsNil) c.Assert(got, DeepEquals, expected) @@ -213,7 +213,7 @@ tidb_servers: }, }, } - got = flattenMap(topo.TiDBServers[1].Config) + got 
= FoldMap(topo.TiDBServers[1].Config) c.Assert(err, IsNil) c.Assert(got, DeepEquals, expected) } @@ -243,7 +243,7 @@ tikv_servers: }, }, } - got := flattenMap(topo.TiKVServers[0].Config) + got := FoldMap(topo.TiKVServers[0].Config) c.Assert(err, IsNil) c.Assert(got, DeepEquals, expected) } @@ -471,6 +471,47 @@ item7 = 700 c.Assert(string(merge2), DeepEquals, expected) } +func (s *metaSuiteTopo) TestTiKVLabels(c *C) { + spec := Specification{} + err := yaml.Unmarshal([]byte(` +tikv_servers: + - host: 172.16.5.138 + config: + server.labels: + dc: dc1 + zone: zone1 + host: host1 +`), &spec) + c.Assert(err, IsNil) + labels, err := spec.TiKVServers[0].Labels() + c.Assert(err, IsNil) + c.Assert(labels, DeepEquals, map[string]string{ + "dc": "dc1", + "zone": "zone1", + "host": "host1", + }) + + spec = Specification{} + err = yaml.Unmarshal([]byte(` +tikv_servers: + - host: 172.16.5.138 + config: + server.labels.dc: dc1 + server.labels.zone: zone1 + server.labels.host: host1 +`), &spec) + c.Assert(err, IsNil) + /* + labels, err = spec.TiKVServers[0].Labels() + c.Assert(err, IsNil) + c.Assert(labels, DeepEquals, map[string]string{ + "dc": "dc1", + "zone": "zone1", + "host": "host1", + }) + */ +} + func (s *metaSuiteTopo) TestLocationLabels(c *C) { spec := Specification{} diff --git a/pkg/cluster/spec/tikv.go b/pkg/cluster/spec/tikv.go index e11251aa8f..8019dc6ea2 100644 --- a/pkg/cluster/spec/tikv.go +++ b/pkg/cluster/spec/tikv.go @@ -124,8 +124,16 @@ func (s TiKVSpec) IsImported() bool { func (s TiKVSpec) Labels() (map[string]string, error) { lbs := make(map[string]string) - if serverLbs := GetValueFromPath(s.Config, "server.labels"); serverLbs != nil { - for k, v := range serverLbs.(map[interface{}]interface{}) { + if serverLabels := GetValueFromPath(s.Config, "server.labels"); serverLabels != nil { + m := map[interface{}]interface{}{} + if sm, ok := serverLabels.(map[string]interface{}); ok { + for k, v := range sm { + m[k] = v + } + } else if im, ok := serverLabels.(map[interface{}]interface{}); ok { + m = im + } + for k, v := range m { key, ok := k.(string) if !ok { return nil, errors.Errorf("TiKV label name %v is not a string, check the instance: %s:%d", k, s.Host, s.GetMainPort()) diff --git a/pkg/set/any_set.go b/pkg/set/any_set.go new file mode 100644 index 0000000000..2815d9cacc --- /dev/null +++ b/pkg/set/any_set.go @@ -0,0 +1,90 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +// AnySet is a set stores interface{} +type AnySet struct { + eq func(a interface{}, b interface{}) bool + slice []interface{} +} + +// NewAnySet builds a AnySet +func NewAnySet(eq func(a interface{}, b interface{}) bool, aa ...interface{}) *AnySet { + slice := []interface{}{} +out: + for _, a := range aa { + for _, b := range slice { + if eq(a, b) { + continue out + } + } + slice = append(slice, a) + } + return &AnySet{eq, slice} +} + +// Exist checks whether `val` exists in `s`. 
+func (s *AnySet) Exist(val interface{}) bool { + for _, a := range s.slice { + if s.eq(a, val) { + return true + } + } + return false +} + +// Insert inserts `val` into `s`. +func (s *AnySet) Insert(val interface{}) { + if !s.Exist(val) { + s.slice = append(s.slice, val) + } +} + +// Intersection returns the intersection of two sets +func (s *AnySet) Intersection(rhs *AnySet) *AnySet { + newSet := NewAnySet(s.eq) + for elt := range rhs.slice { + if s.Exist(elt) { + newSet.Insert(elt) + } + } + return newSet +} + +// Remove removes `val` from `s` +func (s *AnySet) Remove(val interface{}) { + for i, a := range s.slice { + if s.eq(a, val) { + s.slice = append(s.slice[:i], s.slice[i+1:]...) + return + } + } +} + +// Difference returns the difference of two sets +func (s *AnySet) Difference(rhs *AnySet) *AnySet { + newSet := NewAnySet(s.eq) + diffSet := NewAnySet(s.eq, rhs.slice...) + for elt := range s.slice { + if !diffSet.Exist(elt) { + newSet.Insert(elt) + } + } + return newSet +} + +// Slice converts the set to a slice +func (s *AnySet) Slice() []interface{} { + return append([]interface{}{}, s.slice...) +} diff --git a/pkg/set/any_set_test.go b/pkg/set/any_set_test.go new file mode 100644 index 0000000000..fbca880cba --- /dev/null +++ b/pkg/set/any_set_test.go @@ -0,0 +1,32 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package set + +import ( + "reflect" + + "github.com/pingcap/check" +) + +func (s *setTestSuite) TestAnySet(c *check.C) { + set := NewAnySet(reflect.DeepEqual) + set.Insert(true) + set.Insert(9527) + + c.Assert(set.slice[0], check.DeepEquals, true) + c.Assert(set.Slice()[0], check.DeepEquals, true) + + c.Assert(set.slice[1], check.DeepEquals, 9527) + c.Assert(set.Slice()[1], check.DeepEquals, 9527) +} diff --git a/pkg/set/string_set_test.go b/pkg/set/string_set_test.go index eec5da0b55..1af63c8be9 100644 --- a/pkg/set/string_set_test.go +++ b/pkg/set/string_set_test.go @@ -20,15 +20,15 @@ import ( "github.com/pingcap/check" ) -var _ = check.Suite(&stringSetTestSuite{}) +var _ = check.Suite(&setTestSuite{}) -type stringSetTestSuite struct{} +type setTestSuite struct{} func TestNewStringSet(t *testing.T) { check.TestingT(t) } -func (s *stringSetTestSuite) TestStringSet(c *check.C) { +func (s *setTestSuite) TestStringSet(c *check.C) { set := NewStringSet() vals := []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"} for i := range vals { From f9591eb9be013fb74a2626bdf873b5238e926e31 Mon Sep 17 00:00:00 2001 From: 9547 Date: Tue, 17 Nov 2020 12:32:53 +0800 Subject: [PATCH 08/14] feat(node_exporter): add --collector.buddyinfo (#904) --- pkg/cluster/embed/autogen_pkger.go | 2 +- templates/scripts/run_node_exporter.sh.tpl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/embed/autogen_pkger.go b/pkg/cluster/embed/autogen_pkger.go index e574df3297..60af7d427a 100644 --- a/pkg/cluster/embed/autogen_pkger.go +++ b/pkg/cluster/embed/autogen_pkger.go @@ -33,7 +33,7 @@ func init() { autogenFiles["/templates/scripts/run_dm-worker.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIk1hc3Rlckxpc3QifX0KICB7ey0gcmFuZ2UgJGlkeCwgJG1hc3RlciA6PSAufX0KICAgIHt7LSBpZiBlcSAkaWR4IDB9fQogICAgICB7ey0gJG1hc3Rlci5JUH19Ont7JG1hc3Rlci5Qb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3skbWFzdGVyLklQfX06e3skbWFzdGVyLlBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2RtLXdvcmtlci9kbS13b3JrZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2RtLXdvcmtlci9kbS13b3JrZXIgXAp7ey0gZW5kfX0KICAgIC0tbmFtZT0ie3suTmFtZX19IiBcCiAgICAtLXdvcmtlci1hZGRyPSIwLjAuMC4wOnt7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tbG9nLWZpbGU9Int7LkxvZ0Rpcn19L2RtLXdvcmtlci5sb2ciIFwKICAgIC0tam9pbj0ie3t0ZW1wbGF0ZSAiTWFzdGVyTGlzdCIgLkVuZHBvaW50c319IiBcCiAgICAtLWNvbmZpZz1jb25mL2RtLXdvcmtlci50b21sID4+ICJ7ey5Mb2dEaXJ9fS9kbS13b3JrZXJfc3Rkb3V0LmxvZyIgMj4+ICJ7ey5Mb2dEaXJ9fS9kbS13b3JrZXJfc3RkZXJyLmxvZyIK" autogenFiles["/templates/scripts/run_drainer.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2RyYWluZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2RyYWluZXIgXAp7ey0gZW5kfX0KICAgIC0tbm9kZS1pZD0ie3suTm9kZUlEfX0iIFwKICAgIC0tYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQtdXJscz0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vZHJhaW5lci5sb2ciIFwKICAgIC0tY29uZmlnPWNvbmYvZHJhaW5lci50b21sIFwKICAgIC0taW5pdGlhbC1jb21taXQtdHM9Int7LkNvbW1pdFRzfX0iIDI+PiAie3suTG9nRGlyfX0vZHJhaW5lcl9zdGRlcnIubG9nIgo=" autogenFiles["/templates/scripts/run_grafana.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKTEFORz1lbl9VUy5VVEYtOCBcCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9iaW4vZ3JhZmFuYS1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2Jpbi9ncmFmYW5hLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1ob21lcGF0aD0ie3suRGVwbG95RGlyfX0vYmluIiBcCiAgICAtLWNvbmZpZz0ie3suRGVwbG95RGlyfX0vY29uZi9ncmFmYW5hLmluaSIK" - autogenFiles["/templates/scripts/run_node_exporter.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKZXhlYyA+ID4odGVlIC1pIC1hICJ7ey5Mb2dEaXJ9fS9ub2RlX2V4cG9ydGVyLmxvZyIpCmV4ZWMgMj4mMQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL25vZGVfZXhwb3J0ZXIvbm9kZV9leHBvcnRlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vbm9kZV9leHBvcnRlci9ub2RlX2V4cG9ydGVyIFwKe3stIGVuZH19CiAgICAtLXdlYi5saXN0ZW4tYWRkcmVzcz0iOnt7LlBvcnR9fSIgXAogICAgLS1jb2xsZWN0b3IudGNwc3RhdCBcCiAgICAtLWNvbGxlY3Rvci5zeXN0ZW1kIFwKICAgIC0tY29sbGVjdG9yLm1vdW50c3RhdHMgXAogICAgLS1jb2xsZWN0b3IubWVtaW5mb19udW1hIFwKICAgIC0tY29sbGVjdG9yLmludGVycnVwdHMgXAogICAgLS1jb2xsZWN0b3Iudm1zdGF0LmZpZWxkcz0iXi4qIiBcCiAgICAtLWxvZy5sZXZlbD0iaW5mbyIK" + autogenFiles["/templates/scripts/run_node_exporter.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKZXhlYyA+ID4odGVlIC1pIC1hICJ7ey5Mb2dEaXJ9fS9ub2RlX2V4cG9ydGVyLmxvZyIpCmV4ZWMgMj4mMQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL25vZGVfZXhwb3J0ZXIvbm9kZV9leHBvcnRlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vbm9kZV9leHBvcnRlci9ub2RlX2V4cG9ydGVyIFwKe3stIGVuZH19CiAgICAtLXdlYi5saXN0ZW4tYWRkcmVzcz0iOnt7LlBvcnR9fSIgXAogICAgLS1jb2xsZWN0b3IudGNwc3RhdCBcCiAgICAtLWNvbGxlY3Rvci5zeXN0ZW1kIFwKICAgIC0tY29sbGVjdG9yLm1vdW50c3RhdHMgXAogICAgLS1jb2xsZWN0b3IubWVtaW5mb19udW1hIFwKICAgIC0tY29sbGVjdG9yLmludGVycnVwdHMgXAogICAgLS1jb2xsZWN0b3IuYnVkZHlpbmZvIFwKICAgIC0tY29sbGVjdG9yLnZtc3RhdC5maWVsZHM9Il4uKiIgXAogICAgLS1sb2cubGV2ZWw9ImluZm8iCg==" autogenFiles["/templates/scripts/run_pd.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5OYW1lfX09e3skcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLlBlZXJQb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3stICRwZC5OYW1lfX09e3skcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLlBlZXJQb3J0fX0KICAgIHt7LSBlbmR9fQogIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9wZC1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3BkLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1uYW1lPSJ7ey5OYW1lfX0iIFwKICAgIC0tY2xpZW50LXVybHM9Int7LlNjaGVtZX19Oi8ve3suTGlzdGVuSG9zdH19Ont7LkNsaWVudFBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtY2xpZW50LXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5DbGllbnRQb3J0fX0iIFwKICAgIC0tcGVlci11cmxzPSJ7ey5TY2hlbWV9fTovL3t7LklQfX06e3suUGVlclBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtcGVlci11cmxzPSJ7ey5TY2hlbWV9fTovL3t7LklQfX06e3suUGVlclBvcnR9fSIgXAogICAgLS1kYXRhLWRpcj0ie3suRGF0YURpcn19IiBcCiAgICAtLWluaXRpYWwtY2x1c3Rlcj0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvcGQudG9tbCBcCiAgICAtLWxvZy1maWxlPSJ7ey5Mb2dEaXJ9fS9wZC5sb2ciIDI+PiAie3suTG9nRGlyfX0vcGRfc3RkZXJyLmxvZyIKICAK" autogenFiles["/templates/scripts/run_pd_scale.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL3BkLXNlcnZlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vcGQtc2VydmVyIFwKe3stIGVuZH19CiAgICAtLW5hbWU9Int7Lk5hbWV9fSIgXAogICAgLS1jbGllbnQtdXJscz0ie3suU2NoZW1lfX06Ly97ey5MaXN0ZW5Ib3N0fX06e3suQ2xpZW50UG9ydH19IiBcCiAgICAtLWFkdmVydGlzZS1jbGllbnQtdXJscz0ie3suU2NoZW1lfX06Ly97ey5JUH19Ont7LkNsaWVudFBvcnR9fSIgXAogICAgLS1wZWVyLXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5QZWVyUG9ydH19IiBcCiAgICAtLWFkdmVydGlzZS1wZWVyLXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5QZWVyUG9ydH19IiBcCiAgICAtLWRhdGEtZGlyPSJ7ey5EYXRhRGlyfX0iIFwKICAgIC0tam9pbj0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvcGQudG9tbCBcCiAgICAtLWxvZy1maWxlPSJ7ey5Mb2dEaXJ9fS9wZC5sb2ciIDI+PiAie3suTG9nRGlyfX0vcGRfc3RkZXJyLmxvZyIKICAK" autogenFiles["/templates/scripts/run_prometheus.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCmV4ZWMgPiA+KHRlZSAtaSAtYSAie3suTG9nRGlyfX0vcHJvbWV0aGV1cy5sb2ciKQpleGVjIDI+JjEKCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9wcm9tZXRoZXVzL3Byb21ldGhldXMgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3Byb21ldGhldXMvcHJvbWV0aGV1cyBcCnt7LSBlbmR9fQogICAgLS1jb25maWcuZmlsZT0ie3suRGVwbG95RGlyfX0vY29uZi9wcm9tZXRoZXVzLnltbCIgXAogICAgLS13ZWIubGlzdGVuLWFkZHJlc3M9Ijp7ey5Qb3J0fX0iIFwKICAgIC0td2ViLmV4dGVybmFsLXVybD0iaHR0cDovL3t7LklQfX06e3suUG9ydH19LyIgXAogICAgLS13ZWIuZW5hYmxlLWFkbWluLWFwaSBcCiAgICAtLWxvZy5sZXZlbD0iaW5mbyIgXAogICAgLS1zdG9yYWdlLnRzZGIucGF0aD0ie3suRGF0YURpcn19IiBcCiAgICAtLXN0b3JhZ2UudHNkYi5yZXRlbnRpb249Int7LlJldGVudGlvbn19Igo=" diff --git a/templates/scripts/run_node_exporter.sh.tpl b/templates/scripts/run_node_exporter.sh.tpl index f99ca565af..9bbf6f065a 100644 --- a/templates/scripts/run_node_exporter.sh.tpl +++ b/templates/scripts/run_node_exporter.sh.tpl @@ -20,5 +20,6 @@ exec bin/node_exporter/node_exporter \ --collector.mountstats \ --collector.meminfo_numa \ --collector.interrupts \ + --collector.buddyinfo \ --collector.vmstat.fields="^.*" \ --log.level="info" From ffa217fc481f3170800a9ae13678066d873c17c5 Mon Sep 17 00:00:00 2001 From: 9547 Date: Tue, 17 Nov 2020 13:54:42 +0800 Subject: [PATCH 09/14] Feature/add log dir (#908) --- cmd/env.go | 1 + components/cluster/command/root.go | 2 +- components/dm/command/root.go | 2 +- pkg/localdata/constant.go | 3 +++ pkg/logger/debug.go | 21 ++++++++++++--------- tests/tiup-cluster/script/scale_tools.sh | 4 ++++ tests/tiup-dm/test_cmd.sh | 4 ++++ 7 files changed, 26 insertions(+), 11 deletions(-) diff --git a/cmd/env.go b/cmd/env.go index abfe241151..efc37b548f 100644 --- a/cmd/env.go +++ b/cmd/env.go @@ -28,6 +28,7 @@ var envList = []string{ localdata.EnvNameSSHPassPrompt, localdata.EnvNameSSHPath, localdata.EnvNameSCPPath, + localdata.EnvNameLogPath, } 
func newEnvCmd() *cobra.Command { diff --git a/components/cluster/command/root.go b/components/cluster/command/root.go index 41a1377466..cd516390d7 100644 --- a/components/cluster/command/root.go +++ b/components/cluster/command/root.go @@ -298,7 +298,7 @@ func Execute() { } if !errorx.HasTrait(err, errutil.ErrTraitPreCheck) { - logger.OutputDebugLog() + logger.OutputDebugLog("tiup-cluster") } if errx := errorx.Cast(err); errx != nil { diff --git a/components/dm/command/root.go b/components/dm/command/root.go index e994ff432a..34db5b1144 100644 --- a/components/dm/command/root.go +++ b/components/dm/command/root.go @@ -217,7 +217,7 @@ func Execute() { } if !errorx.HasTrait(err, errutil.ErrTraitPreCheck) { - logger.OutputDebugLog() + logger.OutputDebugLog("tiup-dm") } if errx := errorx.Cast(err); errx != nil { diff --git a/pkg/localdata/constant.go b/pkg/localdata/constant.go index f42d4cec15..c61ac3bbbf 100644 --- a/pkg/localdata/constant.go +++ b/pkg/localdata/constant.go @@ -81,6 +81,9 @@ const ( // EnvNameKeepSourceTarget is the variable name by which user can keep the source target or not EnvNameKeepSourceTarget = "TIUP_KEEP_SOURCE_TARGET" + // EnvNameLogPath is the variable name by which user can write the log files into + EnvNameLogPath = "TIUP_LOG_PATH" + // MetaFilename represents the process meta file name MetaFilename = "tiup_process_meta" ) diff --git a/pkg/logger/debug.go b/pkg/logger/debug.go index 6c358fc3b4..32ccdcf50d 100644 --- a/pkg/logger/debug.go +++ b/pkg/logger/debug.go @@ -22,6 +22,7 @@ import ( "time" "github.com/pingcap/tiup/pkg/colorutil" + "github.com/pingcap/tiup/pkg/localdata" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -35,20 +36,22 @@ func newDebugLogCore() zapcore.Core { } // OutputDebugLog outputs debug log in the current working directory. -func OutputDebugLog() { - if err := os.MkdirAll("./logs", 0755); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "\nCreate debug logs directory failed %v.\n", err) +func OutputDebugLog(prefix string) { + logDir := os.Getenv(localdata.EnvNameLogPath) + if logDir == "" { + profile := localdata.InitProfile() + logDir = profile.Path("logs") + } + if err := os.MkdirAll(logDir, 0755); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "\nCreate debug logs(%s) directory failed %v.\n", logDir, err) return } // FIXME: Stupid go does not allow writing fraction seconds without a leading dot. 
- fileName := time.Now().Format("./logs/tiup-cluster-debug-2006-01-02-15-04-05.log") - filePath, err := filepath.Abs(fileName) - if err != nil { - filePath = fileName - } + fileName := time.Now().Format(fmt.Sprintf("%s-debug-2006-01-02-15-04-05.log", prefix)) + filePath := filepath.Join(logDir, fileName) - err = ioutil.WriteFile(filePath, debugBuffer.Bytes(), 0644) + err := ioutil.WriteFile(filePath, debugBuffer.Bytes(), 0644) if err != nil { _, _ = colorutil.ColorWarningMsg.Fprint(os.Stderr, "\nWarn: Failed to write error debug log.\n") } else { diff --git a/tests/tiup-cluster/script/scale_tools.sh b/tests/tiup-cluster/script/scale_tools.sh index 80080382d5..844fe16e5a 100755 --- a/tests/tiup-cluster/script/scale_tools.sh +++ b/tests/tiup-cluster/script/scale_tools.sh @@ -100,4 +100,8 @@ function scale_tools() { fi tiup-cluster $client _test $name writable + + # test cluster log dir + tiup-cluster notfound-command 2>&1 | grep $HOME/.tiup/logs/tiup-cluster-debug + TIUP_LOG_PATH=/tmp/a/b tiup-cluster notfound-command 2>&1 | grep /tmp/a/b/tiup-cluster-debug } diff --git a/tests/tiup-dm/test_cmd.sh b/tests/tiup-dm/test_cmd.sh index 2398895dad..f2fb070ace 100755 --- a/tests/tiup-dm/test_cmd.sh +++ b/tests/tiup-dm/test_cmd.sh @@ -73,3 +73,7 @@ yes | tiup-dm scale-out $name $topo_worker ./script/task/run.sh tiup-dm --yes destroy $name + +# test dm log dir +tiup-dm notfound-command 2>&1 | grep $HOME/.tiup/logs/tiup-dm-debug +TIUP_LOG_PATH=/tmp/a/b tiup-dm notfound-command 2>&1 | grep /tmp/a/b/tiup-dm-debug From bd9a03a700a9b74c40262c397c2f0cc06045d1f7 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Tue, 17 Nov 2020 16:03:28 +0800 Subject: [PATCH 10/14] Fix imported pump & drainer start script (#903) --- components/playground/playground.go | 8 ++--- pkg/cluster/api/binlog.go | 51 ++++++++++++++++++++++++---- pkg/cluster/embed/autogen_pkger.go | 4 +-- pkg/cluster/manager.go | 2 +- pkg/cluster/operation/scale_in.go | 6 ++-- pkg/cluster/spec/drainer.go | 7 +++- pkg/cluster/spec/pump.go | 7 +++- templates/scripts/run_drainer.sh.tpl | 2 ++ templates/scripts/run_pump.sh.tpl | 2 ++ 9 files changed, 71 insertions(+), 18 deletions(-) diff --git a/components/playground/playground.go b/components/playground/playground.go index 486b3495a4..4b478a0a56 100644 --- a/components/playground/playground.go +++ b/components/playground/playground.go @@ -176,7 +176,7 @@ func (p *Playground) removePumpWhenTombstone(c *api.BinlogClient, inst *instance defer logIfErr(p.renderSDFile()) for { - tombstone, err := c.IsPumpTombstone(inst.NodeID()) + tombstone, err := c.IsPumpTombstone(inst.Addr()) if err != nil { fmt.Println(err) } @@ -199,7 +199,7 @@ func (p *Playground) removeDrainerWhenTombstone(c *api.BinlogClient, inst *insta defer logIfErr(p.renderSDFile()) for { - tombstone, err := c.IsDrainerTombstone(inst.NodeID()) + tombstone, err := c.IsDrainerTombstone(inst.Addr()) if err != nil { fmt.Println(err) } @@ -325,7 +325,7 @@ func (p *Playground) handleScaleIn(w io.Writer, pid int) error { if err != nil { return errors.AddStack(err) } - err = c.OfflinePump(inst.Addr(), inst.NodeID()) + err = c.OfflinePump(inst.Addr()) if err != nil { return errors.AddStack(err) } @@ -344,7 +344,7 @@ func (p *Playground) handleScaleIn(w io.Writer, pid int) error { if err != nil { return errors.AddStack(err) } - err = c.OfflineDrainer(inst.Addr(), inst.NodeID()) + err = c.OfflineDrainer(inst.Addr()) if err != nil { return errors.AddStack(err) } diff --git a/pkg/cluster/api/binlog.go b/pkg/cluster/api/binlog.go index 83fffa5a6e..ee57066097 
100644 --- a/pkg/cluster/api/binlog.go +++ b/pkg/cluster/api/binlog.go @@ -84,12 +84,20 @@ type NodeStatus struct { } // IsPumpTombstone check if drainer is tombstone. -func (c *BinlogClient) IsPumpTombstone(nodeID string) (bool, error) { +func (c *BinlogClient) IsPumpTombstone(addr string) (bool, error) { + nodeID, err := c.nodeID(addr, "pumps") + if err != nil { + return false, err + } return c.isTombstone("pumps", nodeID) } // IsDrainerTombstone check if drainer is tombstone. -func (c *BinlogClient) IsDrainerTombstone(nodeID string) (bool, error) { +func (c *BinlogClient) IsDrainerTombstone(addr string) (bool, error) { + nodeID, err := c.nodeID(addr, "drainers") + if err != nil { + return false, err + } return c.isTombstone("drainer", nodeID) } @@ -121,13 +129,36 @@ func (c *BinlogClient) drainerNodeStatus() (status []*NodeStatus, err error) { return c.nodeStatus("drainers") } +func (c *BinlogClient) nodeID(addr, ty string) (string, error) { + nodes, err := c.nodeStatus(ty) + if err != nil { + return "", err + } + + for _, node := range nodes { + if addr == node.Addr { + return node.NodeID, nil + } + } + + return "", errors.Errorf("pump node id for address %s not found", addr) +} + // UpdateDrainerState update the specify state as the specified state. -func (c *BinlogClient) UpdateDrainerState(nodeID string, state string) error { +func (c *BinlogClient) UpdateDrainerState(addr string, state string) error { + nodeID, err := c.nodeID(addr, "drainers") + if err != nil { + return err + } return c.updateStatus("drainers", nodeID, state) } // UpdatePumpState update the specify state as the specified state. -func (c *BinlogClient) UpdatePumpState(nodeID string, state string) error { +func (c *BinlogClient) UpdatePumpState(addr string, state string) error { + nodeID, err := c.nodeID(addr, "pumps") + if err != nil { + return err + } return c.updateStatus("pumps", nodeID, state) } @@ -228,11 +259,19 @@ func (c *BinlogClient) offline(addr string, nodeID string) error { } // OfflinePump offline a pump. -func (c *BinlogClient) OfflinePump(addr string, nodeID string) error { +func (c *BinlogClient) OfflinePump(addr string) error { + nodeID, err := c.nodeID(addr, "pumps") + if err != nil { + return err + } return c.offline(addr, nodeID) } // OfflineDrainer offline a drainer. 
-func (c *BinlogClient) OfflineDrainer(addr string, nodeID string) error { +func (c *BinlogClient) OfflineDrainer(addr string) error { + nodeID, err := c.nodeID(addr, "drainers") + if err != nil { + return err + } return c.offline(addr, nodeID) } diff --git a/pkg/cluster/embed/autogen_pkger.go b/pkg/cluster/embed/autogen_pkger.go index 60af7d427a..c5cc56aacb 100644 --- a/pkg/cluster/embed/autogen_pkger.go +++ b/pkg/cluster/embed/autogen_pkger.go @@ -31,13 +31,13 @@ func init() { autogenFiles["/templates/scripts/run_dm-master.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKe3stIGRlZmluZSAiTWFzdGVyTGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkbWFzdGVyIDo9IC59fQogICAge3stIGlmIGVxICRpZHggMH19CiAgICAgIHt7LSAkbWFzdGVyLk5hbWV9fT17eyRtYXN0ZXIuU2NoZW1lfX06Ly97eyRtYXN0ZXIuSVB9fTp7eyRtYXN0ZXIuUGVlclBvcnR9fQogICAge3stIGVsc2UgLX19CiAgICAgICx7ey0gJG1hc3Rlci5OYW1lfX09e3skbWFzdGVyLlNjaGVtZX19Oi8ve3skbWFzdGVyLklQfX06e3skbWFzdGVyLlBlZXJQb3J0fX0KICAgIHt7LSBlbmR9fQogIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9kbS1tYXN0ZXIvZG0tbWFzdGVyIFwKe3stIGVsc2V9fQpleGVjIGJpbi9kbS1tYXN0ZXIvZG0tbWFzdGVyIFwKe3stIGVuZH19Cnt7LSBpZiAuVjFTb3VyY2VQYXRofX0KICAgIC0tdjEtc291cmNlcy1wYXRoPSJ7ey5WMVNvdXJjZVBhdGh9fSIgXAp7ey0gZW5kfX0KICAgIC0tbmFtZT0ie3suTmFtZX19IiBcCiAgICAtLW1hc3Rlci1hZGRyPSIwLjAuMC4wOnt7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGVlci11cmxzPSJ7ey5JUH19Ont7LlBlZXJQb3J0fX0iIFwKICAgIC0tYWR2ZXJ0aXNlLXBlZXItdXJscz0ie3suSVB9fTp7ey5QZWVyUG9ydH19IiBcCiAgICAtLWxvZy1maWxlPSJ7ey5Mb2dEaXJ9fS9kbS1tYXN0ZXIubG9nIiBcCiAgICAtLWRhdGEtZGlyPSJ7ey5EYXRhRGlyfX0iIFwKICAgIC0taW5pdGlhbC1jbHVzdGVyPSJ7e3RlbXBsYXRlICJNYXN0ZXJMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvZG0tbWFzdGVyLnRvbWwgPj4gInt7LkxvZ0Rpcn19L2RtLW1hc3Rlcl9zdGRvdXQubG9nIiAyPj4gInt7LkxvZ0Rpcn19L2RtLW1hc3Rlcl9zdGRlcnIubG9nIgo=" autogenFiles["/templates/scripts/run_dm-master_scale.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKe3stIGRlZmluZSAiTWFzdGVyTGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkbWFzdGVyIDo9IC59fQogICAge3stIGlmIGVxICRpZHggMH19CiAgICAgIHt7LSAkbWFzdGVyLklQfX06e3skbWFzdGVyLlBvcnR9fQogICAge3stIGVsc2UgLX19CiAgICAgICx7ey0gJG1hc3Rlci5JUH19Ont7JG1hc3Rlci5Qb3J0fX0KICAgIHt7LSBlbmR9fQogIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9kbS1tYXN0ZXIvZG0tbWFzdGVyIFwKe3stIGVsc2V9fQpleGVjIGJpbi9kbS1tYXN0ZXIvZG0tbWFzdGVyIFwKe3stIGVuZH19CiAgICAtLW5hbWU9Int7Lk5hbWV9fSIgXAogICAgLS1tYXN0ZXItYWRkcj0iMC4wLjAuMDp7ey5Qb3J0fX0iIFwKICAgIC0tYWR2ZXJ0aXNlLWFkZHI9Int7LklQfX06e3suUG9ydH19IiBcCiAgICAtLXBlZXItdXJscz0ie3suU2NoZW1lfX06Ly97ey5JUH19Ont7LlBlZXJQb3J0fX0iIFwKICAgIC0tYWR2ZXJ0aXNlLXBlZXItdXJscz0ie3suU2NoZW1lfX06Ly97ey5JUH19Ont7LlBlZXJQb3J0fX0iIFwKICAgIC0tbG9nLWZpbGU9Int7LkxvZ0Rpcn19L2RtLW1hc3Rlci5sb2ciIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1qb2luPSJ7e3RlbXBsYXRlICJNYXN0ZXJMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvZG0tbWFzdGVyLnRvbWwgPj4gInt7LkxvZ0Rpcn19L2RtLW1hc3Rlcl9zdGRvdXQubG9nIiAyPj4gInt7LkxvZ0Rpcn19L2RtLW1hc3Rlcl9zdGRlcnIubG9nIgo=" 
autogenFiles["/templates/scripts/run_dm-worker.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIk1hc3Rlckxpc3QifX0KICB7ey0gcmFuZ2UgJGlkeCwgJG1hc3RlciA6PSAufX0KICAgIHt7LSBpZiBlcSAkaWR4IDB9fQogICAgICB7ey0gJG1hc3Rlci5JUH19Ont7JG1hc3Rlci5Qb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3skbWFzdGVyLklQfX06e3skbWFzdGVyLlBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2RtLXdvcmtlci9kbS13b3JrZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2RtLXdvcmtlci9kbS13b3JrZXIgXAp7ey0gZW5kfX0KICAgIC0tbmFtZT0ie3suTmFtZX19IiBcCiAgICAtLXdvcmtlci1hZGRyPSIwLjAuMC4wOnt7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tbG9nLWZpbGU9Int7LkxvZ0Rpcn19L2RtLXdvcmtlci5sb2ciIFwKICAgIC0tam9pbj0ie3t0ZW1wbGF0ZSAiTWFzdGVyTGlzdCIgLkVuZHBvaW50c319IiBcCiAgICAtLWNvbmZpZz1jb25mL2RtLXdvcmtlci50b21sID4+ICJ7ey5Mb2dEaXJ9fS9kbS13b3JrZXJfc3Rkb3V0LmxvZyIgMj4+ICJ7ey5Mb2dEaXJ9fS9kbS13b3JrZXJfc3RkZXJyLmxvZyIK" - autogenFiles["/templates/scripts/run_drainer.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2RyYWluZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2RyYWluZXIgXAp7ey0gZW5kfX0KICAgIC0tbm9kZS1pZD0ie3suTm9kZUlEfX0iIFwKICAgIC0tYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQtdXJscz0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vZHJhaW5lci5sb2ciIFwKICAgIC0tY29uZmlnPWNvbmYvZHJhaW5lci50b21sIFwKICAgIC0taW5pdGlhbC1jb21taXQtdHM9Int7LkNvbW1pdFRzfX0iIDI+PiAie3suTG9nRGlyfX0vZHJhaW5lcl9zdGRlcnIubG9nIgo=" + autogenFiles["/templates/scripts/run_drainer.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL2RyYWluZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2RyYWluZXIgXAp7ey0gZW5kfX0Ke3stIGlmIC5Ob2RlSUR9fQogICAgLS1ub2RlLWlkPSJ7ey5Ob2RlSUR9fSIgXAp7ey0gZW5kfX0KICAgIC0tYWRkcj0ie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQtdXJscz0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vZHJhaW5lci5sb2ciIFwKICAgIC0tY29uZmlnPWNvbmYvZHJhaW5lci50b21sIFwKICAgIC0taW5pdGlhbC1jb21taXQtdHM9Int7LkNvbW1pdFRzfX0iIDI+PiAie3suTG9nRGlyfX0vZHJhaW5lcl9zdGRlcnIubG9nIgo=" autogenFiles["/templates/scripts/run_grafana.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKTEFORz1lbl9VUy5VVEYtOCBcCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9iaW4vZ3JhZmFuYS1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL2Jpbi9ncmFmYW5hLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1ob21lcGF0aD0ie3suRGVwbG95RGlyfX0vYmluIiBcCiAgICAtLWNvbmZpZz0ie3suRGVwbG95RGlyfX0vY29uZi9ncmFmYW5hLmluaSIK" autogenFiles["/templates/scripts/run_node_exporter.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KY2QgIiR7REVQTE9ZX0RJUn0iIHx8IGV4aXQgMQoKZXhlYyA+ID4odGVlIC1pIC1hICJ7ey5Mb2dEaXJ9fS9ub2RlX2V4cG9ydGVyLmxvZyIpCmV4ZWMgMj4mMQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL25vZGVfZXhwb3J0ZXIvbm9kZV9leHBvcnRlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vbm9kZV9leHBvcnRlci9ub2RlX2V4cG9ydGVyIFwKe3stIGVuZH19CiAgICAtLXdlYi5saXN0ZW4tYWRkcmVzcz0iOnt7LlBvcnR9fSIgXAogICAgLS1jb2xsZWN0b3IudGNwc3RhdCBcCiAgICAtLWNvbGxlY3Rvci5zeXN0ZW1kIFwKICAgIC0tY29sbGVjdG9yLm1vdW50c3RhdHMgXAogICAgLS1jb2xsZWN0b3IubWVtaW5mb19udW1hIFwKICAgIC0tY29sbGVjdG9yLmludGVycnVwdHMgXAogICAgLS1jb2xsZWN0b3IuYnVkZHlpbmZvIFwKICAgIC0tY29sbGVjdG9yLnZtc3RhdC5maWVsZHM9Il4uKiIgXAogICAgLS1sb2cubGV2ZWw9ImluZm8iCg==" autogenFiles["/templates/scripts/run_pd.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5OYW1lfX09e3skcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLlBlZXJQb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3stICRwZC5OYW1lfX09e3skcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLlBlZXJQb3J0fX0KICAgIHt7LSBlbmR9fQogIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9wZC1zZXJ2ZXIgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3BkLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLS1uYW1lPSJ7ey5OYW1lfX0iIFwKICAgIC0tY2xpZW50LXVybHM9Int7LlNjaGVtZX19Oi8ve3suTGlzdGVuSG9zdH19Ont7LkNsaWVudFBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtY2xpZW50LXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5DbGllbnRQb3J0fX0iIFwKICAgIC0tcGVlci11cmxzPSJ7ey5TY2hlbWV9fTovL3t7LklQfX06e3suUGVlclBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtcGVlci11cmxzPSJ7ey5TY2hlbWV9fTovL3t7LklQfX06e3suUGVlclBvcnR9fSIgXAogICAgLS1kYXRhLWRpcj0ie3suRGF0YURpcn19IiBcCiAgICAtLWluaXRpYWwtY2x1c3Rlcj0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvcGQudG9tbCBcCiAgICAtLWxvZy1maWxlPSJ7ey5Mb2dEaXJ9fS9wZC5sb2ciIDI+PiAie3suTG9nRGlyfX0vcGRfc3RkZXJyLmxvZyIKICAK" autogenFiles["/templates/scripts/run_pd_scale.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL3BkLXNlcnZlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vcGQtc2VydmVyIFwKe3stIGVuZH19CiAgICAtLW5hbWU9Int7Lk5hbWV9fSIgXAogICAgLS1jbGllbnQtdXJscz0ie3suU2NoZW1lfX06Ly97ey5MaXN0ZW5Ib3N0fX06e3suQ2xpZW50UG9ydH19IiBcCiAgICAtLWFkdmVydGlzZS1jbGllbnQtdXJscz0ie3suU2NoZW1lfX06Ly97ey5JUH19Ont7LkNsaWVudFBvcnR9fSIgXAogICAgLS1wZWVyLXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5QZWVyUG9ydH19IiBcCiAgICAtLWFkdmVydGlzZS1wZWVyLXVybHM9Int7LlNjaGVtZX19Oi8ve3suSVB9fTp7ey5QZWVyUG9ydH19IiBcCiAgICAtLWRhdGEtZGlyPSJ7ey5EYXRhRGlyfX0iIFwKICAgIC0tam9pbj0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tY29uZmlnPWNvbmYvcGQudG9tbCBcCiAgICAtLWxvZy1maWxlPSJ7ey5Mb2dEaXJ9fS9wZC5sb2ciIDI+PiAie3suTG9nRGlyfX0vcGRfc3RkZXJyLmxvZyIKICAK" autogenFiles["/templates/scripts/run_prometheus.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgpERVBMT1lfRElSPXt7LkRlcGxveURpcn19CmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCiMgV0FSTklORzogVGhpcyBmaWxlIHdhcyBhdXRvLWdlbmVyYXRlZC4gRG8gbm90IGVkaXQhCiMgICAgICAgICAgQWxsIHlvdXIgZWRpdCBtaWdodCBiZSBvdmVyd3JpdHRlbiEKCmV4ZWMgPiA+KHRlZSAtaSAtYSAie3suTG9nRGlyfX0vcHJvbWV0aGV1cy5sb2ciKQpleGVjIDI+JjEKCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi9wcm9tZXRoZXVzL3Byb21ldGhldXMgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3Byb21ldGhldXMvcHJvbWV0aGV1cyBcCnt7LSBlbmR9fQogICAgLS1jb25maWcuZmlsZT0ie3suRGVwbG95RGlyfX0vY29uZi9wcm9tZXRoZXVzLnltbCIgXAogICAgLS13ZWIubGlzdGVuLWFkZHJlc3M9Ijp7ey5Qb3J0fX0iIFwKICAgIC0td2ViLmV4dGVybmFsLXVybD0iaHR0cDovL3t7LklQfX06e3suUG9ydH19LyIgXAogICAgLS13ZWIuZW5hYmxlLWFkbWluLWFwaSBcCiAgICAtLWxvZy5sZXZlbD0iaW5mbyIgXAogICAgLS1zdG9yYWdlLnRzZGIucGF0aD0ie3suRGF0YURpcn19IiBcCiAgICAtLXN0b3JhZ2UudHNkYi5yZXRlbnRpb249Int7LlJldGVudGlvbn19Igo=" - autogenFiles["/templates/scripts/run_pump.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL3B1bXAgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3B1bXAgXAp7ey0gZW5kfX0KICAgIC0tbm9kZS1pZD0ie3suTm9kZUlEfX0iIFwKICAgIC0tYWRkcj0iMC4wLjAuMDp7ey5Qb3J0fX0iIFwKICAgIC0tYWR2ZXJ0aXNlLWFkZHI9Int7Lkhvc3R9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQtdXJscz0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vcHVtcC5sb2ciIFwKICAgIC0tY29uZmlnPWNvbmYvcHVtcC50b21sIDI+PiAie3suTG9nRGlyfX0vcHVtcF9zdGRlcnIubG9nIgo=" + autogenFiles["/templates/scripts/run_pump.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5TY2hlbWV9fTovL3t7JHBkLklQfX06e3skcGQuQ2xpZW50UG9ydH19CiAgICB7ey0gZWxzZSAtfX0KICAgICAgLHt7LSAkcGQuU2NoZW1lfX06Ly97eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gYmluL3B1bXAgXAp7ey0gZWxzZX19CmV4ZWMgYmluL3B1bXAgXAp7ey0gZW5kfX0Ke3stIGlmIC5Ob2RlSUR9fQogICAgLS1ub2RlLWlkPSJ7ey5Ob2RlSUR9fSIgXAp7ey0gZW5kfX0KICAgIC0tYWRkcj0iMC4wLjAuMDp7ey5Qb3J0fX0iIFwKICAgIC0tYWR2ZXJ0aXNlLWFkZHI9Int7Lkhvc3R9fTp7ey5Qb3J0fX0iIFwKICAgIC0tcGQtdXJscz0ie3t0ZW1wbGF0ZSAiUERMaXN0IiAuRW5kcG9pbnRzfX0iIFwKICAgIC0tZGF0YS1kaXI9Int7LkRhdGFEaXJ9fSIgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vcHVtcC5sb2ciIFwKICAgIC0tY29uZmlnPWNvbmYvcHVtcC50b21sIDI+PiAie3suTG9nRGlyfX0vcHVtcF9zdGRlcnIubG9nIgo=" autogenFiles["/templates/scripts/run_tidb.sh.tpl"] = 
"IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCkRFUExPWV9ESVI9e3suRGVwbG95RGlyfX0KCmNkICIke0RFUExPWV9ESVJ9IiB8fCBleGl0IDEKCnt7LSBkZWZpbmUgIlBETGlzdCJ9fQogIHt7LSByYW5nZSAkaWR4LCAkcGQgOj0gLn19CiAgICB7ey0gaWYgZXEgJGlkeCAwfX0KICAgICAge3stICRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVsc2UgLX19CiAgICAgICx7eyRwZC5JUH19Ont7JHBkLkNsaWVudFBvcnR9fQogICAge3stIGVuZH19CiAge3stIGVuZH19Cnt7LSBlbmR9fQoKe3stIGlmIC5OdW1hTm9kZX19CmV4ZWMgbnVtYWN0bCAtLWNwdW5vZGViaW5kPXt7Lk51bWFOb2RlfX0gLS1tZW1iaW5kPXt7Lk51bWFOb2RlfX0gZW52IEdPREVCVUc9bWFkdmRvbnRuZWVkPTEgYmluL3RpZGItc2VydmVyIFwKe3stIGVsc2V9fQpleGVjIGVudiBHT0RFQlVHPW1hZHZkb250bmVlZD0xIGJpbi90aWRiLXNlcnZlciBcCnt7LSBlbmR9fQogICAgLVAge3suUG9ydH19IFwKICAgIC0tc3RhdHVzPSJ7ey5TdGF0dXNQb3J0fX0iIFwKICAgIC0taG9zdD0ie3suTGlzdGVuSG9zdH19IiBcCiAgICAtLWFkdmVydGlzZS1hZGRyZXNzPSJ7ey5JUH19IiBcCiAgICAtLXN0b3JlPSJ0aWt2IiBcCiAgICAtLXBhdGg9Int7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319IiBcCiAgICAtLWxvZy1zbG93LXF1ZXJ5PSJsb2cvdGlkYl9zbG93X3F1ZXJ5LmxvZyIgXAogICAgLS1jb25maWc9Y29uZi90aWRiLnRvbWwgXAogICAgLS1sb2ctZmlsZT0ie3suTG9nRGlyfX0vdGlkYi5sb2ciIDI+PiAie3suTG9nRGlyfX0vdGlkYl9zdGRlcnIubG9nIgo=" autogenFiles["/templates/scripts/run_tiflash.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCmNkICJ7ey5EZXBsb3lEaXJ9fSIgfHwgZXhpdCAxCgpleHBvcnQgUlVTVF9CQUNLVFJBQ0U9MQoKZXhwb3J0IFRaPSR7VFo6LS9ldGMvbG9jYWx0aW1lfQpleHBvcnQgTERfTElCUkFSWV9QQVRIPXt7LkRlcGxveURpcn19L2Jpbi90aWZsYXNoOiRMRF9MSUJSQVJZX1BBVEgKCmVjaG8gLW4gJ3N5bmMgLi4uICcKc3RhdD0kKHRpbWUgc3luYykKZWNobyBvawplY2hvICRzdGF0Cgp7ey0gaWYgLk51bWFOb2RlfX0KZXhlYyBudW1hY3RsIC0tY3B1bm9kZWJpbmQ9e3suTnVtYU5vZGV9fSAtLW1lbWJpbmQ9e3suTnVtYU5vZGV9fSAgXAp7ey0gZWxzZX19CmV4ZWMgXAp7ey0gZW5kfX0KICAgIGJpbi90aWZsYXNoL3RpZmxhc2ggc2VydmVyIC0tY29uZmlnLWZpbGUgY29uZi90aWZsYXNoLnRvbWw=" autogenFiles["/templates/scripts/run_tikv.sh.tpl"] = "IyEvYmluL2Jhc2gKc2V0IC1lCgojIFdBUk5JTkc6IFRoaXMgZmlsZSB3YXMgYXV0by1nZW5lcmF0ZWQuIERvIG5vdCBlZGl0IQojICAgICAgICAgIEFsbCB5b3VyIGVkaXQgbWlnaHQgYmUgb3ZlcndyaXR0ZW4hCmNkICJ7ey5EZXBsb3lEaXJ9fSIgfHwgZXhpdCAxCgplY2hvIC1uICdzeW5jIC4uLiAnCnN0YXQ9JCh0aW1lIHN5bmMgfHwgc3luYykKZWNobyBvawplY2hvICRzdGF0Cgp7ey0gZGVmaW5lICJQRExpc3QifX0KICB7ey0gcmFuZ2UgJGlkeCwgJHBkIDo9IC59fQogICAge3stIGlmIGVxICRpZHggMH19CiAgICAgIHt7LSAkcGQuSVB9fTp7eyRwZC5DbGllbnRQb3J0fX0KICAgIHt7LSBlbHNlIC19fQogICAgICAse3skcGQuSVB9fTp7eyRwZC5DbGllbnRQb3J0fX0KICAgIHt7LSBlbmR9fQogIHt7LSBlbmR9fQp7ey0gZW5kfX0KCnt7LSBpZiAuTnVtYU5vZGV9fQpleGVjIG51bWFjdGwgLS1jcHVub2RlYmluZD17ey5OdW1hTm9kZX19IC0tbWVtYmluZD17ey5OdW1hTm9kZX19IGJpbi90aWt2LXNlcnZlciBcCnt7LSBlbHNlfX0KZXhlYyBiaW4vdGlrdi1zZXJ2ZXIgXAp7ey0gZW5kfX0KICAgIC0tYWRkciAie3suTGlzdGVuSG9zdH19Ont7LlBvcnR9fSIgXAogICAgLS1hZHZlcnRpc2UtYWRkciAie3suSVB9fTp7ey5Qb3J0fX0iIFwKICAgIC0tc3RhdHVzLWFkZHIgInt7Lkxpc3Rlbkhvc3R9fTp7ey5TdGF0dXNQb3J0fX0iIFwKICAgIC0tcGQgInt7dGVtcGxhdGUgIlBETGlzdCIgLkVuZHBvaW50c319IiBcCiAgICAtLWRhdGEtZGlyICJ7ey5EYXRhRGlyfX0iIFwKICAgIC0tY29uZmlnIGNvbmYvdGlrdi50b21sIFwKICAgIC0tbG9nLWZpbGUgInt7LkxvZ0Rpcn19L3Rpa3YubG9nIiAyPj4gInt7LkxvZ0Rpcn19L3Rpa3Zfc3RkZXJyLmxvZyIK" diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index 9e950b579a..ee66582c0c 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -611,7 +611,7 @@ func (m *Manager) Display(clusterName string, opt operator.Options) error { // Check if there is some instance in tombstone state nodes, _ := 
operator.DestroyTombstone(ctx, t, true /* returnNodesOnly */, opt, tlsCfg) if len(nodes) != 0 { - color.Green("There are some nodes in state: `Tombstone`\n\tNodes: %+v\n\tYou can destroy them with the command: `tiup cluster prune %s`", nodes, clusterName) + color.Green("There are some nodes can be pruned: \n\tNodes: %+v\n\tYou can destroy them with the command: `tiup cluster prune %s`", nodes, clusterName) } } diff --git a/pkg/cluster/operation/scale_in.go b/pkg/cluster/operation/scale_in.go index b084bb4f4c..94d93cfa7d 100644 --- a/pkg/cluster/operation/scale_in.go +++ b/pkg/cluster/operation/scale_in.go @@ -315,13 +315,13 @@ func deleteMember( } case spec.ComponentDrainer: addr := instance.GetHost() + ":" + strconv.Itoa(instance.GetPort()) - err := binlogClient.OfflineDrainer(addr, addr) + err := binlogClient.OfflineDrainer(addr) if err != nil { - return errors.AddStack(err) + return err } case spec.ComponentPump: addr := instance.GetHost() + ":" + strconv.Itoa(instance.GetPort()) - err := binlogClient.OfflinePump(addr, addr) + err := binlogClient.OfflinePump(addr) if err != nil { return errors.AddStack(err) } diff --git a/pkg/cluster/spec/drainer.go b/pkg/cluster/spec/drainer.go index dc22666ab9..ea2a50cb8f 100644 --- a/pkg/cluster/spec/drainer.go +++ b/pkg/cluster/spec/drainer.go @@ -147,8 +147,13 @@ func (i *DrainerInstance) InitConfig( enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(DrainerSpec) + nodeID := i.GetHost() + ":" + strconv.Itoa(i.GetPort()) + // keep origin node id if is imported + if i.IsImported() { + nodeID = "" + } cfg := scripts.NewDrainerScript( - i.GetHost()+":"+strconv.Itoa(i.GetPort()), + nodeID, i.GetHost(), paths.Deploy, paths.Data[0], diff --git a/pkg/cluster/spec/pump.go b/pkg/cluster/spec/pump.go index 0a7e82cffc..dc87818f6b 100644 --- a/pkg/cluster/spec/pump.go +++ b/pkg/cluster/spec/pump.go @@ -146,8 +146,13 @@ func (i *PumpInstance) InitConfig( enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(PumpSpec) + nodeID := i.GetHost() + ":" + strconv.Itoa(i.GetPort()) + // keep origin node id if is imported + if i.IsImported() { + nodeID = "" + } cfg := scripts.NewPumpScript( - i.GetHost()+":"+strconv.Itoa(i.GetPort()), + nodeID, i.GetHost(), paths.Deploy, paths.Data[0], diff --git a/templates/scripts/run_drainer.sh.tpl b/templates/scripts/run_drainer.sh.tpl index 0d5a6b2993..2b5b7fca65 100644 --- a/templates/scripts/run_drainer.sh.tpl +++ b/templates/scripts/run_drainer.sh.tpl @@ -22,7 +22,9 @@ exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/drainer \ {{- else}} exec bin/drainer \ {{- end}} +{{- if .NodeID}} --node-id="{{.NodeID}}" \ +{{- end}} --addr="{{.IP}}:{{.Port}}" \ --pd-urls="{{template "PDList" .Endpoints}}" \ --data-dir="{{.DataDir}}" \ diff --git a/templates/scripts/run_pump.sh.tpl b/templates/scripts/run_pump.sh.tpl index 3109d524bf..323cb141e4 100644 --- a/templates/scripts/run_pump.sh.tpl +++ b/templates/scripts/run_pump.sh.tpl @@ -22,7 +22,9 @@ exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/pump \ {{- else}} exec bin/pump \ {{- end}} +{{- if .NodeID}} --node-id="{{.NodeID}}" \ +{{- end}} --addr="0.0.0.0:{{.Port}}" \ --advertise-addr="{{.Host}}:{{.Port}}" \ --pd-urls="{{template "PDList" .Endpoints}}" \ From afcaf6f0e0304d1da29e3031b611a8fcf5cd3b5b Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Thu, 19 Nov 2020 14:42:53 +0800 Subject: [PATCH 11/14] cluster: ignore no tispark master error when listing and scaling in clusters (#920) --- pkg/cluster/manager.go | 11 
++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index ee66582c0c..e27869d3eb 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -262,7 +262,8 @@ func (m *Manager) ListCluster() error { for _, name := range names { metadata, err := m.meta(name) - if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { + if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && + !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return perrs.Trace(err) } @@ -347,7 +348,9 @@ func (m *Manager) CleanCluster(clusterName string, gOpt operator.Options, cleanO func (m *Manager) DestroyCluster(clusterName string, gOpt operator.Options, destroyOpt operator.Options, skipConfirm bool) error { metadata, err := m.meta(clusterName) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && - !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { + !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) && + !errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) && + !errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) { return perrs.AddStack(err) } @@ -1366,7 +1369,9 @@ func (m *Manager) ScaleIn( } metadata, err := m.meta(clusterName) - if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { + if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && + !errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) && + !errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) { // ignore conflict check error, node may be deployed by former version // that lack of some certain conflict checks return perrs.AddStack(err) From d78a4462cb3cab02dfb6adc4ebc1213c5ab0aae7 Mon Sep 17 00:00:00 2001 From: Oboo Cheng Date: Thu, 19 Nov 2020 15:57:13 +0800 Subject: [PATCH 12/14] fix(install): install script export path (#918) --- install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.sh b/install.sh index 6165a8e84f..dc4fba837e 100755 --- a/install.sh +++ b/install.sh @@ -68,8 +68,8 @@ echo "Shell profile: ${bold}$PROFILE${sgr0}" case :$PATH: in *:$bin_dir:*) : "PATH already contains $bin_dir" ;; - *) printf 'export PATH=%s:$PATH\n' "$bin_dir" >> "$PROFILE" - echo "$PROFILE has been modified to to add tiup to PATH" + *) printf '\nexport PATH=%s:$PATH\n' "$bin_dir" >> "$PROFILE" + echo "$PROFILE has been modified to add tiup to PATH" echo "open a new terminal or ${bold}source ${PROFILE}${sgr0} to use it" ;; esac From 5116917cdf0cd06dca09fc69d99cef2785749358 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Thu, 19 Nov 2020 19:05:29 +0800 Subject: [PATCH 13/14] Bump v1.2.4 (#921) --- CHANGELOG.md | 23 +++++++++++++++++++++++ pkg/version/version.go | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f39a27d19e..7e024a16ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ TiUP Changelog +## [1.2.4] 2020.11.19 + +### Fixes + +- Fix the issue that Pump & Drainer have different node IDs between tidb-ansible and TiUP ([#903](https://github.com/pingcap/tiup/pull/903), [@lucklove](https://github.com/lucklove)) + - For a cluster imported from tidb-ansible, if the pump or drainer is restarted, it will start with a new node ID + - Risk of this issue: binlog may not work correctly after restarting pump or drainer +- Fix the issue that the audit log may get lost in some special cases ([#879](https://github.com/pingcap/tiup/pull/879), [#882](https://github.com/pingcap/tiup/pull/882), [@9547](https://github.com/9547)) + - 
If the user executes two commands one after the other, and the second one quits within 1 second, the audit log of the first command will be overwritten by the second one + - Risk caused by this issue: some audit logs may get lost in the above case +- Fix the issue that new components deployed with `tiup cluster scale-out` don't auto-start after rebooting ([#905](https://github.com/pingcap/tiup/pull/905), [@9547](https://github.com/9547)) + - Risk caused by this issue: the cluster may be unavailable after rebooting +- Fix the issue that the data directory of TiFlash is not deleted if multiple data directories are specified ([#871](https://github.com/pingcap/tiup/pull/871), [@9547](https://github.com/9547)) +- Fix the issue that `node_exporter` and `blackbox_exporter` are not cleaned up after scaling in all instances on a specified host ([#857](https://github.com/pingcap/tiup/pull/857), [@9547](https://github.com/9547)) +- Fix the issue that the patch command fails when trying to patch a DM cluster ([#884](https://github.com/pingcap/tiup/pull/884), [@lucklove](https://github.com/lucklove)) +- Fix the issue that the bench component reports `Error 1105: client has multi-statement capability disabled` ([#887](https://github.com/pingcap/tiup/pull/887), [@mahjonp](https://github.com/mahjonp)) +- Fix the issue that the TiSpark node can't be upgraded ([#901](https://github.com/pingcap/tiup/pull/901), [@lucklove](https://github.com/lucklove)) +- Fix the issue that tiup-playground can't start TiFlash with the newest nightly PD ([#902](https://github.com/pingcap/tiup/pull/902), [@lucklove](https://github.com/lucklove)) + +### Improvements + +- Ignore the no TiSpark master error when listing clusters, since the master node may have been removed by `scale-in --force` ([#920](https://github.com/pingcap/tiup/pull/920), [@AstroProfundis](https://github.com/AstroProfundis)) + ## [1.2.3] 2020.10.30 ### Fixes diff --git a/pkg/version/version.go b/pkg/version/version.go index 7fbd607ec5..d6ee940575 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -24,7 +24,7 @@ var ( // TiUPVerMinor is the minor version of TiUP TiUPVerMinor = 2 // TiUPVerPatch is the patch version of TiUP - TiUPVerPatch = 3 + TiUPVerPatch = 4 // TiUPVerName is alternative name of the version TiUPVerName = "tiup" // GitHash is the current git commit hash From d1c4866d4f07998daa4076e7f29d8407075cd0f6 Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Fri, 20 Nov 2020 18:17:25 +0800 Subject: [PATCH 14/14] cluster: fix tispark master scaling handling (#924) --- pkg/cluster/manager.go | 11 ++++------- pkg/cluster/operation/scale_in.go | 18 ++++++++++++++++++ pkg/cluster/spec/spec_manager.go | 20 ++++++++++++-------- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/pkg/cluster/manager.go b/pkg/cluster/manager.go index e27869d3eb..5ebbc86aa8 100644 --- a/pkg/cluster/manager.go +++ b/pkg/cluster/manager.go @@ -1481,18 +1481,15 @@ func (m *Manager) ScaleOut( sshType executor.SSHType, ) error { metadata, err := m.meta(clusterName) - if err != nil { // not allowing validation errors + // allow specific validation errors so that user can recover a broken + // cluster if it is somehow in a bad state. + if err != nil && + !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return perrs.AddStack(err) } topo := metadata.GetTopology() base := metadata.GetBaseMeta() - - // not allowing validation errors - if err := topo.Validate(); err != nil { - return err - } - // Inherit existing global configuration.
We must assign the inherited values before unmarshalling // because some default value rely on the global options and monitored options. newPart := topo.NewPart() diff --git a/pkg/cluster/operation/scale_in.go b/pkg/cluster/operation/scale_in.go index 94d93cfa7d..3b8ee1a171 100644 --- a/pkg/cluster/operation/scale_in.go +++ b/pkg/cluster/operation/scale_in.go @@ -117,6 +117,24 @@ func ScaleInCluster( return errors.New("cannot delete all TiKV servers") } + // Cannot delete TiSpark master server if there's any TiSpark worker remains + if len(deletedDiff[spec.ComponentTiSpark]) > 0 { + var cntDiffTiSparkMaster int + var cntDiffTiSparkWorker int + for _, inst := range deletedDiff[spec.ComponentTiSpark] { + switch inst.Role() { + case spec.RoleTiSparkMaster: + cntDiffTiSparkMaster++ + case spec.RoleTiSparkWorker: + cntDiffTiSparkWorker++ + } + } + if cntDiffTiSparkMaster == len(cluster.TiSparkMasters) && + cntDiffTiSparkWorker < len(cluster.TiSparkWorkers) { + return errors.New("cannot delete tispark master when there are workers left") + } + } + var pdEndpoint []string for _, instance := range (&spec.PDComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { diff --git a/pkg/cluster/spec/spec_manager.go b/pkg/cluster/spec/spec_manager.go index 7b76f1901f..f03ef6a4c5 100644 --- a/pkg/cluster/spec/spec_manager.go +++ b/pkg/cluster/spec/spec_manager.go @@ -14,14 +14,16 @@ package spec import ( + "errors" "io/ioutil" "os" "path/filepath" "github.com/joomcode/errorx" - "github.com/pingcap/errors" + perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cliutil" "github.com/pingcap/tiup/pkg/file" + "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "gopkg.in/yaml.v2" @@ -122,12 +124,12 @@ func (s *SpecManager) Metadata(clusterName string, meta interface{}) error { yamlFile, err := ioutil.ReadFile(fname) if err != nil { - return errors.AddStack(err) + return perrs.AddStack(err) } err = yaml.Unmarshal(yamlFile, meta) if err != nil { - return errors.AddStack(err) + return perrs.AddStack(err) } return nil @@ -142,7 +144,7 @@ func (s *SpecManager) Exist(name string) (exist bool, err error) { if os.IsNotExist(err) { return false, nil } - return false, errors.AddStack(err) + return false, perrs.AddStack(err) } return true, nil @@ -160,7 +162,7 @@ func (s *SpecManager) List() (names []string, err error) { if os.IsNotExist(err) { return nil, nil } - return nil, errors.AddStack(err) + return nil, perrs.AddStack(err) } for _, info := range fileInfos { @@ -178,13 +180,15 @@ func (s *SpecManager) GetAllClusters() (map[string]Metadata, error) { clusters := make(map[string]Metadata) names, err := s.List() if err != nil { - return nil, errors.AddStack(err) + return nil, perrs.AddStack(err) } for _, name := range names { metadata := s.NewMetadata() err = s.Metadata(name, metadata) - if err != nil { - return nil, errors.Trace(err) + // clusters with topology validation errors should also be listed + if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && + !errors.Is(perrs.Cause(err), ErrNoTiSparkMaster) { + return nil, perrs.Trace(err) } clusters[name] = metadata }