From e280d8375e7eab313313f7e456bd0ffa23f35de9 Mon Sep 17 00:00:00 2001 From: qupeng Date: Thu, 1 Apr 2021 19:23:24 +0800 Subject: [PATCH 1/8] enable but inact memory profiling (#1272) --- components/playground/instance/process.go | 34 +++++++++++++++-------- components/playground/instance/tikv.go | 4 ++- embed/templates/scripts/run_tikv.sh.tpl | 2 ++ pkg/exec/run.go | 24 ++++++++-------- 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/components/playground/instance/process.go b/components/playground/instance/process.go index 78bb1ec893..b4e3d940eb 100644 --- a/components/playground/instance/process.go +++ b/components/playground/instance/process.go @@ -2,6 +2,7 @@ package instance import ( "context" + "fmt" "io" "os" "os/exec" @@ -84,8 +85,8 @@ func (p *process) Cmd() *exec.Cmd { return p.cmd } -// NewComponentProcess create a Process instance. -func NewComponentProcess(ctx context.Context, dir, binPath, component string, version utils.Version, arg ...string) (Process, error) { +// NewComponentProcessWithEnvs create a Process instance with given environment variables. 
+func NewComponentProcessWithEnvs(ctx context.Context, dir, binPath, component string, version utils.Version, envs map[string]string, arg ...string) (Process, error) { if dir == "" { panic("dir must be set") } @@ -95,16 +96,22 @@ func NewComponentProcess(ctx context.Context, dir, binPath, component string, ve env := environment.GlobalEnv() params := &tiupexec.PrepareCommandParams{ - Ctx: ctx, - Component: component, - Version: version, - BinPath: binPath, - InstanceDir: dir, - WD: dir, - Args: arg, - SysProcAttr: SysProcAttr, - Env: env, + Ctx: ctx, + Component: component, + Version: version, + BinPath: binPath, + InstanceDir: dir, + WD: dir, + Args: arg, + EnvVariables: make([]string, 0), + SysProcAttr: SysProcAttr, + Env: env, } + for k, v := range envs { + pair := fmt.Sprintf("%s=%s", k, v) + params.EnvVariables = append(params.EnvVariables, pair) + } + cmd, err := tiupexec.PrepareCommand(params) if err != nil { return nil, err @@ -112,3 +119,8 @@ func NewComponentProcess(ctx context.Context, dir, binPath, component string, ve return &process{cmd: cmd}, nil } + +// NewComponentProcess create a Process instance. +func NewComponentProcess(ctx context.Context, dir, binPath, component string, version utils.Version, arg ...string) (Process, error) { + return NewComponentProcessWithEnvs(ctx, dir, binPath, component, version, nil, arg...) 
+} diff --git a/components/playground/instance/tikv.go b/components/playground/instance/tikv.go index a4ae77e64a..70a3dec5c6 100644 --- a/components/playground/instance/tikv.go +++ b/components/playground/instance/tikv.go @@ -71,7 +71,9 @@ func (inst *TiKVInstance) Start(ctx context.Context, version utils.Version) erro } var err error - if inst.Process, err = NewComponentProcess(ctx, inst.Dir, inst.BinPath, "tikv", version, args...); err != nil { + envs := make(map[string]string) + envs["MALLOC_CONF"] = "prof:true,prof_active:true,prof.active:false" + if inst.Process, err = NewComponentProcessWithEnvs(ctx, inst.Dir, inst.BinPath, "tikv", version, envs, args...); err != nil { return err } logIfErr(inst.Process.SetOutputFile(inst.LogFile())) diff --git a/embed/templates/scripts/run_tikv.sh.tpl b/embed/templates/scripts/run_tikv.sh.tpl index f70b6e42ef..ed82bec3e0 100644 --- a/embed/templates/scripts/run_tikv.sh.tpl +++ b/embed/templates/scripts/run_tikv.sh.tpl @@ -20,6 +20,8 @@ echo $stat {{- end}} {{- end}} +export MALLOC_CONF="prof:true,prof_active:true,prof.active:false" + {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tikv-server \ {{- else}} diff --git a/pkg/exec/run.go b/pkg/exec/run.go index 256cf677ab..c7387549d7 100644 --- a/pkg/exec/run.go +++ b/pkg/exec/run.go @@ -127,17 +127,18 @@ func base62Tag() string { // PrepareCommandParams for PrepareCommand. 
type PrepareCommandParams struct { - Ctx context.Context - Component string - Version utils.Version - BinPath string - Tag string - InstanceDir string - WD string - Args []string - SysProcAttr *syscall.SysProcAttr - Env *environment.Environment - CheckUpdate bool + Ctx context.Context + Component string + Version utils.Version + BinPath string + Tag string + InstanceDir string + WD string + Args []string + EnvVariables []string + SysProcAttr *syscall.SysProcAttr + Env *environment.Environment + CheckUpdate bool } // PrepareCommand will download necessary component and returns a *exec.Cmd @@ -222,6 +223,7 @@ func PrepareCommand(p *PrepareCommandParams) (*exec.Cmd, error) { fmt.Sprintf("%s=%s", localdata.EnvTag, p.Tag), } envs = append(envs, os.Environ()...) + envs = append(envs, p.EnvVariables...) // init the command c := exec.CommandContext(p.Ctx, binPath, p.Args...) From 7e1f73b2183f80ff8129aaef70041ab656547604 Mon Sep 17 00:00:00 2001 From: 9547 Date: Thu, 1 Apr 2021 20:11:24 +0800 Subject: [PATCH 2/8] Feature/confirm reload patch rename (#1263) --- components/cluster/command/patch.go | 2 +- components/cluster/command/reload.go | 2 +- components/cluster/command/rename.go | 2 +- components/cluster/command/restart.go | 2 +- components/cluster/command/stop.go | 2 +- components/dm/command/patch.go | 2 +- components/dm/command/reload.go | 2 +- components/dm/command/restart.go | 2 +- components/dm/command/stop.go | 2 +- pkg/cluster/manager/basic.go | 32 ++++++++++++++++++++++-- pkg/cluster/manager/patch.go | 18 ++++++++++++- pkg/cluster/manager/reload.go | 19 +++++++++++++- pkg/cluster/manager/rename.go | 14 +++++++++-- tests/tiup-cluster/script/cmd_subtest.sh | 4 +-- tests/tiup-cluster/script/scale_core.sh | 2 +- 15 files changed, 89 insertions(+), 18 deletions(-) diff --git a/components/cluster/command/patch.go b/components/cluster/command/patch.go index 0fda5b7bfe..0be9096cdc 100644 --- a/components/cluster/command/patch.go +++ b/components/cluster/command/patch.go 
@@ -41,7 +41,7 @@ func newPatchCmd() *cobra.Command { clusterName := args[0] teleCommand = append(teleCommand, scrubClusterName(clusterName)) - return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode) + return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode, skipConfirm) }, } diff --git a/components/cluster/command/reload.go b/components/cluster/command/reload.go index abfd92ade2..711faaf87e 100644 --- a/components/cluster/command/reload.go +++ b/components/cluster/command/reload.go @@ -36,7 +36,7 @@ func newReloadCmd() *cobra.Command { clusterName := args[0] teleCommand = append(teleCommand, scrubClusterName(clusterName)) - return cm.Reload(clusterName, gOpt, skipRestart) + return cm.Reload(clusterName, gOpt, skipRestart, skipConfirm) }, } diff --git a/components/cluster/command/rename.go b/components/cluster/command/rename.go index 68ece2590b..0bae70e4a6 100644 --- a/components/cluster/command/rename.go +++ b/components/cluster/command/rename.go @@ -34,7 +34,7 @@ func newRenameCmd() *cobra.Command { newClusterName := args[1] teleCommand = append(teleCommand, scrubClusterName(oldClusterName)) - return cm.Rename(oldClusterName, gOpt, newClusterName) + return cm.Rename(oldClusterName, gOpt, newClusterName, skipConfirm) }, } diff --git a/components/cluster/command/restart.go b/components/cluster/command/restart.go index ce5ea90be6..404da952ad 100644 --- a/components/cluster/command/restart.go +++ b/components/cluster/command/restart.go @@ -33,7 +33,7 @@ func newRestartCmd() *cobra.Command { clusterName := args[0] teleCommand = append(teleCommand, scrubClusterName(clusterName)) - return cm.RestartCluster(clusterName, gOpt) + return cm.RestartCluster(clusterName, gOpt, skipConfirm) }, } diff --git a/components/cluster/command/stop.go b/components/cluster/command/stop.go index b38d81282b..400dd39b88 100644 --- a/components/cluster/command/stop.go +++ b/components/cluster/command/stop.go @@ -33,7 +33,7 @@ func newStopCmd() *cobra.Command { 
clusterName := args[0] teleCommand = append(teleCommand, scrubClusterName(clusterName)) - return cm.StopCluster(clusterName, gOpt) + return cm.StopCluster(clusterName, gOpt, skipConfirm) }, } diff --git a/components/dm/command/patch.go b/components/dm/command/patch.go index e45ef2bf21..11c662b26b 100644 --- a/components/dm/command/patch.go +++ b/components/dm/command/patch.go @@ -41,7 +41,7 @@ func newPatchCmd() *cobra.Command { clusterName := args[0] - return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode) + return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode, skipConfirm) }, } diff --git a/components/dm/command/reload.go b/components/dm/command/reload.go index 0d7e526e51..ee77af7ee5 100644 --- a/components/dm/command/reload.go +++ b/components/dm/command/reload.go @@ -35,7 +35,7 @@ func newReloadCmd() *cobra.Command { clusterName := args[0] - return cm.Reload(clusterName, gOpt, skipRestart) + return cm.Reload(clusterName, gOpt, skipRestart, skipConfirm) }, } diff --git a/components/dm/command/restart.go b/components/dm/command/restart.go index ef19a3d774..fc9e08e2d6 100644 --- a/components/dm/command/restart.go +++ b/components/dm/command/restart.go @@ -28,7 +28,7 @@ func newRestartCmd() *cobra.Command { clusterName := args[0] - return cm.RestartCluster(clusterName, gOpt) + return cm.RestartCluster(clusterName, gOpt, skipConfirm) }, } diff --git a/components/dm/command/stop.go b/components/dm/command/stop.go index 2eeb12389f..6c4b7204a7 100644 --- a/components/dm/command/stop.go +++ b/components/dm/command/stop.go @@ -28,7 +28,7 @@ func newStopCmd() *cobra.Command { clusterName := args[0] - return cm.StopCluster(clusterName, gOpt) + return cm.StopCluster(clusterName, gOpt, skipConfirm) }, } diff --git a/pkg/cluster/manager/basic.go b/pkg/cluster/manager/basic.go index 1e597c6b8a..b47574f3e6 100644 --- a/pkg/cluster/manager/basic.go +++ b/pkg/cluster/manager/basic.go @@ -16,9 +16,13 @@ package manager import ( "context" "errors" + "fmt" 
+ "strings" + "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" + "github.com/pingcap/tiup/pkg/cliutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" @@ -115,7 +119,7 @@ func (m *Manager) StartCluster(name string, options operator.Options, fn ...func } // StopCluster stop the cluster. -func (m *Manager) StopCluster(name string, options operator.Options) error { +func (m *Manager) StopCluster(name string, options operator.Options, skipConfirm bool) error { metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err @@ -129,6 +133,18 @@ func (m *Manager) StopCluster(name string, options operator.Options) error { return err } + if !skipConfirm { + if err := cliutil.PromptForConfirmOrAbortError( + fmt.Sprintf("Will stop the cluster %s with nodes: %s, roles: %s.\nDo you want to continue? [y/N]:", + color.HiYellowString(name), + color.HiRedString(strings.Join(options.Nodes, ",")), + color.HiRedString(strings.Join(options.Roles, ",")), + ), + ); err != nil { + return err + } + } + t := m.sshTaskBuilder(name, topo, base.User, options). Func("StopCluster", func(ctx context.Context) error { return operator.Stop(ctx, topo, options, tlsCfg) @@ -148,7 +164,7 @@ func (m *Manager) StopCluster(name string, options operator.Options) error { } // RestartCluster restart the cluster. 
-func (m *Manager) RestartCluster(name string, options operator.Options) error { +func (m *Manager) RestartCluster(name string, options operator.Options, skipConfirm bool) error { metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err @@ -162,6 +178,18 @@ func (m *Manager) RestartCluster(name string, options operator.Options) error { return err } + if !skipConfirm { + if err := cliutil.PromptForConfirmOrAbortError( + fmt.Sprintf("Will restart the cluster %s with nodes: %s roles: %s.\nDo you want to continue? [y/N]:", + color.HiYellowString(name), + color.HiYellowString(strings.Join(options.Nodes, ",")), + color.HiYellowString(strings.Join(options.Roles, ",")), + ), + ); err != nil { + return err + } + } + t := m.sshTaskBuilder(name, topo, base.User, options). Func("RestartCluster", func(ctx context.Context) error { return operator.Restart(ctx, topo, options, tlsCfg) diff --git a/pkg/cluster/manager/patch.go b/pkg/cluster/manager/patch.go index 78feabe43f..fae761e250 100644 --- a/pkg/cluster/manager/patch.go +++ b/pkg/cluster/manager/patch.go @@ -19,10 +19,13 @@ import ( "os" "os/exec" "path" + "strings" + "github.com/fatih/color" "github.com/joomcode/errorx" "github.com/pingcap/errors" perrs "github.com/pingcap/errors" + "github.com/pingcap/tiup/pkg/cliutil" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" @@ -33,7 +36,7 @@ import ( ) // Patch the cluster. 
-func (m *Manager) Patch(name string, packagePath string, opt operator.Options, overwrite, offline bool) error { +func (m *Manager) Patch(name string, packagePath string, opt operator.Options, overwrite, offline, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } @@ -50,6 +53,19 @@ func (m *Manager) Patch(name string, packagePath string, opt operator.Options, o return perrs.New("specified package not exists") } + if !skipConfirm { + if err := cliutil.PromptForConfirmOrAbortError( + fmt.Sprintf("Will patch the cluster %s with package path is %s, nodes: %s, roles: %s.\nDo you want to continue? [y/N]:", + color.HiYellowString(name), + color.HiYellowString(packagePath), + color.HiRedString(strings.Join(opt.Nodes, ",")), + color.HiRedString(strings.Join(opt.Roles, ",")), + ), + ); err != nil { + return err + } + } + insts, err := instancesToPatch(topo, opt) if err != nil { return err diff --git a/pkg/cluster/manager/reload.go b/pkg/cluster/manager/reload.go index ab3291b327..757601214c 100644 --- a/pkg/cluster/manager/reload.go +++ b/pkg/cluster/manager/reload.go @@ -15,9 +15,13 @@ package manager import ( "context" + "fmt" + "strings" + "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" + "github.com/pingcap/tiup/pkg/cliutil" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" @@ -26,7 +30,7 @@ import ( ) // Reload the cluster. 
-func (m *Manager) Reload(name string, opt operator.Options, skipRestart bool) error { +func (m *Manager) Reload(name string, opt operator.Options, skipRestart, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } @@ -38,6 +42,19 @@ func (m *Manager) Reload(name string, opt operator.Options, skipRestart bool) er return err } + if !skipConfirm { + if err := cliutil.PromptForConfirmOrAbortError( + fmt.Sprintf("Will reload the cluster %s with restart policy is %s, nodes: %s, roles: %s.\nDo you want to continue? [y/N]:", + color.HiYellowString(name), + color.HiRedString(fmt.Sprintf("%v", !skipRestart)), + color.HiRedString(strings.Join(opt.Nodes, ",")), + color.HiRedString(strings.Join(opt.Roles, ",")), + ), + ); err != nil { + return err + } + } + topo := metadata.GetTopology() base := metadata.GetBaseMeta() diff --git a/pkg/cluster/manager/rename.go b/pkg/cluster/manager/rename.go index 0be9f465ed..3a82048aae 100644 --- a/pkg/cluster/manager/rename.go +++ b/pkg/cluster/manager/rename.go @@ -14,8 +14,10 @@ package manager import ( + "fmt" "os" + "github.com/fatih/color" "github.com/pingcap/tiup/pkg/cliutil" "github.com/pingcap/tiup/pkg/cluster/clusterutil" operator "github.com/pingcap/tiup/pkg/cluster/operation" @@ -25,7 +27,7 @@ import ( ) // Rename the cluster -func (m *Manager) Rename(name string, opt operator.Options, newName string) error { +func (m *Manager) Rename(name string, opt operator.Options, newName string, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } @@ -44,6 +46,14 @@ func (m *Manager) Rename(name string, opt operator.Options, newName string) erro WithProperty(cliutil.SuggestionFromFormat("Please specify another cluster name")) } + if !skipConfirm { + if err := cliutil.PromptForConfirmOrAbortError( + fmt.Sprintf("Will rename the cluster name from %s to %s.\nDo you confirm this action? 
[y/N]:", color.HiYellowString(name), color.HiYellowString(newName)), + ); err != nil { + return err + } + } + _, err := m.meta(name) if err != nil { // refuse renaming if current cluster topology is not valid return err @@ -56,5 +66,5 @@ func (m *Manager) Rename(name string, opt operator.Options, newName string) erro log.Infof("Rename cluster `%s` -> `%s` successfully", name, newName) opt.Roles = []string{spec.ComponentGrafana, spec.ComponentPrometheus} - return m.Reload(newName, opt, false) + return m.Reload(newName, opt, false, skipConfirm) } diff --git a/tests/tiup-cluster/script/cmd_subtest.sh b/tests/tiup-cluster/script/cmd_subtest.sh index 3e54c34d98..e34c92e57f 100755 --- a/tests/tiup-cluster/script/cmd_subtest.sh +++ b/tests/tiup-cluster/script/cmd_subtest.sh @@ -116,9 +116,9 @@ function cmd_subtest() { echo "$display_result" | grep "Since" # Test rename - tiup-cluster $client rename $name "tmp-cluster-name" + tiup-cluster $client --yes rename $name "tmp-cluster-name" tiup-cluster $client display "tmp-cluster-name" - tiup-cluster $client rename "tmp-cluster-name" $name + tiup-cluster $client --yes rename "tmp-cluster-name" $name # Test enable & disable tiup-cluster $client exec $name -R tidb --command="systemctl status tidb-4000|grep 'enabled;'" diff --git a/tests/tiup-cluster/script/scale_core.sh b/tests/tiup-cluster/script/scale_core.sh index 508b58a635..05961f8615 100755 --- a/tests/tiup-cluster/script/scale_core.sh +++ b/tests/tiup-cluster/script/scale_core.sh @@ -29,7 +29,7 @@ function scale_core() { tiup-cluster $client display $name - tiup-cluster $client reload $name --skip-restart + tiup-cluster $client --yes reload $name --skip-restart if [ $test_tls = true ]; then total_sub_one=18 From cf55cab8a71a01d5ad328f69db1abe812db773ac Mon Sep 17 00:00:00 2001 From: 9547 Date: Fri, 2 Apr 2021 11:11:25 +0800 Subject: [PATCH 3/8] cluster: start pd,dm-master in sequentially (#1262) --- pkg/cluster/manager/builder.go | 2 +- pkg/cluster/operation/action.go | 20 
++++++++++++++++++++ pkg/cluster/operation/operation.go | 1 + 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/manager/builder.go b/pkg/cluster/manager/builder.go index 6c01ce3ffc..fcbe520b71 100644 --- a/pkg/cluster/manager/builder.go +++ b/pkg/cluster/manager/builder.go @@ -306,7 +306,7 @@ func buildScaleOutTask( return m.specManager.SaveMeta(name, metadata) }). Func("StartCluster", func(ctx context.Context) error { - return operator.Start(ctx, newPart, operator.Options{OptTimeout: gOpt.OptTimeout}, tlsCfg) + return operator.Start(ctx, newPart, operator.Options{OptTimeout: gOpt.OptTimeout, Operation: operator.ScaleOutOperation}, tlsCfg) }). Parallel(false, refreshConfigTasks...). Parallel(false, buildReloadPromTasks(metadata.GetTopology())...) diff --git a/pkg/cluster/operation/action.go b/pkg/cluster/operation/action.go index a2cc9f07f0..f30921594f 100644 --- a/pkg/cluster/operation/action.go +++ b/pkg/cluster/operation/action.go @@ -460,6 +460,14 @@ func StartComponent(ctx context.Context, instances []spec.Instance, options Opti name := instances[0].ComponentName() log.Infof("Starting component %s", name) + // start instances in serial for Raft related components + // eg: PD has more strict restrictions on the capacity expansion process, + // that is, there should be only one node in the peer-join stage at most + // ref https://github.com/tikv/pd/blob/d38b36714ccee70480c39e07126e3456b5fb292d/server/join/join.go#L179-L191 + if options.Operation == ScaleOutOperation && (name == spec.ComponentPD || name == spec.ComponentDMMaster) { + return serialStartInstances(ctx, instances, options, tlsCfg) + } + errg, _ := errgroup.WithContext(ctx) for _, ins := range instances { @@ -484,6 +492,18 @@ func StartComponent(ctx context.Context, instances []spec.Instance, options Opti return errg.Wait() } +func serialStartInstances(ctx context.Context, instances []spec.Instance, options Options, tlsCfg *tls.Config) error { + for _, ins := range instances { 
+ if err := ins.PrepareStart(ctx, tlsCfg); err != nil { + return err + } + if err := startInstance(ctx, ins, options.OptTimeout); err != nil { + return err + } + } + return nil +} + // StopMonitored stop BlackboxExporter and NodeExporter func StopMonitored(ctx context.Context, instance spec.Instance, options *spec.MonitoredOptions, timeout uint64) error { ports := map[string]int{ diff --git a/pkg/cluster/operation/operation.go b/pkg/cluster/operation/operation.go index 7a79130cc8..a729dad539 100644 --- a/pkg/cluster/operation/operation.go +++ b/pkg/cluster/operation/operation.go @@ -43,6 +43,7 @@ type Options struct { // Show uptime or not ShowUptime bool + Operation Operation } // Operation represents the type of cluster operation From 90493569bb2ab8017a4a5abe0f9e29fada35a825 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Fri, 2 Apr 2021 14:31:24 +0800 Subject: [PATCH 4/8] Fix the compatibility with PD v3.x (#1274) --- pkg/cluster/api/pdapi.go | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/api/pdapi.go b/pkg/cluster/api/pdapi.go index 7dae4dafbe..d4ea9a555a 100644 --- a/pkg/cluster/api/pdapi.go +++ b/pkg/cluster/api/pdapi.go @@ -30,10 +30,12 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/tiup/pkg/logger/log" "github.com/pingcap/tiup/pkg/utils" + "golang.org/x/mod/semver" ) // PDClient is an HTTP client of the PD server type PDClient struct { + version string addrs []string tlsEnabled bool httpClient *utils.HTTPClient @@ -46,11 +48,30 @@ func NewPDClient(addrs []string, timeout time.Duration, tlsConfig *tls.Config) * enableTLS = true } - return &PDClient{ + cli := &PDClient{ addrs: addrs, tlsEnabled: enableTLS, httpClient: utils.NewHTTPClient(timeout, tlsConfig), } + + cli.tryIdentifyVersion() + return cli +} + +func (pc *PDClient) tryIdentifyVersion() { + endpoints := pc.getEndpoints(pdVersionURI) + response := map[string]string{} + _, err := tryURLs(endpoints, func(endpoint 
string) ([]byte, error) { + body, err := pc.httpClient.Get(endpoint) + if err != nil { + return body, err + } + + return body, json.Unmarshal(body, &response) + }) + if err == nil { + pc.version = response["version"] + } } // GetURL builds the the client URL of PDClient @@ -70,6 +91,7 @@ const ( // nolint (some is unused now) var ( pdPingURI = "pd/ping" + pdVersionURI = "pd/api/v1/version" pdConfigURI = "pd/api/v1/config" pdClusterIDURI = "pd/api/v1/cluster" pdConfigReplicate = "pd/api/v1/config/replicate" @@ -735,6 +757,11 @@ func (pc *PDClient) CheckRegion(state string) (*RegionsInfo, error) { // SetReplicationConfig sets a config key value of PD replication, it has the // same effect as `pd-ctl config set key value` func (pc *PDClient) SetReplicationConfig(key string, value int) error { + // Only support for pd version >= v4.0.0 + if pc.version == "" || semver.Compare(pc.version, "v4.0.0") < 0 { + return nil + } + data := map[string]interface{}{"set": map[string]interface{}{key: value}} body, err := json.Marshal(data) if err != nil { @@ -747,6 +774,11 @@ func (pc *PDClient) SetReplicationConfig(key string, value int) error { // SetAllStoreLimits sets store for all stores and types, it has the same effect // as `pd-ctl store limit all value` func (pc *PDClient) SetAllStoreLimits(value int) error { + // Only support for pd version >= v4.0.0 + if pc.version == "" || semver.Compare(pc.version, "v4.0.0") < 0 { + return nil + } + data := map[string]interface{}{"rate": value} body, err := json.Marshal(data) if err != nil { From c211098a3e1af09ae44204354bf0157af2f74688 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Sat, 3 Apr 2021 18:29:24 +0800 Subject: [PATCH 5/8] Fix the issue that checkpoint can't parse large audit log (#1259) --- pkg/checkpoint/checkpoint.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/checkpoint/checkpoint.go b/pkg/checkpoint/checkpoint.go index 8a2d714cf3..56533ad7b2 100644 --- a/pkg/checkpoint/checkpoint.go +++ 
b/pkg/checkpoint/checkpoint.go @@ -38,6 +38,9 @@ const ( goroutineKey = contextKey("CHECKPOINT_GOROUTINE") funcKey = "__func__" hashKey = "__hash__" + + // At most 10M for each line in audit log + maxTokenSize = 10 * 1024 * 1024 ) var ( @@ -125,6 +128,7 @@ func NewCheckPoint(r io.Reader) (*CheckPoint, error) { cp := CheckPoint{points: make([]map[string]interface{}, 0)} scanner := bufio.NewScanner(r) + scanner.Buffer(nil, maxTokenSize) for scanner.Scan() { line := scanner.Text() m, err := checkLine(line) From e60f8224f1f78447dd1d4b2938a04d3eee890141 Mon Sep 17 00:00:00 2001 From: 9547 Date: Sat, 3 Apr 2021 18:47:25 +0800 Subject: [PATCH 6/8] cluster: reload shoud pass through ignore-config-check (#1265) --- pkg/cluster/manager/reload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cluster/manager/reload.go b/pkg/cluster/manager/reload.go index 757601214c..73cbecaafc 100644 --- a/pkg/cluster/manager/reload.go +++ b/pkg/cluster/manager/reload.go @@ -69,7 +69,7 @@ func (m *Manager) Reload(name string, opt operator.Options, skipRestart, skipCon } }) - refreshConfigTasks, hasImported := buildRegenConfigTasks(m, name, topo, base, nil, false) + refreshConfigTasks, hasImported := buildRegenConfigTasks(m, name, topo, base, nil, opt.IgnoreConfigCheck) monitorConfigTasks := buildRefreshMonitoredConfigTasks( m.specManager, name, From 0b37f9f5ee1c52dde06b1ae48efc4667021ed6fe Mon Sep 17 00:00:00 2001 From: Rohinton Kazak Date: Sat, 3 Apr 2021 04:31:24 -0700 Subject: [PATCH 7/8] Update grafana.ini.tpl (#1276) --- embed/templates/config/grafana.ini.tpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/embed/templates/config/grafana.ini.tpl b/embed/templates/config/grafana.ini.tpl index 0d0f4a0e87..92bd7afc26 100644 --- a/embed/templates/config/grafana.ini.tpl +++ b/embed/templates/config/grafana.ini.tpl @@ -17,7 +17,7 @@ data = {{.DeployDir}}/data # # Directory where grafana can store logs # -logs = {{.DeployDir}}/logs +logs = 
{{.DeployDir}}/log # # Directory where grafana will automatically scan and look for plugins # @@ -289,4 +289,4 @@ path = {{.DeployDir}}/dashboards #################################### Internal Grafana Metrics ########################## # Url used to to import dashboards directly from Grafana.net [grafana_net] -url = https://grafana.net \ No newline at end of file +url = https://grafana.net From 52e24c2ee45e6a2acc3bd59990a4f1ebbedcd633 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Sat, 3 Apr 2021 23:43:25 +0800 Subject: [PATCH 8/8] Add advertise addr for TiKV and PD (#1224) --- embed/templates/scripts/run_pd.sh.tpl | 9 ++- embed/templates/scripts/run_pd_scale.sh.tpl | 8 +-- embed/templates/scripts/run_tidb.sh.tpl | 2 +- embed/templates/scripts/run_tikv.sh.tpl | 4 +- pkg/cluster/spec/pd.go | 26 ++++---- pkg/cluster/spec/spec.go | 2 + pkg/cluster/spec/tidb.go | 13 ++-- pkg/cluster/spec/tikv.go | 67 +++++++++++---------- pkg/cluster/template/scripts/pd.go | 59 ++++++++++++------ pkg/cluster/template/scripts/tidb.go | 36 +++++++---- pkg/cluster/template/scripts/tikv.go | 27 ++++++--- 11 files changed, 150 insertions(+), 103 deletions(-) diff --git a/embed/templates/scripts/run_pd.sh.tpl b/embed/templates/scripts/run_pd.sh.tpl index 0ad94401bd..a7547181be 100644 --- a/embed/templates/scripts/run_pd.sh.tpl +++ b/embed/templates/scripts/run_pd.sh.tpl @@ -10,9 +10,9 @@ cd "${DEPLOY_DIR}" || exit 1 {{- define "PDList"}} {{- range $idx, $pd := .}} {{- if eq $idx 0}} - {{- $pd.Name}}={{$pd.Scheme}}://{{$pd.IP}}:{{$pd.PeerPort}} + {{- $pd.Name}}={{$pd.AdvertisePeerAddr}} {{- else -}} - ,{{- $pd.Name}}={{$pd.Scheme}}://{{$pd.IP}}:{{$pd.PeerPort}} + ,{{- $pd.Name}}={{$pd.AdvertisePeerAddr}} {{- end}} {{- end}} {{- end}} @@ -24,11 +24,10 @@ exec bin/pd-server \ {{- end}} --name="{{.Name}}" \ --client-urls="{{.Scheme}}://{{.ListenHost}}:{{.ClientPort}}" \ - --advertise-client-urls="{{.Scheme}}://{{.IP}}:{{.ClientPort}}" \ + --advertise-client-urls="{{.AdvertiseClientAddr}}" \ 
--peer-urls="{{.Scheme}}://{{.ListenHost}}:{{.PeerPort}}" \ - --advertise-peer-urls="{{.Scheme}}://{{.IP}}:{{.PeerPort}}" \ + --advertise-peer-urls="{{.AdvertisePeerAddr}}" \ --data-dir="{{.DataDir}}" \ --initial-cluster="{{template "PDList" .Endpoints}}" \ --config=conf/pd.toml \ --log-file="{{.LogDir}}/pd.log" 2>> "{{.LogDir}}/pd_stderr.log" - diff --git a/embed/templates/scripts/run_pd_scale.sh.tpl b/embed/templates/scripts/run_pd_scale.sh.tpl index b58f03177f..849b9778d0 100644 --- a/embed/templates/scripts/run_pd_scale.sh.tpl +++ b/embed/templates/scripts/run_pd_scale.sh.tpl @@ -10,9 +10,9 @@ cd "${DEPLOY_DIR}" || exit 1 {{- define "PDList"}} {{- range $idx, $pd := .}} {{- if eq $idx 0}} - {{- $pd.Scheme}}://{{$pd.IP}}:{{$pd.ClientPort}} + {{- $pd.Name}}={{$pd.AdvertisePeerAddr}} {{- else -}} - ,{{- $pd.Scheme}}://{{$pd.IP}}:{{$pd.ClientPort}} + ,{{- $pd.Name}}={{$pd.AdvertisePeerAddr}} {{- end}} {{- end}} {{- end}} @@ -24,9 +24,9 @@ exec bin/pd-server \ {{- end}} --name="{{.Name}}" \ --client-urls="{{.Scheme}}://{{.ListenHost}}:{{.ClientPort}}" \ - --advertise-client-urls="{{.Scheme}}://{{.IP}}:{{.ClientPort}}" \ + --advertise-client-urls="{{.AdvertiseClientAddr}}" \ --peer-urls="{{.Scheme}}://{{.ListenHost}}:{{.PeerPort}}" \ - --advertise-peer-urls="{{.Scheme}}://{{.IP}}:{{.PeerPort}}" \ + --advertise-peer-urls="{{.AdvertisePeerAddr}}" \ --data-dir="{{.DataDir}}" \ --join="{{template "PDList" .Endpoints}}" \ --config=conf/pd.toml \ diff --git a/embed/templates/scripts/run_tidb.sh.tpl b/embed/templates/scripts/run_tidb.sh.tpl index b9326c2c3a..84ff07a61a 100644 --- a/embed/templates/scripts/run_tidb.sh.tpl +++ b/embed/templates/scripts/run_tidb.sh.tpl @@ -25,7 +25,7 @@ exec env GODEBUG=madvdontneed=1 bin/tidb-server \ -P {{.Port}} \ --status="{{.StatusPort}}" \ --host="{{.ListenHost}}" \ - --advertise-address="{{.IP}}" \ + --advertise-address="{{.AdvertiseAddr}}" \ --store="tikv" \ --path="{{template "PDList" .Endpoints}}" \ 
--log-slow-query="log/tidb_slow_query.log" \ diff --git a/embed/templates/scripts/run_tikv.sh.tpl b/embed/templates/scripts/run_tikv.sh.tpl index ed82bec3e0..cc77e262e9 100644 --- a/embed/templates/scripts/run_tikv.sh.tpl +++ b/embed/templates/scripts/run_tikv.sh.tpl @@ -28,10 +28,10 @@ exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tikv-server exec bin/tikv-server \ {{- end}} --addr "{{.ListenHost}}:{{.Port}}" \ - --advertise-addr "{{.IP}}:{{.Port}}" \ + --advertise-addr "{{.AdvertiseAddr}}" \ --status-addr "{{.ListenHost}}:{{.StatusPort}}" \ {{- if .SupportAdvertiseStatusAddr}} - --advertise-status-addr "{{.IP}}:{{.StatusPort}}" \ + --advertise-status-addr "{{.AdvertiseStatusAddr}}" \ {{- end}} --pd "{{template "PDList" .Endpoints}}" \ --data-dir "{{.DataDir}}" \ diff --git a/pkg/cluster/spec/pd.go b/pkg/cluster/spec/pd.go index e758793a41..25f2429128 100644 --- a/pkg/cluster/spec/pd.go +++ b/pkg/cluster/spec/pd.go @@ -32,11 +32,13 @@ import ( // PDSpec represents the PD topology specification in topology.yaml type PDSpec struct { - Host string `yaml:"host"` - ListenHost string `yaml:"listen_host,omitempty"` - SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` - Imported bool `yaml:"imported,omitempty"` - Patched bool `yaml:"patched,omitempty"` + Host string `yaml:"host"` + ListenHost string `yaml:"listen_host,omitempty"` + AdvertiseClientAddr string `yaml:"advertise_client_addr,omitempty"` + AdvertisePeerAddr string `yaml:"advertise_peer_addr,omitempty"` + SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` + Imported bool `yaml:"imported,omitempty"` + Patched bool `yaml:"patched,omitempty"` // Use Name to get the name with a default value if it's empty. 
Name string `yaml:"name"` ClientPort int `yaml:"client_port" default:"2379"` @@ -164,13 +166,9 @@ func (i *PDInstance) InitConfig( enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*PDSpec) - cfg := scripts.NewPDScript( - spec.Name, - i.GetHost(), - paths.Deploy, - paths.Data[0], - paths.Log, - ).WithClientPort(spec.ClientPort). + cfg := scripts. + NewPDScript(spec.Name, i.GetHost(), paths.Deploy, paths.Data[0], paths.Log). + WithClientPort(spec.ClientPort). WithPeerPort(spec.PeerPort). AppendEndpoints(topo.Endpoints(deployUser)...). WithListenHost(i.GetListenHost()) @@ -178,6 +176,8 @@ func (i *PDInstance) InitConfig( if enableTLS { cfg = cfg.WithScheme("https") } + cfg = cfg.WithAdvertiseClientAddr(spec.AdvertiseClientAddr). + WithAdvertisePeerAddr(spec.AdvertisePeerAddr) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pd_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { @@ -274,6 +274,8 @@ func (i *PDInstance) ScaleConfig( if topo.BaseTopo().GlobalOptions.TLSEnabled { cfg0 = cfg0.WithScheme("https") } + cfg0 = cfg0.WithAdvertiseClientAddr(spec.AdvertiseClientAddr). + WithAdvertisePeerAddr(spec.AdvertisePeerAddr) cfg := scripts.NewPDScaleScript(cfg0) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pd_%s_%d.sh", i.GetHost(), i.GetPort())) diff --git a/pkg/cluster/spec/spec.go b/pkg/cluster/spec/spec.go index 6ab09de69c..3eda5d83f4 100644 --- a/pkg/cluster/spec/spec.go +++ b/pkg/cluster/spec/spec.go @@ -700,6 +700,8 @@ func (s *Specification) Endpoints(user string) []*scripts.PDScript { if s.GlobalOptions.TLSEnabled { script = script.WithScheme("https") } + script = script.WithAdvertiseClientAddr(spec.AdvertiseClientAddr). 
+ WithAdvertisePeerAddr(spec.AdvertisePeerAddr) ends = append(ends, script) } return ends diff --git a/pkg/cluster/spec/tidb.go b/pkg/cluster/spec/tidb.go index 333b109429..d0b08a9383 100644 --- a/pkg/cluster/spec/tidb.go +++ b/pkg/cluster/spec/tidb.go @@ -30,6 +30,7 @@ import ( type TiDBSpec struct { Host string `yaml:"host"` ListenHost string `yaml:"listen_host,omitempty"` + AdvertiseAddr string `yaml:"advertise_address,omitempty"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` @@ -130,14 +131,14 @@ func (i *TiDBInstance) InitConfig( enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*TiDBSpec) - cfg := scripts.NewTiDBScript( - i.GetHost(), - paths.Deploy, - paths.Log, - ).WithPort(spec.Port).WithNumaNode(spec.NumaNode). + cfg := scripts. + NewTiDBScript(i.GetHost(), paths.Deploy, paths.Log). + WithPort(spec.Port). + WithNumaNode(spec.NumaNode). WithStatusPort(spec.StatusPort). AppendEndpoints(topo.Endpoints(deployUser)...). - WithListenHost(i.GetListenHost()) + WithListenHost(i.GetListenHost()). 
+ WithAdvertiseAddr(spec.AdvertiseAddr) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err diff --git a/pkg/cluster/spec/tikv.go b/pkg/cluster/spec/tikv.go index 4a40ef6213..5e9c00fb2c 100644 --- a/pkg/cluster/spec/tikv.go +++ b/pkg/cluster/spec/tikv.go @@ -43,22 +43,24 @@ const ( // TiKVSpec represents the TiKV topology specification in topology.yaml type TiKVSpec struct { - Host string `yaml:"host"` - ListenHost string `yaml:"listen_host,omitempty"` - SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` - Imported bool `yaml:"imported,omitempty"` - Patched bool `yaml:"patched,omitempty"` - Port int `yaml:"port" default:"20160"` - StatusPort int `yaml:"status_port" default:"20180"` - DeployDir string `yaml:"deploy_dir,omitempty"` - DataDir string `yaml:"data_dir,omitempty"` - LogDir string `yaml:"log_dir,omitempty"` - Offline bool `yaml:"offline,omitempty"` - NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` - Config map[string]interface{} `yaml:"config,omitempty" validate:"config:ignore"` - ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` - Arch string `yaml:"arch,omitempty"` - OS string `yaml:"os,omitempty"` + Host string `yaml:"host"` + ListenHost string `yaml:"listen_host,omitempty"` + AdvertiseAddr string `yaml:"advertise_addr,omitempty"` + SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` + Imported bool `yaml:"imported,omitempty"` + Patched bool `yaml:"patched,omitempty"` + Port int `yaml:"port" default:"20160"` + StatusPort int `yaml:"status_port" default:"20180"` + AdvertiseStatusAddr string `yaml:"advertise_status_addr,omitempty"` + DeployDir string `yaml:"deploy_dir,omitempty"` + DataDir string `yaml:"data_dir,omitempty"` + LogDir string `yaml:"log_dir,omitempty"` + Offline bool `yaml:"offline,omitempty"` + NumaNode string 
`yaml:"numa_node,omitempty" validate:"numa_node:editable"` + Config map[string]interface{} `yaml:"config,omitempty" validate:"config:ignore"` + ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` + Arch string `yaml:"arch,omitempty"` + OS string `yaml:"os,omitempty"` } // checkStoreStatus checks the store status in current cluster @@ -80,7 +82,7 @@ func checkStoreStatus(storeAddr string, tlsCfg *tls.Config, pdList ...string) st // Status queries current status of the instance func (s *TiKVSpec) Status(tlsCfg *tls.Config, pdList ...string) string { - storeAddr := fmt.Sprintf("%s:%d", s.Host, s.Port) + storeAddr := addr(s) state := checkStoreStatus(storeAddr, tlsCfg, pdList...) if s.Offline && strings.ToLower(state) == "offline" { state = "Pending Offline" // avoid misleading @@ -203,17 +205,14 @@ func (i *TiKVInstance) InitConfig( enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*TiKVSpec) - cfg := scripts.NewTiKVScript( - clusterVersion, - i.GetHost(), - paths.Deploy, - paths.Data[0], - paths.Log, - ).WithPort(spec.Port). + cfg := scripts. + NewTiKVScript(clusterVersion, i.GetHost(), spec.Port, spec.StatusPort, paths.Deploy, paths.Data[0], paths.Log). WithNumaNode(spec.NumaNode). - WithStatusPort(spec.StatusPort). AppendEndpoints(topo.Endpoints(deployUser)...). - WithListenHost(i.GetListenHost()) + WithListenHost(i.GetListenHost()). + WithAdvertiseAddr(spec.AdvertiseAddr). 
+ WithAdvertiseStatusAddr(spec.AdvertiseStatusAddr) + fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tikv_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err @@ -324,7 +323,7 @@ func (i *TiKVInstance) PreRestart(topo Topology, apiTimeoutSeconds int, tlsCfg * return err } - if err := pdClient.EvictStoreLeader(addr(i), timeoutOpt, genLeaderCounter(tidbTopo, tlsCfg)); err != nil { + if err := pdClient.EvictStoreLeader(addr(i.InstanceSpec.(*TiKVSpec)), timeoutOpt, genLeaderCounter(tidbTopo, tlsCfg)); err != nil { if utils.IsTimeoutOrMaxRetry(err) { log.Warnf("Ignore evicting store leader from %s, %v", i.ID(), err) } else { @@ -348,18 +347,22 @@ func (i *TiKVInstance) PostRestart(topo Topology, tlsCfg *tls.Config) error { pdClient := api.NewPDClient(tidbTopo.GetPDList(), 5*time.Second, tlsCfg) // remove store leader evict scheduler after restart - if err := pdClient.RemoveStoreEvict(addr(i)); err != nil { + if err := pdClient.RemoveStoreEvict(addr(i.InstanceSpec.(*TiKVSpec))); err != nil { return perrs.Annotatef(err, "failed to remove evict store scheduler for %s", i.GetHost()) } return nil } -func addr(ins Instance) string { - if ins.GetPort() == 0 || ins.GetPort() == 80 { - panic(ins) +func addr(spec *TiKVSpec) string { + if spec.AdvertiseAddr != "" { + return spec.AdvertiseAddr + } + + if spec.Port == 0 || spec.Port == 80 { + panic(fmt.Sprintf("invalid TiKV port %d", spec.Port)) } - return ins.GetHost() + ":" + strconv.Itoa(ins.GetPort()) + return spec.Host + ":" + strconv.Itoa(spec.Port) } func genLeaderCounter(topo *Specification, tlsCfg *tls.Config) func(string) (int, error) { diff --git a/pkg/cluster/template/scripts/pd.go b/pkg/cluster/template/scripts/pd.go index 109a78ef9a..e8d22eb366 100644 --- a/pkg/cluster/template/scripts/pd.go +++ b/pkg/cluster/template/scripts/pd.go @@ -16,6 +16,7 @@ package scripts import ( "bytes" "errors" + "fmt" "os" "path" "text/template" @@ -25,30 +26,34 @@ import ( // PDScript represent 
the data to generate pd config type PDScript struct { - Name string - Scheme string - IP string - ListenHost string - ClientPort int - PeerPort int - DeployDir string - DataDir string - LogDir string - NumaNode string - Endpoints []*PDScript + Name string + Scheme string + IP string + ListenHost string + AdvertiseClientAddr string + AdvertisePeerAddr string + ClientPort int + PeerPort int + DeployDir string + DataDir string + LogDir string + NumaNode string + Endpoints []*PDScript } // NewPDScript returns a PDScript with given arguments func NewPDScript(name, ip, deployDir, dataDir, logDir string) *PDScript { return &PDScript{ - Name: name, - Scheme: "http", - IP: ip, - ClientPort: 2379, - PeerPort: 2380, - DeployDir: deployDir, - DataDir: dataDir, - LogDir: logDir, + Name: name, + Scheme: "http", + IP: ip, + AdvertiseClientAddr: fmt.Sprintf("http://%s:%d", ip, 2379), + AdvertisePeerAddr: fmt.Sprintf("http://%s:%d", ip, 2380), + ClientPort: 2379, + PeerPort: 2380, + DeployDir: deployDir, + DataDir: dataDir, + LogDir: logDir, } } @@ -76,6 +81,22 @@ func (c *PDScript) WithPeerPort(port int) *PDScript { return c } +// WithAdvertiseClientAddr set AdvertiseClientAddr field of PDScript +func (c *PDScript) WithAdvertiseClientAddr(addr string) *PDScript { + if addr != "" { + c.AdvertiseClientAddr = fmt.Sprintf("%s://%s", c.Scheme, addr) + } + return c +} + +// WithAdvertisePeerAddr set AdvertisePeerAddr field of PDScript +func (c *PDScript) WithAdvertisePeerAddr(addr string) *PDScript { + if addr != "" { + c.AdvertisePeerAddr = fmt.Sprintf("%s://%s", c.Scheme, addr) + } + return c +} + // WithNumaNode set NumaNode field of PDScript func (c *PDScript) WithNumaNode(numa string) *PDScript { c.NumaNode = numa diff --git a/pkg/cluster/template/scripts/tidb.go b/pkg/cluster/template/scripts/tidb.go index 89f4719f69..6f713e104e 100644 --- a/pkg/cluster/template/scripts/tidb.go +++ b/pkg/cluster/template/scripts/tidb.go @@ -24,24 +24,26 @@ import ( // TiDBScript represent the data 
to generate TiDB config type TiDBScript struct { - IP string - ListenHost string - Port int - StatusPort int - DeployDir string - LogDir string - NumaNode string - Endpoints []*PDScript + IP string + ListenHost string + AdvertiseAddr string + Port int + StatusPort int + DeployDir string + LogDir string + NumaNode string + Endpoints []*PDScript } // NewTiDBScript returns a TiDBScript with given arguments func NewTiDBScript(ip, deployDir, logDir string) *TiDBScript { return &TiDBScript{ - IP: ip, - Port: 4000, - StatusPort: 10080, - DeployDir: deployDir, - LogDir: logDir, + IP: ip, + AdvertiseAddr: ip, + Port: 4000, + StatusPort: 10080, + DeployDir: deployDir, + LogDir: logDir, } } @@ -51,6 +53,14 @@ func (c *TiDBScript) WithListenHost(listenHost string) *TiDBScript { return c } +// WithAdvertiseAddr set AdvertiseAddr field of TiDBScript +func (c *TiDBScript) WithAdvertiseAddr(addr string) *TiDBScript { + if addr != "" { + c.AdvertiseAddr = addr + } + return c +} + // WithPort set Port field of TiDBScript func (c *TiDBScript) WithPort(port int) *TiDBScript { c.Port = port diff --git a/pkg/cluster/template/scripts/tikv.go b/pkg/cluster/template/scripts/tikv.go index a9276258f3..aaac858982 100644 --- a/pkg/cluster/template/scripts/tikv.go +++ b/pkg/cluster/template/scripts/tikv.go @@ -15,6 +15,7 @@ package scripts import ( "bytes" + "fmt" "os" "path" "text/template" @@ -31,6 +32,8 @@ const ( type TiKVScript struct { IP string ListenHost string + AdvertiseAddr string + AdvertiseStatusAddr string Port int StatusPort int DeployDir string @@ -42,11 +45,13 @@ type TiKVScript struct { } // NewTiKVScript returns a TiKVScript with given arguments -func NewTiKVScript(version, ip, deployDir, dataDir, logDir string) *TiKVScript { +func NewTiKVScript(version, ip string, port, statusPort int, deployDir, dataDir, logDir string) *TiKVScript { return &TiKVScript{ IP: ip, - Port: 20160, - StatusPort: 20180, + AdvertiseAddr: fmt.Sprintf("%s:%d", ip, port), + AdvertiseStatusAddr: 
fmt.Sprintf("%s:%d", ip, statusPort), + Port: port, + StatusPort: statusPort, DeployDir: deployDir, DataDir: dataDir, LogDir: logDir, @@ -60,15 +65,19 @@ func (c *TiKVScript) WithListenHost(listenHost string) *TiKVScript { return c } -// WithPort set Port field of TiKVScript -func (c *TiKVScript) WithPort(port int) *TiKVScript { - c.Port = port +// WithAdvertiseAddr set AdvertiseAddr of TiKVScript +func (c *TiKVScript) WithAdvertiseAddr(addr string) *TiKVScript { + if addr != "" { + c.AdvertiseAddr = addr + } return c } -// WithStatusPort set StatusPort field of TiKVScript -func (c *TiKVScript) WithStatusPort(port int) *TiKVScript { - c.StatusPort = port +// WithAdvertiseStatusAddr set AdvertiseStatusAddr of TiKVScript +func (c *TiKVScript) WithAdvertiseStatusAddr(addr string) *TiKVScript { + if addr != "" { + c.AdvertiseStatusAddr = addr + } return c }