Skip to content

Commit

Permalink
merge master and support tiproxy version
Browse files Browse the repository at this point in the history
  • Loading branch information
nexustar committed Nov 3, 2023
2 parents 56d06a8 + a07c4d7 commit cc5c49b
Show file tree
Hide file tree
Showing 47 changed files with 813 additions and 81 deletions.
14 changes: 14 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,19 @@
TiUP Changelog

## [1.13.1] 2023-09-25

### Fixes

- Increase timeout when publishing packages in `tiup` (#2269, @nexustar)
- Fix pd microservice component id in `tiup-playground` (#2272, @iosmanthus)
- Fix grafana for multiple instances using same host in `tiup-cluster` and `tiup-dm` (#2277, @lastincisor)
- Add cdn workaround (#2285, @nexustar)
- Mirror: fix inaccurate progress bar (#2284, @nexustar)

### Improvements

- Support ignoring the version check when upgrading in `tiup-cluster` and `tiup-dm` (#2282, @nexustar)

## [1.13.0] 2023-08-26

### New Features
Expand Down
1 change: 1 addition & 0 deletions components/cluster/command/display.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ func newDisplayCmd() *cobra.Command {
cmd.Flags().BoolVar(&showTiKVLabels, "labels", false, "Only display labels of specified TiKV role or nodes")
cmd.Flags().BoolVar(&dopt.ShowProcess, "process", false, "display cpu and memory usage of nodes")
cmd.Flags().BoolVar(&dopt.ShowManageHost, "manage-host", false, "display manage host of nodes")
cmd.Flags().BoolVar(&dopt.ShowNuma, "numa", false, "display numa information of nodes")
cmd.Flags().Uint64Var(&statusTimeout, "status-timeout", 10, "Timeout in seconds when getting node status")

return cmd
Expand Down
8 changes: 6 additions & 2 deletions components/cluster/command/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ import (

func newUpgradeCmd() *cobra.Command {
offlineMode := false
var tidbVer, tikvVer, pdVer, tiflashVer, kvcdcVer, dashboardVer, cdcVer, alertmanagerVer, nodeExporterVer, blackboxExporterVer string
ignoreVersionCheck := false
var tidbVer, tikvVer, pdVer, tiflashVer, kvcdcVer, dashboardVer, cdcVer, alertmanagerVer, nodeExporterVer, blackboxExporterVer, tiproxyVer string

cmd := &cobra.Command{
Use: "upgrade <cluster-name> <version>",
Expand Down Expand Up @@ -49,11 +50,12 @@ func newUpgradeCmd() *cobra.Command {
spec.ComponentTiFlash: tiflashVer,
spec.ComponentTiKVCDC: kvcdcVer,
spec.ComponentCDC: cdcVer,
spec.ComponentTiProxy: tiproxyVer,
spec.ComponentBlackboxExporter: blackboxExporterVer,
spec.ComponentNodeExporter: nodeExporterVer,
}

return cm.Upgrade(clusterName, version, componentVersions, gOpt, skipConfirm, offlineMode)
return cm.Upgrade(clusterName, version, componentVersions, gOpt, skipConfirm, offlineMode, ignoreVersionCheck)
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
switch len(args) {
Expand All @@ -68,6 +70,7 @@ func newUpgradeCmd() *cobra.Command {
cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture")
cmd.Flags().BoolVarP(&gOpt.IgnoreConfigCheck, "ignore-config-check", "", false, "Ignore the config check result")
cmd.Flags().BoolVarP(&offlineMode, "offline", "", false, "Upgrade a stopped cluster")
cmd.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore checking if target version is bigger than current version")
cmd.Flags().StringVar(&gOpt.SSHCustomScripts.BeforeRestartInstance.Raw, "pre-upgrade-script", "", "(EXPERIMENTAL) Custom script to be executed on each server before the server is upgraded")
cmd.Flags().StringVar(&gOpt.SSHCustomScripts.AfterRestartInstance.Raw, "post-upgrade-script", "", "(EXPERIMENTAL) Custom script to be executed on each server after the server is upgraded")

Expand All @@ -81,5 +84,6 @@ func newUpgradeCmd() *cobra.Command {
cmd.Flags().StringVar(&alertmanagerVer, "alertmanager-version", "", "Specify the version of alertmanager to upgrade to")
cmd.Flags().StringVar(&nodeExporterVer, "node-exporter-version", "", "Specify the version of node-exporter to upgrade to")
cmd.Flags().StringVar(&blackboxExporterVer, "blackbox-exporter-version", "", "Specify the version of blackbox-exporter to upgrade to")
cmd.Flags().StringVar(&tiproxyVer, "tiproxy-version", "", "Specify the version of tiproxy to upgrade to")
return cmd
}
5 changes: 3 additions & 2 deletions components/dm/command/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import (

func newUpgradeCmd() *cobra.Command {
offlineMode := false

ignoreVersionCheck := false
cmd := &cobra.Command{
Use: "upgrade <cluster-name> <version>",
Short: "Upgrade a specified DM cluster",
Expand All @@ -28,7 +28,7 @@ func newUpgradeCmd() *cobra.Command {
return cmd.Help()
}

return cm.Upgrade(args[0], args[1], nil, gOpt, skipConfirm, offlineMode)
return cm.Upgrade(args[0], args[1], nil, gOpt, skipConfirm, offlineMode, ignoreVersionCheck)
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
switch len(args) {
Expand All @@ -41,6 +41,7 @@ func newUpgradeCmd() *cobra.Command {
}

cmd.Flags().BoolVarP(&offlineMode, "offline", "", false, "Upgrade a stopped cluster")
cmd.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore checking if target version is higher than current version")

return cmd
}
16 changes: 11 additions & 5 deletions components/playground/instance/instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,12 @@ type instance struct {
BinPath string
}

// MetricAddr describes one Prometheus scrape-target group consumed by
// the generated file_sd scrape_configs: the host:port endpoints to pull
// metrics from, plus extra labels (e.g. a "__metrics_path__" override)
// that are merged into the rendered service-discovery entry.
type MetricAddr struct {
	// Targets holds "host:port" metric endpoints for this component.
	Targets []string `json:"targets"`
	// Labels holds additional Prometheus labels attached to Targets.
	Labels map[string]string `json:"labels"`
}

// Instance represent running component
type Instance interface {
Pid() int
Expand All @@ -59,16 +65,16 @@ type Instance interface {
LogFile() string
// Uptime show uptime.
Uptime() string
// StatusAddrs return the address to pull metrics.
StatusAddrs() []string
// MetricAddr return the address to pull metrics.
MetricAddr() MetricAddr
// Wait Should only call this if the instance is started successfully.
// The implementation should be safe to call Wait multi times.
Wait() error
}

func (inst *instance) StatusAddrs() (addrs []string) {
func (inst *instance) MetricAddr() (r MetricAddr) {
if inst.Host != "" && inst.StatusPort != 0 {
addrs = append(addrs, utils.JoinHostPort(inst.Host, inst.StatusPort))
r.Targets = append(r.Targets, utils.JoinHostPort(inst.Host, inst.StatusPort))
}
return
}
Expand Down Expand Up @@ -109,7 +115,7 @@ func logIfErr(err error) {
func pdEndpoints(pds []*PDInstance, isHTTP bool) []string {
var endpoints []string
for _, pd := range pds {
if pd.Role == PDRoleTSO || pd.Role == PDRoleResourceManager {
if pd.Role == PDRoleTSO || pd.Role == PDRoleScheduling || pd.Role == PDRoleResourceManager {
continue
}
if isHTTP {
Expand Down
23 changes: 19 additions & 4 deletions components/playground/instance/pd.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ const (
PDRoleAPI PDRole = "api"
// PDRoleTSO is the role of PD TSO
PDRoleTSO PDRole = "tso"
// PDRoleScheduling is the role of PD scheduling
PDRoleScheduling PDRole = "scheduling"
// PDRoleResourceManager is the role of PD resource manager
PDRoleResourceManager PDRole = "resource manager"
)
Expand Down Expand Up @@ -128,8 +130,21 @@ func (inst *PDInstance) Start(ctx context.Context, version utils.Version) error
args = []string{
"services",
"tso",
fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.Port)),
fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port)),
fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)),
fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)),
fmt.Sprintf("--backend-endpoints=%s", strings.Join(endpoints, ",")),
fmt.Sprintf("--log-file=%s", inst.LogFile()),
}
if inst.ConfigPath != "" {
args = append(args, fmt.Sprintf("--config=%s", inst.ConfigPath))
}
case PDRoleScheduling:
endpoints := pdEndpoints(inst.pds, true)
args = []string{
"services",
"scheduling",
fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)),
fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)),
fmt.Sprintf("--backend-endpoints=%s", strings.Join(endpoints, ",")),
fmt.Sprintf("--log-file=%s", inst.LogFile()),
}
Expand All @@ -141,8 +156,8 @@ func (inst *PDInstance) Start(ctx context.Context, version utils.Version) error
args = []string{
"services",
"resource-manager",
fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.Port)),
fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port)),
fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)),
fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)),
fmt.Sprintf("--backend-endpoints=%s", strings.Join(endpoints, ",")),
fmt.Sprintf("--log-file=%s", inst.LogFile()),
}
Expand Down
9 changes: 9 additions & 0 deletions components/playground/instance/tiproxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,15 @@ func NewTiProxy(binPath string, dir, host, configPath string, id int, port int,
return tiproxy
}

// MetricAddr implements the Instance interface. It exposes the TiProxy
// status address as the scrape target and overrides the metrics path,
// since TiProxy serves its Prometheus metrics under /api/metrics.
func (c *TiProxy) MetricAddr() MetricAddr {
	target := utils.JoinHostPort(c.Host, c.StatusPort)
	return MetricAddr{
		Targets: []string{target},
		Labels:  map[string]string{"__metrics_path__": "/api/metrics"},
	}
}

// Start implements Instance interface.
func (c *TiProxy) Start(ctx context.Context, version utils.Version) error {
endpoints := pdEndpoints(c.pds, true)
Expand Down
33 changes: 16 additions & 17 deletions components/playground/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,9 @@ package main

import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
Expand Down Expand Up @@ -57,10 +57,11 @@ type BootOptions struct {
Mode string `yaml:"mode"`
PDMode string `yaml:"pd_mode"`
Version string `yaml:"version"`
PD instance.Config `yaml:"pd"` // ignored when pd_mode == ms
PDAPI instance.Config `yaml:"pd_api"` // Only available when pd_mode == ms
PDTSO instance.Config `yaml:"pd_tso"` // Only available when pd_mode == ms
PDRM instance.Config `yaml:"pd_rm"` // Only available when pd_mode == ms
PD instance.Config `yaml:"pd"` // ignored when pd_mode == ms
PDAPI instance.Config `yaml:"pd_api"` // Only available when pd_mode == ms
PDTSO instance.Config `yaml:"pd_tso"` // Only available when pd_mode == ms
PDScheduling instance.Config `yaml:"pd_scheduling"` // Only available when pd_mode == ms
PDRM instance.Config `yaml:"pd_rm"` // Only available when pd_mode == ms
TiProxy instance.Config `yaml:"tiproxy"`
TiDB instance.Config `yaml:"tidb"`
TiKV instance.Config `yaml:"tikv"`
Expand Down Expand Up @@ -294,6 +295,7 @@ If you'd like to use a TiDB version other than %s, cancel and retry with the fol

rootCmd.Flags().IntVar(&options.PDAPI.Num, "pd.api", 0, "PD API instance number")
rootCmd.Flags().IntVar(&options.PDTSO.Num, "pd.tso", 0, "PD TSO instance number")
rootCmd.Flags().IntVar(&options.PDScheduling.Num, "pd.scheduling", 0, "PD scheduling instance number")
rootCmd.Flags().IntVar(&options.PDRM.Num, "pd.rm", 0, "PD resource manager instance number")

rootCmd.Flags().IntVar(&options.TiDB.UpTimeout, "db.timeout", 60, "TiDB max wait time in seconds for starting, 0 means no limit")
Expand Down Expand Up @@ -326,6 +328,7 @@ If you'd like to use a TiDB version other than %s, cancel and retry with the fol

rootCmd.Flags().StringVar(&options.PDAPI.ConfigPath, "pd.api.config", "", "PD API instance configuration file")
rootCmd.Flags().StringVar(&options.PDTSO.ConfigPath, "pd.tso.config", "", "PD TSO instance configuration file")
rootCmd.Flags().StringVar(&options.PDScheduling.ConfigPath, "pd.scheduling.config", "", "PD scheduling instance configuration file")
rootCmd.Flags().StringVar(&options.PDRM.ConfigPath, "pd.rm.config", "", "PD resource manager instance configuration file")

rootCmd.Flags().StringVar(&options.TiDB.BinPath, "db.binpath", "", "TiDB instance binary path")
Expand All @@ -342,6 +345,7 @@ If you'd like to use a TiDB version other than %s, cancel and retry with the fol

rootCmd.Flags().StringVar(&options.PDAPI.BinPath, "pd.api.binpath", "", "PD API instance binary path")
rootCmd.Flags().StringVar(&options.PDTSO.BinPath, "pd.tso.binpath", "", "PD TSO instance binary path")
rootCmd.Flags().StringVar(&options.PDScheduling.BinPath, "pd.scheduling.binpath", "", "PD scheduling instance binary path")
rootCmd.Flags().StringVar(&options.PDRM.BinPath, "pd.rm.binpath", "", "PD resource manager instance binary path")

rootCmd.Flags().StringVar(&options.TiKVCDC.Version, "kvcdc.version", "", "TiKV-CDC instance version")
Expand Down Expand Up @@ -409,6 +413,9 @@ func populateDefaultOpt(flagSet *pflag.FlagSet) error {
defaultInt(&options.PDTSO.Num, "pd.tso", 1)
defaultStr(&options.PDTSO.BinPath, "pd.tso.binpath", options.PDTSO.BinPath)
defaultStr(&options.PDTSO.ConfigPath, "pd.tso.config", options.PDTSO.ConfigPath)
defaultInt(&options.PDScheduling.Num, "pd.scheduling", 1)
defaultStr(&options.PDScheduling.BinPath, "pd.scheduling.binpath", options.PDScheduling.BinPath)
defaultStr(&options.PDScheduling.ConfigPath, "pd.scheduling.config", options.PDScheduling.ConfigPath)
defaultInt(&options.PDRM.Num, "pd.rm", 1)
defaultStr(&options.PDRM.BinPath, "pd.rm.binpath", options.PDRM.BinPath)
defaultStr(&options.PDRM.ConfigPath, "pd.rm.config", options.PDRM.ConfigPath)
Expand All @@ -419,36 +426,28 @@ func populateDefaultOpt(flagSet *pflag.FlagSet) error {
return nil
}

func tryConnect(dsn string) error {
cli, err := sql.Open("mysql", dsn)
if err != nil {
return err
}
defer cli.Close()

conn, err := cli.Conn(context.Background())
// tryConnect reports whether a TCP connection to addr can be
// established within timeoutSec seconds. A nil return means the dial
// succeeded; the probe connection is closed before returning.
func tryConnect(addr string, timeoutSec int) error {
	dialTimeout := time.Duration(timeoutSec) * time.Second
	conn, dialErr := net.DialTimeout("tcp", addr, dialTimeout)
	if dialErr != nil {
		return dialErr
	}
	defer conn.Close()
	return nil
}

// checkDB check if the addr is connectable by getting a connection from sql.DB. timeout <=0 means no timeout
func checkDB(dbAddr string, timeout int) bool {
dsn := fmt.Sprintf("root:@tcp(%s)/", dbAddr)
if timeout > 0 {
for i := 0; i < timeout; i++ {
if tryConnect(dsn) == nil {
if tryConnect(dbAddr, timeout) == nil {
return true
}
time.Sleep(time.Second)
}
return false
}
for {
if err := tryConnect(dsn); err == nil {
if err := tryConnect(dbAddr, timeout); err == nil {
return true
}
time.Sleep(time.Second)
Expand Down
26 changes: 11 additions & 15 deletions components/playground/monitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,24 +29,20 @@ import (
)

// ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config
func (m *monitor) renderSDFile(cid2targets map[string][]string) error {
type Item struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
}

cid2targets["prometheus"] = []string{utils.JoinHostPort(m.host, m.port)}
func (m *monitor) renderSDFile(cid2targets map[string]instance.MetricAddr) error {
cid2targets["prometheus"] = instance.MetricAddr{Targets: []string{utils.JoinHostPort(m.host, m.port)}}

var items []Item
var items []instance.MetricAddr

for id, targets := range cid2targets {
item := Item{
Targets: targets,
Labels: map[string]string{
"job": id,
},
for id, t := range cid2targets {
it := instance.MetricAddr{
Targets: t.Targets,
Labels: map[string]string{"job": id},
}
for k, v := range t.Labels {
it.Labels[k] = v
}
items = append(items, item)
items = append(items, it)
}

data, err := json.MarshalIndent(&items, "", "\t")
Expand Down
Loading

0 comments on commit cc5c49b

Please sign in to comment.