diff --git a/client/client.go b/client/client.go index 0ae3fd21df05..b3885c537916 100644 --- a/client/client.go +++ b/client/client.go @@ -43,8 +43,12 @@ import ( const ( // defaultKeyspaceID is the default key space id. // Valid keyspace id range is [0, 0xFFFFFF](uint24max, or 16777215) - // ​0 is reserved for default keyspace with the name "DEFAULT", It's initialized when PD bootstrap and reserved for users who haven't been assigned keyspace. + // ​0 is reserved for default keyspace with the name "DEFAULT", It's initialized when PD bootstrap + // and reserved for users who haven't been assigned keyspace. defaultKeyspaceID = uint32(0) + // defaultKeySpaceGroupID is the default key space group id. + // We also reserved 0 for the keyspace group for the same purpose. + defaultKeySpaceGroupID = uint32(0) ) // Region contains information of a region's meta and its peers. @@ -205,6 +209,8 @@ var ( errClosing = errors.New("[pd] closing") // errTSOLength is returned when the number of response timestamps is inconsistent with request. errTSOLength = errors.New("[pd] tso length in rpc response is incorrect") + // errNoServiceModeReturned is returned when the response doesn't contain service mode info unexpectedly. + errNoServiceModeReturned = errors.New("[pd] no service mode returned") ) // ClientOption configures client. 
@@ -421,6 +427,7 @@ func (c *client) Close() { func (c *client) setServiceMode(newMode pdpb.ServiceMode) { c.Lock() defer c.Unlock() + if newMode == c.serviceMode { return } @@ -437,13 +444,18 @@ func (c *client) setServiceMode(newMode pdpb.ServiceMode) { newTSOCli = newTSOClient(c.ctx, c.option, c.keyspaceID, c.pdSvcDiscovery, &pdTSOStreamBuilderFactory{}) case pdpb.ServiceMode_API_SVC_MODE: - newTSOSvcDiscovery = newTSOServiceDiscovery(c.ctx, MetaStorageClient(c), - c.GetClusterID(c.ctx), c.keyspaceID, c.svrUrls, c.tlsCfg, c.option) + newTSOSvcDiscovery = newTSOServiceDiscovery( + c.ctx, MetaStorageClient(c), c.pdSvcDiscovery, + c.GetClusterID(c.ctx), c.keyspaceID, c.tlsCfg, c.option) + // At this point, the keyspace group isn't known yet. Starts from the default keyspace group, + // and will be updated later. newTSOCli = newTSOClient(c.ctx, c.option, c.keyspaceID, newTSOSvcDiscovery, &tsoTSOStreamBuilderFactory{}) if err := newTSOSvcDiscovery.Init(); err != nil { log.Error("[pd] failed to initialize tso service discovery. 
keep the current service mode", - zap.Strings("svr-urls", c.svrUrls), zap.String("current-mode", c.serviceMode.String()), zap.Error(err)) + zap.Strings("svr-urls", c.svrUrls), + zap.String("current-mode", c.serviceMode.String()), + zap.Error(err)) return } case pdpb.ServiceMode_UNKNOWN_SVC_MODE: @@ -643,11 +655,10 @@ func (c *client) GetLocalTSAsync(ctx context.Context, dcLocation string) TSFutur req.clientCtx = c.ctx tsoClient := c.getTSOClient() req.start = time.Now() - req.keyspaceID = c.keyspaceID req.dcLocation = dcLocation if tsoClient == nil { - req.done <- errs.ErrClientGetTSO + req.done <- errs.ErrClientGetTSO.FastGenByArgs("tso client is nil") return req } diff --git a/client/client_test.go b/client/client_test.go index 43c0cc5c3083..5f6a0b89b426 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -58,11 +58,11 @@ func TestUpdateURLs(t *testing.T) { cli := &pdServiceDiscovery{option: newOption()} cli.urls.Store([]string{}) cli.updateURLs(members[1:]) - re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2]}), cli.GetURLs()) + re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2]}), cli.GetServiceURLs()) cli.updateURLs(members[1:]) - re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2]}), cli.GetURLs()) + re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2]}), cli.GetServiceURLs()) cli.updateURLs(members) - re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2], members[0]}), cli.GetURLs()) + re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2], members[0]}), cli.GetServiceURLs()) } const testClientURL = "tmp://test.url:5255" diff --git a/client/errs/errno.go b/client/errs/errno.go index 9ed860681bed..e4bb7a21a9b1 100644 --- a/client/errs/errno.go +++ b/client/errs/errno.go @@ -38,18 +38,19 @@ const ( // client errors var ( - ErrClientGetProtoClient = errors.Normalize("failed to get proto client", errors.RFCCodeText("PD:client:ErrClientGetProtoClient")) - 
ErrClientCreateTSOStream = errors.Normalize("create TSO stream failed, %s", errors.RFCCodeText("PD:client:ErrClientCreateTSOStream")) - ErrClientTSOStreamClosed = errors.Normalize("encountered TSO stream being closed unexpectedly", errors.RFCCodeText("PD:client:ErrClientTSOStreamClosed")) - ErrClientGetTSOTimeout = errors.Normalize("get TSO timeout", errors.RFCCodeText("PD:client:ErrClientGetTSOTimeout")) - ErrClientGetTSO = errors.Normalize("get TSO failed, %v", errors.RFCCodeText("PD:client:ErrClientGetTSO")) - ErrClientGetLeader = errors.Normalize("get leader from %v error", errors.RFCCodeText("PD:client:ErrClientGetLeader")) - ErrClientGetMember = errors.Normalize("get member failed", errors.RFCCodeText("PD:client:ErrClientGetMember")) - ErrClientGetClusterInfo = errors.Normalize("get cluster info failed", errors.RFCCodeText("PD:client:ErrClientGetClusterInfo")) - ErrClientUpdateMember = errors.Normalize("update member failed, %v", errors.RFCCodeText("PD:client:ErrUpdateMember")) - ErrClientProtoUnmarshal = errors.Normalize("failed to unmarshal proto", errors.RFCCodeText("PD:proto:ErrClientProtoUnmarshal")) - ErrClientGetMultiResponse = errors.Normalize("get invalid value response %v, must only one", errors.RFCCodeText("PD:client:ErrClientGetMultiResponse")) - ErrClientGetServingEndpoint = errors.Normalize("get serving endpoint failed", errors.RFCCodeText("PD:client:ErrClientGetServingEndpoint")) + ErrClientGetProtoClient = errors.Normalize("failed to get proto client", errors.RFCCodeText("PD:client:ErrClientGetProtoClient")) + ErrClientCreateTSOStream = errors.Normalize("create TSO stream failed, %s", errors.RFCCodeText("PD:client:ErrClientCreateTSOStream")) + ErrClientTSOStreamClosed = errors.Normalize("encountered TSO stream being closed unexpectedly", errors.RFCCodeText("PD:client:ErrClientTSOStreamClosed")) + ErrClientGetTSOTimeout = errors.Normalize("get TSO timeout", errors.RFCCodeText("PD:client:ErrClientGetTSOTimeout")) + ErrClientGetTSO = 
errors.Normalize("get TSO failed, %v", errors.RFCCodeText("PD:client:ErrClientGetTSO")) + ErrClientGetLeader = errors.Normalize("get leader failed, %v", errors.RFCCodeText("PD:client:ErrClientGetLeader")) + ErrClientGetMember = errors.Normalize("get member failed", errors.RFCCodeText("PD:client:ErrClientGetMember")) + ErrClientGetClusterInfo = errors.Normalize("get cluster info failed", errors.RFCCodeText("PD:client:ErrClientGetClusterInfo")) + ErrClientUpdateMember = errors.Normalize("update member failed, %v", errors.RFCCodeText("PD:client:ErrUpdateMember")) + ErrClientProtoUnmarshal = errors.Normalize("failed to unmarshal proto", errors.RFCCodeText("PD:proto:ErrClientProtoUnmarshal")) + ErrClientGetMultiResponse = errors.Normalize("get invalid value response %v, must only one", errors.RFCCodeText("PD:client:ErrClientGetMultiResponse")) + ErrClientGetServingEndpoint = errors.Normalize("get serving endpoint failed", errors.RFCCodeText("PD:client:ErrClientGetServingEndpoint")) + ErrClientFindGroupByKeyspaceID = errors.Normalize("can't find keyspace group by keyspace id", errors.RFCCodeText("PD:client:ErrClientFindGroupByKeyspaceID")) ) // grpcutil errors diff --git a/client/go.mod b/client/go.mod index 9f9a5225b2bd..aa19f4f3e6af 100644 --- a/client/go.mod +++ b/client/go.mod @@ -8,7 +8,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a + github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/prometheus/client_golang v1.11.1 github.com/stretchr/testify v1.8.2 diff --git a/client/go.sum b/client/go.sum index c42b62af83de..f06d1ee10da4 100644 --- a/client/go.sum +++ b/client/go.sum @@ -82,8 +82,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTm 
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index 80634922cd39..d02932c51ed4 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -41,6 +41,13 @@ const ( updateMemberTimeout = time.Second // Use a shorter timeout to recover faster from network isolation. ) +type serviceType int + +const ( + apiService serviceType = iota + tsoService +) + // ServiceDiscovery defines the general interface for service discovery on a quorum-based cluster // or a primary/secondary configured cluster. type ServiceDiscovery interface { @@ -50,8 +57,14 @@ type ServiceDiscovery interface { Close() // GetClusterID returns the ID of the cluster GetClusterID() uint64 - // GetURLs returns the URLs of the servers. 
- GetURLs() []string + // GetKeyspaceID returns the ID of the keyspace + GetKeyspaceID() uint32 + // GetKeyspaceGroupID returns the ID of the keyspace group + GetKeyspaceGroupID() uint32 + // DiscoverMicroservice discovers the microservice with the specified type and returns the server urls. + DiscoverMicroservice(svcType serviceType) []string + // GetServiceURLs returns the URLs of the servers providing the service + GetServiceURLs() []string // GetServingEndpointClientConn returns the grpc client connection of the serving endpoint // which is the leader in a quorum-based cluster or the primary in a primary/secondary // configured cluster. @@ -174,7 +187,9 @@ func (c *pdServiceDiscovery) Init() error { } log.Info("[pd] init cluster id", zap.Uint64("cluster-id", c.clusterID)) - c.updateServiceMode() + if err := c.checkServiceModeChanged(); err != nil { + log.Warn("[pd] failed to check service mode and will check later", zap.Error(err)) + } c.wg.Add(2) go c.updateMemberLoop() @@ -218,7 +233,7 @@ func (c *pdServiceDiscovery) updateMemberLoop() { failpoint.Continue() }) if err := c.updateMember(); err != nil { - log.Error("[pd] failed to update member", zap.Strings("urls", c.GetURLs()), errs.ZapError(err)) + log.Error("[pd] failed to update member", zap.Strings("urls", c.GetServiceURLs()), errs.ZapError(err)) } } } @@ -240,7 +255,11 @@ func (c *pdServiceDiscovery) updateServiceModeLoop() { return case <-ticker.C: } - c.updateServiceMode() + if err := c.checkServiceModeChanged(); err != nil { + log.Error("[pd] failed to update service mode", + zap.Strings("urls", c.GetServiceURLs()), errs.ZapError(err)) + c.ScheduleCheckMemberChanged() // check if the leader changed + } } } @@ -263,13 +282,50 @@ func (c *pdServiceDiscovery) GetClusterID() uint64 { return c.clusterID } -// GetURLs returns the URLs of the servers. 
+// GetKeyspaceID returns the ID of the keyspace +func (c *pdServiceDiscovery) GetKeyspaceID() uint32 { + // PD/API service only supports the default keyspace + return defaultKeyspaceID +} + +// GetKeyspaceGroupID returns the ID of the keyspace group +func (c *pdServiceDiscovery) GetKeyspaceGroupID() uint32 { + // PD/API service only supports the default keyspace group + return defaultKeySpaceGroupID +} + +// DiscoverMicroservice discovers the microservice with the specified type and returns the server urls. +func (c *pdServiceDiscovery) DiscoverMicroservice(svcType serviceType) (urls []string) { + switch svcType { + case apiService: + urls = c.GetServiceURLs() + case tsoService: + leaderAddr := c.getLeaderAddr() + if len(leaderAddr) > 0 { + clusterInfo, err := c.getClusterInfo(c.ctx, leaderAddr, c.option.timeout) + if err != nil { + log.Error("[pd] failed to get cluster info", + zap.String("leader-addr", leaderAddr), errs.ZapError(err)) + return nil + } + urls = clusterInfo.TsoUrls + } else { + log.Error("[pd] failed to get leader addr") + } + default: + panic("invalid service type") + } + + return urls +} + +// GetServiceURLs returns the URLs of the servers. // For testing use. It should only be called when the client is closed. -func (c *pdServiceDiscovery) GetURLs() []string { +func (c *pdServiceDiscovery) GetServiceURLs() []string { return c.urls.Load().([]string) } -// GetServingAddr returns the grpc client connection of the serving endpoint +// GetServingEndpointClientConn returns the grpc client connection of the serving endpoint // which is the leader in a quorum-based cluster or the primary in a primary/secondary // configured cluster. 
func (c *pdServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { @@ -360,7 +416,7 @@ func (c *pdServiceDiscovery) initClusterID() error { ctx, cancel := context.WithCancel(c.ctx) defer cancel() clusterID := uint64(0) - for _, url := range c.GetURLs() { + for _, url := range c.GetServiceURLs() { members, err := c.getMembers(ctx, url, c.option.timeout) if err != nil || members.GetHeader() == nil { log.Warn("[pd] failed to get cluster id", zap.String("url", url), errs.ZapError(err)) @@ -386,29 +442,32 @@ func (c *pdServiceDiscovery) initClusterID() error { return nil } -func (c *pdServiceDiscovery) updateServiceMode() { +func (c *pdServiceDiscovery) checkServiceModeChanged() error { leaderAddr := c.getLeaderAddr() - if len(leaderAddr) > 0 { - clusterInfo, err := c.getClusterInfo(c.ctx, leaderAddr, c.option.timeout) - // If the method is not supported, we set it to pd mode. - if err != nil { + if len(leaderAddr) == 0 { + return errors.New("no leader found") + } + + clusterInfo, err := c.getClusterInfo(c.ctx, leaderAddr, c.option.timeout) + if err != nil { + if strings.Contains(err.Error(), "Unimplemented") { + // If the method is not supported, we set it to pd mode. // TODO: it's a hack way to solve the compatibility issue. // we need to remove this after all maintained version supports the method. 
- if strings.Contains(err.Error(), "Unimplemented") { - c.serviceModeUpdateCb(pdpb.ServiceMode_PD_SVC_MODE) - } else { - log.Warn("[pd] failed to get cluster info for the leader", zap.String("leader-addr", leaderAddr), errs.ZapError(err)) - } - return + c.serviceModeUpdateCb(pdpb.ServiceMode_PD_SVC_MODE) + return nil } - c.serviceModeUpdateCb(clusterInfo.ServiceModes[0]) - } else { - log.Warn("[pd] no leader found") + return err + } + if clusterInfo == nil || len(clusterInfo.ServiceModes) == 0 { + return errors.WithStack(errNoServiceModeReturned) } + c.serviceModeUpdateCb(clusterInfo.ServiceModes[0]) + return nil } func (c *pdServiceDiscovery) updateMember() error { - for i, url := range c.GetURLs() { + for i, url := range c.GetServiceURLs() { failpoint.Inject("skipFirstUpdateMember", func() { if i == 0 { failpoint.Continue() @@ -424,7 +483,7 @@ func (c *pdServiceDiscovery) updateMember() error { var errTSO error if err == nil { if members.GetLeader() == nil || len(members.GetLeader().GetClientUrls()) == 0 { - err = errs.ErrClientGetLeader.FastGenByArgs("leader address don't exist") + err = errs.ErrClientGetLeader.FastGenByArgs("leader address doesn't exist") } // Still need to update TsoAllocatorLeaders, even if there is no PD leader errTSO = c.switchTSOAllocatorLeaders(members.GetTsoAllocatorLeaders()) @@ -501,7 +560,7 @@ func (c *pdServiceDiscovery) updateURLs(members []*pdpb.Member) { } sort.Strings(urls) - oldURLs := c.GetURLs() + oldURLs := c.GetServiceURLs() // the url list is same. 
if reflect.DeepEqual(oldURLs, urls) { return diff --git a/client/tso_client.go b/client/tso_client.go index a13d635b986f..7585fdc34f65 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -48,7 +48,6 @@ type tsoRequest struct { done chan error physical int64 logical int64 - keyspaceID uint32 dcLocation string } diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index a852b62331b0..04d2ea412354 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -376,7 +376,8 @@ tsoBatchLoop: zap.String("dc-location", dc)) } else { log.Error("[tso] fetch pending tso requests error", - zap.String("dc-location", dc), errs.ZapError(errs.ErrClientGetTSO, err)) + zap.String("dc-location", dc), + errs.ZapError(errs.ErrClientGetTSO.FastGenByArgs("when fetch pending tso requests"), err)) } return } @@ -450,7 +451,10 @@ tsoBatchLoop: default: } c.svcDiscovery.ScheduleCheckMemberChanged() - log.Error("[tso] getTS error", zap.String("dc-location", dc), zap.String("stream-addr", streamAddr), errs.ZapError(errs.ErrClientGetTSO, err)) + log.Error("[tso] getTS error", + zap.String("dc-location", dc), + zap.String("stream-addr", streamAddr), + errs.ZapError(errs.ErrClientGetTSO.FastGenByArgs("after processing requests"), err)) // Set `stream` to nil and remove this stream from the `connectionCtxs` due to error. connectionCtxs.Delete(streamAddr) cancel() @@ -615,7 +619,7 @@ func (c *tsoClient) tryConnectToTSO( // or of keyspace group primary/secondaries. func (c *tsoClient) getAllTSOStreamBuilders() map[string]tsoStreamBuilder { var ( - addrs = c.svcDiscovery.GetURLs() + addrs = c.svcDiscovery.GetServiceURLs() streamBuilders = make(map[string]tsoStreamBuilder, len(addrs)) cc *grpc.ClientConn err error @@ -662,7 +666,8 @@ func (c *tsoClient) tryConnectToTSOWithProxy(dispatcherCtx context.Context, dc s cctx, cancel := context.WithCancel(dispatcherCtx) // Do not proxy the leader client. 
if addr != leaderAddr { - log.Info("[tso] use follower to forward tso stream to do the proxy", zap.String("dc", dc), zap.String("addr", addr)) + log.Info("[tso] use follower to forward tso stream to do the proxy", + zap.String("dc", dc), zap.String("addr", addr)) cctx = grpcutil.BuildForwardContext(cctx, forwardedHost) } // Create the TSO stream. @@ -676,7 +681,8 @@ func (c *tsoClient) tryConnectToTSOWithProxy(dispatcherCtx context.Context, dc s connectionCtxs.Store(addr, &tsoConnectionContext{addr, stream, cctx, cancel}) continue } - log.Error("[tso] create the tso stream failed", zap.String("dc", dc), zap.String("addr", addr), errs.ZapError(err)) + log.Error("[tso] create the tso stream failed", + zap.String("dc", dc), zap.String("addr", addr), errs.ZapError(err)) cancel() } return nil @@ -691,7 +697,9 @@ func extractSpanReference(tbc *tsoBatchController, opts []opentracing.StartSpanO return opts } -func (c *tsoClient) processRequests(stream tsoStream, dcLocation string, tbc *tsoBatchController, opts []opentracing.StartSpanOption) error { +func (c *tsoClient) processRequests( + stream tsoStream, dcLocation string, tbc *tsoBatchController, opts []opentracing.StartSpanOption, +) error { if len(opts) > 0 { span := opentracing.StartSpan("pdclient.processRequests", opts...) 
defer span.Finish() @@ -699,7 +707,9 @@ func (c *tsoClient) processRequests(stream tsoStream, dcLocation string, tbc *ts requests := tbc.getCollectedRequests() count := int64(len(requests)) - physical, logical, suffixBits, err := stream.processRequests(c.svcDiscovery.GetClusterID(), dcLocation, requests, tbc.batchStartTime) + physical, logical, suffixBits, err := stream.processRequests( + c.svcDiscovery.GetClusterID(), c.svcDiscovery.GetKeyspaceID(), c.svcDiscovery.GetKeyspaceGroupID(), + dcLocation, requests, tbc.batchStartTime) if err != nil { c.finishRequest(requests, 0, 0, 0, err) return err diff --git a/client/tso_service_discovery.go b/client/tso_service_discovery.go index 05d7acc36235..96478f2049ab 100644 --- a/client/tso_service_discovery.go +++ b/client/tso_service_discovery.go @@ -17,11 +17,10 @@ package pd import ( "context" "fmt" + "strings" "sync" - "sync/atomic" "time" - "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/tsopb" "github.com/pingcap/log" @@ -35,27 +34,121 @@ import ( const ( msServiceRootPath = "/ms" tsoServiceName = "tso" - // tspSvcDiscoveryFormat defines the key prefix for keyspace group primary election. + // tsoSvcDiscoveryFormat defines the key prefix for keyspace group primary election. // The entire key is in the format of "/ms//tso//primary". // The is 5 digits integer with leading zeros. - tspSvcDiscoveryFormat = msServiceRootPath + "/%d/" + tsoServiceName + "/%05d/primary" + tsoSvcDiscoveryFormat = msServiceRootPath + "/%d/" + tsoServiceName + "/%05d/primary" + // initRetryInterval is the rpc retry interval during the initialization phase. + initRetryInterval = time.Second + // tsoQueryRetryMaxTimes is the max retry times for querying TSO. + tsoQueryRetryMaxTimes = 10 + // tsoQueryRetryInterval is the retry interval for querying TSO. 
+ tsoQueryRetryInterval = 500 * time.Millisecond ) var _ ServiceDiscovery = (*tsoServiceDiscovery)(nil) var _ tsoAllocatorEventSource = (*tsoServiceDiscovery)(nil) +// keyspaceGroupSvcDiscovery is used for discovering the serving endpoints of the keyspace +// group to which the keyspace belongs +type keyspaceGroupSvcDiscovery struct { + sync.RWMutex + group *tsopb.KeyspaceGroup + // primaryAddr is the primary serving address + primaryAddr string + // secondaryAddrs are TSO secondary serving addresses + secondaryAddrs []string + // addrs are the primary/secondary serving addresses + addrs []string +} + +func (k *keyspaceGroupSvcDiscovery) update( + keyspaceGroup *tsopb.KeyspaceGroup, + newPrimaryAddr string, + secondaryAddrs, addrs []string, +) (oldPrimaryAddr string, primarySwitched bool) { + k.Lock() + defer k.Unlock() + + // If the new primary address is empty, we don't switch the primary address. + oldPrimaryAddr = k.primaryAddr + if len(newPrimaryAddr) > 0 { + primarySwitched = !strings.EqualFold(oldPrimaryAddr, newPrimaryAddr) + k.primaryAddr = newPrimaryAddr + } + + k.group = keyspaceGroup + k.secondaryAddrs = secondaryAddrs + k.addrs = addrs + + return +} + +// tsoServerDiscovery is for discovering the serving endpoints of the TSO servers +// TODO: dynamically update the TSO server addresses in the case of TSO server failover +// and scale-out/in. 
+type tsoServerDiscovery struct { + sync.RWMutex + addrs []string + // used for round-robin load balancing + selectIdx int + // failureCount counts the consecutive failures for communicating with the tso servers + failureCount int +} + +func (t *tsoServerDiscovery) getTSOServer(sd ServiceDiscovery) (string, error) { + t.Lock() + defer t.Unlock() + + if len(t.addrs) == 0 || t.failureCount == len(t.addrs) { + addrs := sd.DiscoverMicroservice(tsoService) + if len(addrs) == 0 { + return "", errors.New("no tso server address found") + } + + log.Info("update tso server addresses", zap.Strings("addrs", addrs)) + + t.addrs = addrs + t.selectIdx = 0 + t.failureCount = 0 + } + + // Pick a TSO server in a round-robin way. + tsoServerAddr := t.addrs[t.selectIdx] + t.selectIdx++ + t.selectIdx %= len(t.addrs) + + return tsoServerAddr, nil +} + +func (t *tsoServerDiscovery) countFailure() { + t.Lock() + defer t.Unlock() + t.failureCount++ +} + +func (t *tsoServerDiscovery) resetFailure() { + t.Lock() + defer t.Unlock() + t.failureCount = 0 +} + // tsoServiceDiscovery is the service discovery client of the independent TSO service + type tsoServiceDiscovery struct { - clusterID uint64 - keyspaceID uint32 - urls atomic.Value // Store as []string - // primary key is the etcd path used for discovering the serving endpoint of this keyspace - primaryKey string - // TSO Primary URL - primary atomic.Value // Store as string - // TSO Secondary URLs - secondaries atomic.Value // Store as []string - metacli MetaStorageClient + metacli MetaStorageClient + apiSvcDiscovery ServiceDiscovery + clusterID uint64 + keyspaceID uint32 + + // defaultDiscoveryKey is the etcd path used for discovering the serving endpoints of + // the default keyspace group + defaultDiscoveryKey string + // tsoServersDiscovery is for discovering the serving endpoints of the TSO servers + *tsoServerDiscovery + + // keyspaceGroupSD is for discovering the serving endpoints of the keyspace group + keyspaceGroupSD 
*keyspaceGroupSvcDiscovery // addr -> a gRPC connection clientConns sync.Map // Store as map[string]*grpc.ClientConn @@ -80,31 +173,46 @@ type tsoServiceDiscovery struct { // newTSOServiceDiscovery returns a new client-side service discovery for the independent TSO service. func newTSOServiceDiscovery( - ctx context.Context, metacli MetaStorageClient, - clusterID uint64, keyspaceID uint32, urls []string, tlsCfg *tlsutil.TLSConfig, option *option, + ctx context.Context, metacli MetaStorageClient, apiSvcDiscovery ServiceDiscovery, + clusterID uint64, keyspaceID uint32, tlsCfg *tlsutil.TLSConfig, option *option, ) ServiceDiscovery { ctx, cancel := context.WithCancel(ctx) c := &tsoServiceDiscovery{ ctx: ctx, cancel: cancel, metacli: metacli, + apiSvcDiscovery: apiSvcDiscovery, keyspaceID: keyspaceID, clusterID: clusterID, - primaryKey: fmt.Sprintf(tspSvcDiscoveryFormat, clusterID, keyspaceID), tlsCfg: tlsCfg, option: option, checkMembershipCh: make(chan struct{}, 1), } - c.urls.Store(urls) + c.keyspaceGroupSD = &keyspaceGroupSvcDiscovery{ + primaryAddr: "", + secondaryAddrs: make([]string, 0), + addrs: make([]string, 0), + } + c.tsoServerDiscovery = &tsoServerDiscovery{addrs: make([]string, 0)} + // Start with the default keyspace group. The actual keyspace group, to which the keyspace belongs, + // will be discovered later. 
+ c.defaultDiscoveryKey = fmt.Sprintf(tsoSvcDiscoveryFormat, clusterID, defaultKeySpaceGroupID) - log.Info("created tso service discovery", zap.String("discovery-key", c.primaryKey)) + log.Info("created tso service discovery", + zap.Uint64("cluster-id", clusterID), + zap.Uint32("keyspace-id", keyspaceID), + zap.String("default-discovery-key", c.defaultDiscoveryKey)) return c } // Init initialize the concrete client underlying func (c *tsoServiceDiscovery) Init() error { - if err := c.initRetry(c.updateMember); err != nil { + log.Info("initializing tso service discovery", + zap.Int("max-retry-times", c.option.maxRetryTimes), + zap.Duration("retry-interval", initRetryInterval)) + if err := c.retry(c.option.maxRetryTimes, initRetryInterval, c.updateMember); err != nil { + log.Error("failed to update member. initialization failed.", zap.Error(err)) c.cancel() return err } @@ -113,16 +221,18 @@ func (c *tsoServiceDiscovery) Init() error { return nil } -func (c *tsoServiceDiscovery) initRetry(f func() error) error { +func (c *tsoServiceDiscovery) retry( + maxRetryTimes int, retryInterval time.Duration, f func() error, +) error { var err error - for i := 0; i < c.option.maxRetryTimes; i++ { + for i := 0; i < maxRetryTimes; i++ { if err = f(); err == nil { return nil } select { case <-c.ctx.Done(): return err - case <-time.After(time.Second): + case <-time.After(retryInterval): } } return errors.WithStack(err) @@ -160,7 +270,10 @@ func (c *tsoServiceDiscovery) startCheckMemberLoop() { log.Info("[tso] exit check member loop") return } - if err := c.updateMember(); err != nil { + // Make sure tsoQueryRetryMaxTimes * tsoQueryRetryInterval is far less than memberUpdateInterval, + // so that we can speed up the process of tso service discovery when failover happens on the + // tso service side and also ensures it won't call updateMember too frequently during normal time. 
+ if err := c.retry(tsoQueryRetryMaxTimes, tsoQueryRetryInterval, c.updateMember); err != nil { log.Error("[tso] failed to update member", errs.ZapError(err)) } } @@ -171,10 +284,43 @@ func (c *tsoServiceDiscovery) GetClusterID() uint64 { return c.clusterID } -// GetURLs returns the URLs of the servers. +// GetKeyspaceID returns the ID of the keyspace +func (c *tsoServiceDiscovery) GetKeyspaceID() uint32 { + return c.keyspaceID +} + +// GetKeyspaceGroupID returns the ID of the keyspace group. If the keyspace group is unknown, +// it returns the default keyspace group ID. +func (c *tsoServiceDiscovery) GetKeyspaceGroupID() uint32 { + c.keyspaceGroupSD.RLock() + defer c.keyspaceGroupSD.RUnlock() + if c.keyspaceGroupSD.group == nil { + return defaultKeySpaceGroupID + } + return c.keyspaceGroupSD.group.Id +} + +// DiscoverMicroservice discovers the microservice with the specified type and returns the server urls. +func (c *tsoServiceDiscovery) DiscoverMicroservice(svcType serviceType) []string { + var urls []string + + switch svcType { + case apiService: + case tsoService: + return c.apiSvcDiscovery.DiscoverMicroservice(tsoService) + default: + panic("invalid service type") + } + + return urls +} + +// GetServiceURLs returns the URLs of the tso primary/secondary addresses of this keyspace group. // For testing use. It should only be called when the client is closed. -func (c *tsoServiceDiscovery) GetURLs() []string { - return c.urls.Load().([]string) +func (c *tsoServiceDiscovery) GetServiceURLs() []string { + c.keyspaceGroupSD.RLock() + defer c.keyspaceGroupSD.RUnlock() + return c.keyspaceGroupSD.addrs } // GetServingAddr returns the grpc client connection of the serving endpoint @@ -198,7 +344,7 @@ func (c *tsoServiceDiscovery) GetServingAddr() string { } // GetBackupAddrs gets the addresses of the current reachable and healthy -// backup service endpoints. 
Backup service endpoints are secondaries in // a primary/secondary configured cluster. func (c *tsoServiceDiscovery) GetBackupAddrs() []string { return c.getSecondaryAddrs() @@ -220,7 +366,12 @@ func (c *tsoServiceDiscovery) ScheduleCheckMemberChanged() { // Immediately check if there is any membership change among the primary/secondaries in // a primary/secondary configured cluster. func (c *tsoServiceDiscovery) CheckMemberChanged() error { - return c.updateMember() + c.apiSvcDiscovery.CheckMemberChanged() + if err := c.retry(tsoQueryRetryMaxTimes, tsoQueryRetryInterval, c.updateMember); err != nil { + log.Error("[tso] failed to update member", errs.ZapError(err)) + return err + } + return nil } // AddServingAddrSwitchedCallback adds callbacks which will be called when the primary in @@ -251,69 +402,127 @@ func (c *tsoServiceDiscovery) SetTSOGlobalServAddrUpdatedCallback(callback tsoGl // getPrimaryAddr returns the primary address. func (c *tsoServiceDiscovery) getPrimaryAddr() string { - primaryAddr := c.primary.Load() - if primaryAddr == nil { - return "" - } - return primaryAddr.(string) + c.keyspaceGroupSD.RLock() + defer c.keyspaceGroupSD.RUnlock() + return c.keyspaceGroupSD.primaryAddr } // getSecondaryAddrs returns the secondary addresses. func (c *tsoServiceDiscovery) getSecondaryAddrs() []string { - secondaryAddrs := c.secondaries.Load() - if secondaryAddrs == nil { - return []string{} - } - return secondaryAddrs.([]string) + c.keyspaceGroupSD.RLock() + defer c.keyspaceGroupSD.RUnlock() + return c.keyspaceGroupSD.secondaryAddrs } -func (c *tsoServiceDiscovery) switchPrimary(addrs []string) error { - // FIXME: How to safely compare primary urls? For now, only allows one client url. 
- addr := addrs[0] - oldPrimary := c.getPrimaryAddr() - if addr == oldPrimary { - return nil - } - - if _, err := c.GetOrCreateGRPCConn(addr); err != nil { - log.Warn("[tso] failed to connect primary", zap.String("primary", addr), errs.ZapError(err)) - return err - } - // Set PD primary and Global TSO Allocator (which is also the PD primary) - c.primary.Store(addr) +func (c *tsoServiceDiscovery) afterPrimarySwitched(oldPrimary, newPrimary string) error { // Run callbacks if c.globalAllocPrimariesUpdatedCb != nil { - if err := c.globalAllocPrimariesUpdatedCb(addr); err != nil { + if err := c.globalAllocPrimariesUpdatedCb(newPrimary); err != nil { return err } } - log.Info("[tso] switch primary", zap.String("new-primary", addr), zap.String("old-primary", oldPrimary)) + log.Info("[tso] switch primary", + zap.String("new-primary", newPrimary), + zap.String("old-primary", oldPrimary)) return nil } func (c *tsoServiceDiscovery) updateMember() error { - resp, err := c.metacli.Get(c.ctx, []byte(c.primaryKey)) + // The keyspace membership or the primary serving address of the keyspace group, to which this + // keyspace belongs, might have been changed. We need to query tso servers to get the latest info. + tsoServerAddr, err := c.tsoServerDiscovery.getTSOServer(c.apiSvcDiscovery) + if err != nil { + log.Error("[tso] failed to get tso server", errs.ZapError(err)) + return err + } + keyspaceGroup, err := c.findGroupByKeyspaceID(c.keyspaceID, tsoServerAddr, updateMemberTimeout) if err != nil { - log.Error("[tso] failed to get the keyspace serving endpoint", zap.String("primary-key", c.primaryKey), errs.ZapError(err)) + c.tsoServerDiscovery.countFailure() + log.Error("[tso] failed to find the keyspace group", errs.ZapError(err)) return err } + c.tsoServerDiscovery.resetFailure() + + log.Info("[tso] update keyspace group", zap.String("keyspace-group", keyspaceGroup.String())) + + // Initialize the serving addresses from the returned keyspace group info. 
+ primaryAddr := "" + secondaryAddrs := make([]string, 0) + addrs := make([]string, 0, len(keyspaceGroup.Members)) + for _, m := range keyspaceGroup.Members { + addrs = append(addrs, m.Address) + if m.IsPrimary { + primaryAddr = m.Address + } else { + secondaryAddrs = append(secondaryAddrs, m.Address) + } + } + + // If the primary address is not empty, we need to create a grpc connection to it, and do it + // out of the critical section of the keyspace group service discovery. + if len(primaryAddr) > 0 { + if primarySwitched := !strings.EqualFold(primaryAddr, c.getPrimaryAddr()); primarySwitched { + if _, err := c.GetOrCreateGRPCConn(primaryAddr); err != nil { + log.Warn("[tso] failed to connect the next primary", + zap.String("next-primary", primaryAddr), errs.ZapError(err)) + return err + } + } + } - if resp == nil || len(resp.Kvs) == 0 { - log.Error("[tso] didn't find the keyspace serving endpoint", zap.String("primary-key", c.primaryKey)) - return errs.ErrClientGetServingEndpoint - } else if resp.Count > 1 { - return errs.ErrClientGetMultiResponse.FastGenByArgs(resp.Kvs) + oldPrimary, primarySwitched := c.keyspaceGroupSD.update(keyspaceGroup, primaryAddr, secondaryAddrs, addrs) + if primarySwitched { + if err := c.afterPrimarySwitched(oldPrimary, primaryAddr); err != nil { + return err + } } - value := resp.Kvs[0].Value - primary := &tsopb.Participant{} - if err := proto.Unmarshal(value, primary); err != nil { - return errs.ErrClientProtoUnmarshal.Wrap(err).GenWithStackByCause() + // Even if the primary address is empty, we still updated other returned info above, including the + // keyspace group info and the secondary addresses. 
+ if len(primaryAddr) == 0 { + return errors.New("no primary address found") } - listenUrls := primary.GetListenUrls() - if len(listenUrls) == 0 { - log.Error("[tso] the keyspace serving endpoint list is empty", zap.String("primary-key", c.primaryKey)) - return errs.ErrClientGetServingEndpoint + + return nil +} + +// Query the keyspace group info from the tso server by the keyspace ID. The server side will return +// the info of the keyspace group to which this keyspace belongs. +func (c *tsoServiceDiscovery) findGroupByKeyspaceID( + keyspaceID uint32, tsoSrvAddr string, timeout time.Duration, +) (*tsopb.KeyspaceGroup, error) { + ctx, cancel := context.WithTimeout(c.ctx, timeout) + defer cancel() + + cc, err := c.GetOrCreateGRPCConn(tsoSrvAddr) + if err != nil { + return nil, err + } + + resp, err := tsopb.NewTSOClient(cc).FindGroupByKeyspaceID( + ctx, &tsopb.FindGroupByKeyspaceIDRequest{ + Header: &tsopb.RequestHeader{ + ClusterId: c.clusterID, + KeyspaceId: keyspaceID, + KeyspaceGroupId: defaultKeySpaceGroupID, + }, + KeyspaceId: keyspaceID, + }) + if err != nil { + attachErr := errors.Errorf("error:%s target:%s status:%s", + err, cc.Target(), cc.GetState().String()) + return nil, errs.ErrClientFindGroupByKeyspaceID.Wrap(attachErr).GenWithStackByCause() + } + if resp.GetHeader().GetError() != nil { + attachErr := errors.Errorf("error:%s target:%s status:%s", + resp.GetHeader().GetError().String(), cc.Target(), cc.GetState().String()) + return nil, errs.ErrClientFindGroupByKeyspaceID.Wrap(attachErr).GenWithStackByCause() } - return c.switchPrimary(listenUrls) + if resp.KeyspaceGroup == nil { + attachErr := errors.Errorf("error:%s target:%s status:%s", + "no keyspace group found", cc.Target(), cc.GetState().String()) + return nil, errs.ErrClientFindGroupByKeyspaceID.Wrap(attachErr).GenWithStackByCause() + } + + return resp.KeyspaceGroup, nil } diff --git a/client/tso_stream.go b/client/tso_stream.go index baa764dffb2d..5b658279caca 100644 --- a/client/tso_stream.go 
+++ b/client/tso_stream.go @@ -70,7 +70,9 @@ type tsoTSOStreamBuilder struct { client tsopb.TSOClient } -func (b *tsoTSOStreamBuilder) build(ctx context.Context, cancel context.CancelFunc, timeout time.Duration) (tsoStream, error) { +func (b *tsoTSOStreamBuilder) build( + ctx context.Context, cancel context.CancelFunc, timeout time.Duration, +) (tsoStream, error) { done := make(chan struct{}) // TODO: we need to handle a conner case that this goroutine is timeout while the stream is successfully created. go checkStreamTimeout(ctx, cancel, done, timeout) @@ -97,16 +99,19 @@ func checkStreamTimeout(ctx context.Context, cancel context.CancelFunc, done cha type tsoStream interface { // processRequests processes TSO requests in streaming mode to get timestamps - processRequests(clusterID uint64, dcLocation string, requests []*tsoRequest, - batchStartTime time.Time) (physical, logical int64, suffixBits uint32, err error) + processRequests( + clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, + requests []*tsoRequest, batchStartTime time.Time, + ) (physical, logical int64, suffixBits uint32, err error) } type pdTSOStream struct { stream pdpb.PD_TsoClient } -func (s *pdTSOStream) processRequests(clusterID uint64, dcLocation string, requests []*tsoRequest, - batchStartTime time.Time) (physical, logical int64, suffixBits uint32, err error) { +func (s *pdTSOStream) processRequests( + clusterID uint64, _, _ uint32, dcLocation string, requests []*tsoRequest, batchStartTime time.Time, +) (physical, logical int64, suffixBits uint32, err error) { start := time.Now() count := int64(len(requests)) req := &pdpb.TsoRequest{ @@ -152,13 +157,17 @@ type tsoTSOStream struct { stream tsopb.TSO_TsoClient } -func (s *tsoTSOStream) processRequests(clusterID uint64, dcLocation string, requests []*tsoRequest, - batchStartTime time.Time) (physical, logical int64, suffixBits uint32, err error) { +func (s *tsoTSOStream) processRequests( + clusterID uint64, keyspaceID, 
keyspaceGroupID uint32, dcLocation string, + requests []*tsoRequest, batchStartTime time.Time, +) (physical, logical int64, suffixBits uint32, err error) { start := time.Now() count := int64(len(requests)) req := &tsopb.TsoRequest{ Header: &tsopb.RequestHeader{ - ClusterId: clusterID, + ClusterId: clusterID, + KeyspaceId: keyspaceID, + KeyspaceGroupId: keyspaceGroupID, }, Count: uint32(count), DcLocation: dcLocation, diff --git a/errors.toml b/errors.toml index 85178c926ec5..d425288d9558 100644 --- a/errors.toml +++ b/errors.toml @@ -63,7 +63,7 @@ create TSO stream failed, %s ["PD:client:ErrClientGetLeader"] error = ''' -get leader from %v error +get leader failed, %v ''' ["PD:client:ErrClientGetMember"] diff --git a/go.mod b/go.mod index 919230d04d18..08f2972697b3 100644 --- a/go.mod +++ b/go.mod @@ -28,8 +28,8 @@ require ( github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d github.com/pingcap/errcode v0.3.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c - github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce - github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a + github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 + github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 github.com/pingcap/tidb-dashboard v0.0.0-20230209052558-a58fc2a7e924 @@ -41,7 +41,7 @@ require ( github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.2 - github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba + github.com/swaggo/http-swagger v1.2.6 github.com/swaggo/swag v1.8.3 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/unrolled/render v1.0.1 @@ -152,14 +152,14 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/samber/lo v1.37.0 // indirect - github.com/sergi/go-diff 
v1.0.1-0.20180205163309-da645544ed44 // indirect + github.com/sergi/go-diff v1.1.0 // indirect github.com/shoenig/go-m1cpu v0.1.5 // indirect github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/sirupsen/logrus v1.6.0 // indirect github.com/soheilhy/cmux v0.1.4 github.com/stretchr/objx v0.5.0 // indirect - github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 // indirect + github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect github.com/tidwall/gjson v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect @@ -177,7 +177,7 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect + golang.org/x/image v0.5.0 // indirect golang.org/x/mod v0.8.0 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/oauth2 v0.4.0 // indirect diff --git a/go.sum b/go.sum index 25d75636142c..5e6f91ae70ba 100644 --- a/go.sum +++ b/go.sum @@ -9,7 +9,6 @@ github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6Xge github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -20,6 +19,7 @@ github.com/VividCortex/mysqlerr v1.0.0 h1:5pZ2TZA+YnzPgzBfiUWGqWmKDVNBdrkf9g+DNe 
github.com/VividCortex/mysqlerr v1.0.0/go.mod h1:xERx8E4tBhLvpjzdUyQiSfUxeMcATEQrflDAfXsqcAE= github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502 h1:L8IbaI/W6h5Cwgh0n4zGeZpVK78r/jBf9ASurHo9+/o= github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM= +github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -146,16 +146,12 @@ github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURU github.com/gin-contrib/gzip v0.0.1 h1:ezvKOL6jH+jlzdHNE4h9h8q8uMpDQjyl0NN0Jd7jozc= github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/go-chi/chi v4.0.2+incompatible 
h1:maB6vn6FqCxrpz4FqWdh4+lwpyZIQS7YEAUcHlgXVRs= -github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-echarts/go-echarts v1.0.0 h1:n181E4iXwj4zrU9VYmdM2m8dyhERt2w9k9YhHqdp6A8= github.com/go-echarts/go-echarts v1.0.0/go.mod h1:qbmyAb/Rl1f2w7wKba1D4LoNq4U164yO4/wedFbcWyo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -167,18 +163,13 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -262,7 +253,7 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -323,6 +314,7 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -347,7 +339,6 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik= github.com/lufia/plan9stats 
v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= @@ -357,7 +348,6 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -408,6 +398,11 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= 
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= @@ -416,7 +411,6 @@ github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 h1:64bxqeTEN0/xoE github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= -github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= @@ -425,11 +419,11 @@ github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= -github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce h1:Y1kCxlCtlPTMtVcOkjUcuQKh+YrluSo7+7YMCQSzy30= -github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce/go.mod h1:w4PEZ5y16LeofeeGwdgZB4ddv9bLyDuIX+ljstgKZyk= 
+github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= +github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= @@ -487,8 +481,8 @@ github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= -github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44 h1:tB9NOR21++IjLyVx3/PCPhWMwqGNCMQEH96A6dMZ/gc= -github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= 
github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= @@ -507,6 +501,8 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smallnest/chanx v0.0.0-20221229104322-eb4c998d2072 h1:Txo4SXVJq/OgEjwgkWoxkMoTjGlcrgsQE/XSghjmu0w= github.com/smallnest/chanx v0.0.0-20221229104322-eb4c998d2072/go.mod h1:+4nWMF0+CqEcU74SnX2NxaGqZ8zX4pcQ8Jcs77DbX5A= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -537,13 +533,11 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 h1:PyYN9JH5jY9j6av01SpfRMb+1DWg/i3MbGOKPxJ2wjM= -github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= -github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= -github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba h1:lUPlXKqgbqT2SVg2Y+eT9mu5wbqMnG+i/+Q9nK7C0Rs= -github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod 
h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= -github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= -github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= +github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 h1:+iNTcqQJy0OZ5jk6a5NLib47eqXK8uYcPX+O4+cBpEM= +github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= +github.com/swaggo/http-swagger v1.2.6 h1:ihTjChUoSRMpFMjWw+0AkL1Ti4r6v8pCgVYLmQVRlRw= +github.com/swaggo/http-swagger v1.2.6/go.mod h1:CcoICgY3yVDk2u1LQUCMHbAj0fjlxIX+873psXlIKNA= +github.com/swaggo/swag v1.7.9/go.mod h1:gZ+TJ2w/Ve1RwQsA2IRoSOTidHz6DX+PIG8GWvbnoLU= github.com/swaggo/swag v1.8.3 h1:3pZSSCQ//gAH88lfmxM3Cd1+JCsxV8Md6f36b9hrZ5s= github.com/swaggo/swag v1.8.3/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= @@ -567,11 +561,9 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966 h1:j6JEOq5QWFker+d7mFQYOhjTZonQ7YkLTHm56dbn+yM= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec 
v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= @@ -592,6 +584,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -639,14 +633,16 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a h1:tlXy25amD5A7gOfbXdqCGN5k8ESEed/Ee1E5RcrYnqU= golang.org/x/exp 
v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.5.0 h1:5JMiNunQeQw++mMOz48/ISeNu3Iweh/JaZU8ZLqHRrI= +golang.org/x/image v0.5.0/go.mod h1:FVC7BI/5Ym8R25iw5OLsgshdUBbT1h5jZTpA+mvAdZ4= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -658,6 +654,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -670,10 +667,8 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -684,7 +679,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -700,6 +697,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -712,7 +710,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -736,15 +733,19 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -752,6 +753,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -763,10 +765,9 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -784,6 +785,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod 
h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/errs/errno.go b/pkg/errs/errno.go index fc55af7a23a0..37524798046e 100644 --- a/pkg/errs/errno.go +++ b/pkg/errs/errno.go @@ -83,7 +83,7 @@ var ( ErrClientCreateTSOStream = errors.Normalize("create TSO stream failed, %s", errors.RFCCodeText("PD:client:ErrClientCreateTSOStream")) ErrClientGetTSOTimeout = errors.Normalize("get TSO timeout", errors.RFCCodeText("PD:client:ErrClientGetTSOTimeout")) ErrClientGetTSO = errors.Normalize("get TSO failed, %v", errors.RFCCodeText("PD:client:ErrClientGetTSO")) - ErrClientGetLeader = errors.Normalize("get leader from %v error", errors.RFCCodeText("PD:client:ErrClientGetLeader")) + ErrClientGetLeader = errors.Normalize("get leader failed, %v", errors.RFCCodeText("PD:client:ErrClientGetLeader")) ErrClientGetMember = errors.Normalize("get member failed", errors.RFCCodeText("PD:client:ErrClientGetMember")) ) diff --git a/pkg/keyspace/keyspace_test.go b/pkg/keyspace/keyspace_test.go index 55e6ca352214..f1ef85711fdb 100644 --- a/pkg/keyspace/keyspace_test.go +++ b/pkg/keyspace/keyspace_test.go @@ -61,7 +61,7 @@ func (suite *keyspaceTestSuite) SetupTest() { allocator := mockid.NewIDAllocator() kgm := NewKeyspaceGroupManager(suite.ctx, store, nil, 0) suite.manager = NewKeyspaceManager(store, nil, allocator, &mockConfig{}, kgm) - suite.NoError(kgm.Bootstrap(suite.ctx)) + suite.NoError(kgm.Bootstrap()) suite.NoError(suite.manager.Bootstrap()) } diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go index 28a16afeb747..d1bfbbf59b69 100644 --- a/pkg/keyspace/tso_keyspace_group.go +++ b/pkg/keyspace/tso_keyspace_group.go @@ -36,9 +36,10 @@ import ( ) const ( - defaultBalancerPolicy = balancer.PolicyRoundRobin - allocNodeTimeout = 1 * time.Second - allocNodeInterval = 10 * time.Millisecond + defaultBalancerPolicy = balancer.PolicyRoundRobin 
+ allocNodesToKeyspaceGroupsInterval = 1 * time.Second + allocNodesTimeout = 1 * time.Second + allocNodesInterval = 10 * time.Millisecond // TODO: move it to etcdutil watchEtcdChangeRetryInterval = 1 * time.Second maxRetryTimes = 25 @@ -71,8 +72,6 @@ type GroupManager struct { // tsoServiceEndKey is the end key of TSO service in etcd. tsoServiceEndKey string - policy balancer.Policy - // TODO: add user kind with different balancer // when we ensure where the correspondence between tso node and user kind will be found nodesBalancer balancer.Balancer[string] @@ -88,20 +87,31 @@ func NewKeyspaceGroupManager(ctx context.Context, store endpoint.KeyspaceGroupSt for i := 0; i < int(endpoint.UserKindCount); i++ { groups[endpoint.UserKind(i)] = newIndexedHeap(int(utils.MaxKeyspaceGroupCountInUse)) } - return &GroupManager{ - ctx: ctx, - cancel: cancel, - store: store, - client: client, - tsoServiceKey: key, - tsoServiceEndKey: clientv3.GetPrefixRangeEnd(key) + "/", - policy: defaultBalancerPolicy, - groups: groups, + m := &GroupManager{ + ctx: ctx, + cancel: cancel, + store: store, + client: client, + tsoServiceKey: key, + tsoServiceEndKey: clientv3.GetPrefixRangeEnd(key) + "/", + groups: groups, + nodesBalancer: balancer.GenByPolicy[string](defaultBalancerPolicy), + serviceRegistryMap: make(map[string]string), } + + // If the etcd client is not nil, start the watch loop for the registered tso servers. + // The PD(TSO) Client relies on this info to discover tso servers. + if m.client != nil { + log.Info("start the watch loop for tso service discovery") + m.wg.Add(1) + go m.startWatchLoop(ctx) + } + + return m } // Bootstrap saves default keyspace group info and init group mapping in the memory. -func (m *GroupManager) Bootstrap(ctx context.Context) error { +func (m *GroupManager) Bootstrap() error { // Force the membership restriction that the default keyspace must belong to default keyspace group. 
// Have no information to specify the distribution of the default keyspace group replicas, so just // leave the replica/member list empty. The TSO service will assign the default keyspace group replica @@ -114,6 +124,13 @@ func (m *GroupManager) Bootstrap(ctx context.Context) error { m.Lock() defer m.Unlock() + + // If the etcd client is not nil, start the watch loop. + if m.client != nil { + m.wg.Add(1) + go m.allocNodesToAllKeyspaceGroups() + } + // Ignore the error if default keyspace group already exists in the storage (e.g. PD restart/recover). err := m.saveKeyspaceGroups([]*endpoint.KeyspaceGroup{defaultKeyspaceGroup}, false) if err != nil && err != ErrKeyspaceGroupExists { @@ -130,13 +147,6 @@ func (m *GroupManager) Bootstrap(ctx context.Context) error { m.groups[userKind].Put(group) } - // If the etcd client is not nil, start the watch loop. - if m.client != nil { - m.nodesBalancer = balancer.GenByPolicy[string](m.policy) - m.serviceRegistryMap = make(map[string]string) - m.wg.Add(1) - go m.startWatchLoop(ctx) - } return nil } @@ -146,6 +156,45 @@ func (m *GroupManager) Close() { m.wg.Wait() } +func (m *GroupManager) allocNodesToAllKeyspaceGroups() { + defer logutil.LogPanic() + defer m.wg.Done() + ticker := time.NewTicker(allocNodesToKeyspaceGroupsInterval) + defer ticker.Stop() + for { + select { + case <-m.ctx.Done(): + return + case <-ticker.C: + } + countOfNodes := m.GetNodesCount() + if countOfNodes < utils.KeyspaceGroupDefaultReplicaCount { + continue + } + groups, err := m.store.LoadKeyspaceGroups(utils.DefaultKeyspaceGroupID, 0) + if err != nil { + log.Error("failed to load the all keyspace group", zap.Error(err)) + continue + } + withError := false + for _, group := range groups { + if len(group.Members) < utils.KeyspaceGroupDefaultReplicaCount { + nodes, err := m.AllocNodesForKeyspaceGroup(group.ID, utils.KeyspaceGroupDefaultReplicaCount) + if err != nil { + withError = true + log.Error("failed to alloc nodes for keyspace group", zap.Error(err)) 
+ continue + } + group.Members = nodes + } + } + if !withError { + // all keyspace groups have equal or more than default replica count + return + } + } +} + func (m *GroupManager) startWatchLoop(parentCtx context.Context) { defer logutil.LogPanic() defer m.wg.Done() @@ -156,12 +205,9 @@ func (m *GroupManager) startWatchLoop(parentCtx context.Context) { revision int64 err error ) + ticker := time.NewTicker(retryInterval) + defer ticker.Stop() for i := 0; i < maxRetryTimes; i++ { - select { - case <-ctx.Done(): - return - case <-time.After(retryInterval): - } resp, err = etcdutil.EtcdKVGet(m.client, m.tsoServiceKey, clientv3.WithRange(m.tsoServiceEndKey)) if err == nil { revision = resp.Header.Revision + 1 @@ -177,6 +223,11 @@ func (m *GroupManager) startWatchLoop(parentCtx context.Context) { break } log.Warn("failed to get tso service addrs from etcd and will retry", zap.Error(err)) + select { + case <-m.ctx.Done(): + return + case <-ticker.C: + } } if err != nil || revision == 0 { log.Warn("failed to get tso service addrs from etcd finally when loading", zap.Error(err)) @@ -265,6 +316,14 @@ func (m *GroupManager) CreateKeyspaceGroups(keyspaceGroups []*endpoint.KeyspaceG return nil } +// GetTSOServiceAddrs gets all TSO service addresses. +func (m *GroupManager) GetTSOServiceAddrs() []string { + if m == nil || m.nodesBalancer == nil { + return nil + } + return m.nodesBalancer.GetAll() +} + // GetKeyspaceGroups gets keyspace groups from the start ID with limit. // If limit is 0, it will load all keyspace groups from the start ID. func (m *GroupManager) GetKeyspaceGroups(startID uint32, limit int) ([]*endpoint.KeyspaceGroup, error) { @@ -603,18 +662,23 @@ func (m *GroupManager) FinishSplitKeyspaceByID(splitTargetID uint32) error { return nil } -// GetNodesNum returns the number of nodes. -func (m *GroupManager) GetNodesNum() int { +// GetNodesCount returns the count of nodes. 
+func (m *GroupManager) GetNodesCount() int { + if m.nodesBalancer == nil { + return 0 + } return m.nodesBalancer.Len() } // AllocNodesForKeyspaceGroup allocates nodes for the keyspace group. -func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, replica int) ([]endpoint.KeyspaceGroupMember, error) { - ctx, cancel := context.WithTimeout(m.ctx, allocNodeTimeout) +func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, desiredReplicaCount int) ([]endpoint.KeyspaceGroupMember, error) { + m.Lock() + defer m.Unlock() + ctx, cancel := context.WithTimeout(m.ctx, allocNodesTimeout) defer cancel() - ticker := time.NewTicker(allocNodeInterval) + ticker := time.NewTicker(allocNodesInterval) defer ticker.Stop() - nodes := make([]endpoint.KeyspaceGroupMember, 0, replica) + nodes := make([]endpoint.KeyspaceGroupMember, 0, desiredReplicaCount) err := m.store.RunInTxn(m.ctx, func(txn kv.Txn) error { kg, err := m.store.LoadKeyspaceGroup(txn, id) if err != nil { @@ -628,14 +692,17 @@ func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, replica int) ([]end exists[member.Address] = struct{}{} nodes = append(nodes, member) } - for len(exists) < replica { + if len(exists) >= desiredReplicaCount { + return nil + } + for len(exists) < desiredReplicaCount { select { case <-ctx.Done(): return nil case <-ticker.C: } - num := m.GetNodesNum() - if num < replica || num == 0 { // double check + countOfNodes := m.GetNodesCount() + if countOfNodes < desiredReplicaCount || countOfNodes == 0 { // double check return ErrNoAvailableNode } addr := m.nodesBalancer.Next() @@ -654,5 +721,38 @@ func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, replica int) ([]end if err != nil { return nil, err } + log.Info("alloc nodes for keyspace group", zap.Uint32("id", id), zap.Reflect("nodes", nodes)) return nodes, nil } + +// SetNodesForKeyspaceGroup sets the nodes for the keyspace group. 
+func (m *GroupManager) SetNodesForKeyspaceGroup(id uint32, nodes []string) error { + m.Lock() + defer m.Unlock() + return m.store.RunInTxn(m.ctx, func(txn kv.Txn) error { + kg, err := m.store.LoadKeyspaceGroup(txn, id) + if err != nil { + return err + } + if kg == nil { + return ErrKeyspaceGroupNotExists + } + members := make([]endpoint.KeyspaceGroupMember, 0, len(nodes)) + for _, node := range nodes { + members = append(members, endpoint.KeyspaceGroupMember{Address: node}) + } + kg.Members = members + return m.store.SaveKeyspaceGroup(txn, kg) + }) +} + +// IsExistNode checks if the node exists. +func (m *GroupManager) IsExistNode(addr string) bool { + nodes := m.nodesBalancer.GetAll() + for _, node := range nodes { + if node == addr { + return true + } + } + return false +} diff --git a/pkg/keyspace/tso_keyspace_group_test.go b/pkg/keyspace/tso_keyspace_group_test.go index 080e62a6e25c..ed7992ba5529 100644 --- a/pkg/keyspace/tso_keyspace_group_test.go +++ b/pkg/keyspace/tso_keyspace_group_test.go @@ -47,7 +47,7 @@ func (suite *keyspaceGroupTestSuite) SetupTest() { idAllocator := mockid.NewIDAllocator() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) suite.kg = NewKeyspaceManager(store, cluster, idAllocator, &mockConfig{}, suite.kgm) - suite.NoError(suite.kgm.Bootstrap(suite.ctx)) + suite.NoError(suite.kgm.Bootstrap()) } func (suite *keyspaceGroupTestSuite) TearDownTest() { diff --git a/pkg/mcs/resource_manager/server/manager.go b/pkg/mcs/resource_manager/server/manager.go index b9c9b37c7d95..b768342ce2b3 100644 --- a/pkg/mcs/resource_manager/server/manager.go +++ b/pkg/mcs/resource_manager/server/manager.go @@ -312,25 +312,25 @@ func (m *Manager) backgroundMetricsFlush(ctx context.Context) { ) // RU info. if consumption.RRU != 0 { - rruMetrics.Observe(consumption.RRU) + rruMetrics.Add(consumption.RRU) } if consumption.WRU != 0 { - wruMetrics.Observe(consumption.WRU) + wruMetrics.Add(consumption.WRU) } // Byte info. 
if consumption.ReadBytes != 0 { - readByteMetrics.Observe(consumption.ReadBytes) + readByteMetrics.Add(consumption.ReadBytes) } if consumption.WriteBytes != 0 { - writeByteMetrics.Observe(consumption.WriteBytes) + writeByteMetrics.Add(consumption.WriteBytes) } // CPU time info. if consumption.TotalCpuTimeMs > 0 { if consumption.SqlLayerCpuTimeMs > 0 { sqlLayerRuMetrics.Add(consumption.SqlLayerCpuTimeMs * m.controllerConfig.RequestUnit.CPUMsCost) - sqlCPUMetrics.Observe(consumption.SqlLayerCpuTimeMs) + sqlCPUMetrics.Add(consumption.SqlLayerCpuTimeMs) } - kvCPUMetrics.Observe(consumption.TotalCpuTimeMs - consumption.SqlLayerCpuTimeMs) + kvCPUMetrics.Add(consumption.TotalCpuTimeMs - consumption.SqlLayerCpuTimeMs) } // RPC count info. if consumption.KvReadRpcCount != 0 { diff --git a/pkg/mcs/resource_manager/server/metrics.go b/pkg/mcs/resource_manager/server/metrics.go index 6ecf93b1d45f..c9539908729f 100644 --- a/pkg/mcs/resource_manager/server/metrics.go +++ b/pkg/mcs/resource_manager/server/metrics.go @@ -14,9 +14,7 @@ package server -import ( - "github.com/prometheus/client_golang/prometheus" -) +import "github.com/prometheus/client_golang/prometheus" const ( namespace = "resource_manager" @@ -39,65 +37,60 @@ var ( Help: "Indicate the resource manager server info, and the value is the start timestamp (s).", }, []string{"version", "hash"}) // RU cost metrics. - readRequestUnitCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + // `sum` is added to the name to maintain compatibility with the previous use of histogram. 
+ readRequestUnitCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: ruSubsystem, - Name: "read_request_unit", - Help: "Bucketed histogram of the read request unit cost for all resource groups.", - Buckets: prometheus.ExponentialBuckets(1, 10, 5), // 1 ~ 100000 + Name: "read_request_unit_sum", + Help: "Counter of the read request unit cost for all resource groups.", }, []string{resourceGroupNameLabel}) - writeRequestUnitCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + writeRequestUnitCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: ruSubsystem, - Name: "write_request_unit", - Help: "Bucketed histogram of the write request unit cost for all resource groups.", - Buckets: prometheus.ExponentialBuckets(3, 10, 5), // 3 ~ 300000 + Name: "write_request_unit_sum", + Help: "Counter of the write request unit cost for all resource groups.", }, []string{resourceGroupNameLabel}) - sqlLayerRequestUnitCost = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + sqlLayerRequestUnitCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: ruSubsystem, - Name: "sql_layer_request_unit", + Name: "sql_layer_request_unit_sum", Help: "The number of the sql layer request unit cost for all resource groups.", }, []string{resourceGroupNameLabel}) // Resource cost metrics. 
- readByteCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + readByteCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: resourceSubsystem, - Name: "read_byte", - Help: "Bucketed histogram of the read byte cost for all resource groups.", - Buckets: prometheus.ExponentialBuckets(1, 8, 12), + Name: "read_byte_sum", + Help: "Counter of the read byte cost for all resource groups.", }, []string{resourceGroupNameLabel}) - writeByteCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + writeByteCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: resourceSubsystem, - Name: "write_byte", - Help: "Bucketed histogram of the write byte cost for all resource groups.", - Buckets: prometheus.ExponentialBuckets(1, 8, 12), + Name: "write_byte_sum", + Help: "Counter of the write byte cost for all resource groups.", }, []string{resourceGroupNameLabel}) - kvCPUCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + kvCPUCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: resourceSubsystem, - Name: "kv_cpu_time_ms", - Help: "Bucketed histogram of the KV CPU time cost in milliseconds for all resource groups.", - Buckets: prometheus.ExponentialBuckets(1, 10, 3), // 1 ~ 1000 + Name: "kv_cpu_time_ms_sum", + Help: "Counter of the KV CPU time cost in milliseconds for all resource groups.", }, []string{resourceGroupNameLabel}) - sqlCPUCost = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ + sqlCPUCost = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: resourceSubsystem, - Name: "sql_cpu_time_ms", - Help: "Bucketed histogram of the SQL CPU time cost in milliseconds for all resource groups.", - Buckets: prometheus.ExponentialBuckets(1, 10, 3), // 1 ~ 1000 + Name: "sql_cpu_time_ms_sum", + Help: "Counter of the SQL CPU time cost in milliseconds for all resource groups.", }, 
[]string{resourceGroupNameLabel}) - requestCount = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + requestCount = prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: namespace, Subsystem: resourceSubsystem, Name: "request_count", diff --git a/pkg/mcs/resource_manager/server/server.go b/pkg/mcs/resource_manager/server/server.go index a2ce35edf564..5e89bda11207 100644 --- a/pkg/mcs/resource_manager/server/server.go +++ b/pkg/mcs/resource_manager/server/server.go @@ -264,7 +264,7 @@ func (s *Server) initClient() error { if err != nil { return err } - s.etcdClient, s.httpClient, err = etcdutil.CreateClientsWithMultiEndpoint(tlsConfig, []url.URL(u)) + s.etcdClient, s.httpClient, err = etcdutil.CreateClients(tlsConfig, []url.URL(u)[0]) return err } @@ -366,7 +366,8 @@ func (s *Server) startServer() (err error) { log.Info("joining primary election", zap.String("participant-name", uniqueName), zap.Uint64("participant-id", uniqueID)) resourceManagerPrimaryPrefix := fmt.Sprintf("/ms/%d/resource_manager", s.clusterID) s.participant = member.NewParticipant(s.etcdClient) - s.participant.InitInfo(uniqueName, uniqueID, path.Join(resourceManagerPrimaryPrefix, fmt.Sprintf("%05d", 0)), "primary", "keyspace group primary election", s.cfg.AdvertiseListenAddr) + s.participant.InitInfo(uniqueName, uniqueID, path.Join(resourceManagerPrimaryPrefix, fmt.Sprintf("%05d", 0)), + "primary", "keyspace group primary election", s.cfg.AdvertiseListenAddr) s.service = &Service{ ctx: s.ctx, diff --git a/pkg/mcs/tso/server/grpc_service.go b/pkg/mcs/tso/server/grpc_service.go index 21a5bef0d604..8b5765b1875b 100644 --- a/pkg/mcs/tso/server/grpc_service.go +++ b/pkg/mcs/tso/server/grpc_service.go @@ -18,6 +18,7 @@ import ( "context" "io" "net/http" + "strings" "time" "github.com/pingcap/kvproto/pkg/tsopb" @@ -152,6 +153,52 @@ func (s *Service) Tso(stream tsopb.TSO_TsoServer) error { } } +// FindGroupByKeyspaceID returns the keyspace group that the keyspace belongs to. 
+func (s *Service) FindGroupByKeyspaceID( + ctx context.Context, request *tsopb.FindGroupByKeyspaceIDRequest, +) (*tsopb.FindGroupByKeyspaceIDResponse, error) { + keyspaceID := request.GetKeyspaceId() + am, keyspaceGroup, keyspaceGroupID, err := s.keyspaceGroupManager.FindGroupByKeyspaceID(keyspaceID) + if err != nil { + return &tsopb.FindGroupByKeyspaceIDResponse{ + Header: s.wrapErrorToHeader(tsopb.ErrorType_UNKNOWN, err.Error(), keyspaceGroupID), + }, nil + } + if keyspaceGroup == nil { + return &tsopb.FindGroupByKeyspaceIDResponse{ + Header: s.wrapErrorToHeader( + tsopb.ErrorType_UNKNOWN, "keyspace group not found", keyspaceGroupID), + }, nil + } + + members := make([]*tsopb.KeyspaceGroupMember, 0, len(keyspaceGroup.Members)) + for _, member := range keyspaceGroup.Members { + members = append(members, &tsopb.KeyspaceGroupMember{ + Address: member.Address, + // TODO: watch the keyspace groups' primary serving address changes + // to get the latest primary serving addresses of all keyspace groups. 
+ IsPrimary: strings.EqualFold(member.Address, am.GetLeaderAddr()), + }) + } + + var splitState *tsopb.SplitState + if keyspaceGroup.SplitState != nil { + splitState = &tsopb.SplitState{ + SplitSource: keyspaceGroup.SplitState.SplitSource, + } + } + + return &tsopb.FindGroupByKeyspaceIDResponse{ + Header: s.header(keyspaceGroupID), + KeyspaceGroup: &tsopb.KeyspaceGroup{ + Id: keyspaceGroupID, + UserKind: keyspaceGroup.UserKind, + SplitState: splitState, + Members: members, + }, + }, nil +} + func (s *Service) header(keyspaceGroupBelongTo uint32) *tsopb.ResponseHeader { if s.clusterID == 0 { return s.wrapErrorToHeader( diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go index 36207ebf4e0e..d9866b7a9db9 100644 --- a/pkg/mcs/tso/server/server.go +++ b/pkg/mcs/tso/server/server.go @@ -359,7 +359,7 @@ func (s *Server) initClient() error { if err != nil { return err } - s.etcdClient, s.httpClient, err = etcdutil.CreateClientsWithMultiEndpoint(tlsConfig, s.backendUrls) + s.etcdClient, s.httpClient, err = etcdutil.CreateClients(tlsConfig, s.backendUrls[0]) return err } diff --git a/pkg/mcs/utils/constant.go b/pkg/mcs/utils/constant.go index aa81425bc9f0..e29aa6a50082 100644 --- a/pkg/mcs/utils/constant.go +++ b/pkg/mcs/utils/constant.go @@ -63,4 +63,7 @@ const ( // MaxKeyspaceGroupCountInUse is a much more reasonable value of the max count in the // foreseen future, and the former is just for extensibility in theory. MaxKeyspaceGroupCountInUse = uint32(4096) + + // KeyspaceGroupDefaultReplicaCount is the default replica count of keyspace group. 
+ KeyspaceGroupDefaultReplicaCount = 2 ) diff --git a/pkg/member/participant.go b/pkg/member/participant.go index 704d836f0e86..41cdef770047 100644 --- a/pkg/member/participant.go +++ b/pkg/member/participant.go @@ -78,7 +78,7 @@ func (m *Participant) InitInfo(name string, id uint64, rootPath string, leaderNa m.rootPath = rootPath m.leaderPath = path.Join(rootPath, leaderName) m.leadership = election.NewLeadership(m.client, m.GetLeaderPath(), purpose) - log.Info("Participant joining election", zap.Stringer("participant-info", m.member), zap.String("leader-path", m.leaderPath)) + log.Info("participant joining election", zap.Stringer("participant-info", m.member), zap.String("leader-path", m.leaderPath)) } // ID returns the unique ID for this participant in the election group diff --git a/pkg/tso/allocator_manager.go b/pkg/tso/allocator_manager.go index 595585d720c4..f1807fa1c137 100644 --- a/pkg/tso/allocator_manager.go +++ b/pkg/tso/allocator_manager.go @@ -128,12 +128,13 @@ type ElectionMember interface { // Basically it will reset the leader lease and unset leader info. ResetLeader() // GetLeaderListenUrls returns current leader's listen urls + // The first element is the leader/primary url GetLeaderListenUrls() []string // GetLeaderID returns current leader's member ID. GetLeaderID() uint64 // GetLeaderPath returns the path of the leader. GetLeaderPath() string - // GetLeadership returns the leadership of the PD member. + // GetLeadership returns the leadership of the election member. GetLeadership() *election.Leadership // GetDCLocationPathPrefix returns the dc-location path prefix of the cluster. 
GetDCLocationPathPrefix() string @@ -813,7 +814,7 @@ func (am *AllocatorManager) ClusterDCLocationChecker() { } } // Only leader can write the TSO suffix to etcd in order to make it consistent in the cluster - if am.member.IsLeader() { + if am.IsLeader() { for dcLocation, info := range am.mu.clusterDCLocations { if info.Suffix > 0 { continue @@ -1187,7 +1188,7 @@ func (am *AllocatorManager) getOrCreateGRPCConn(ctx context.Context, addr string } func (am *AllocatorManager) getDCLocationInfoFromLeader(ctx context.Context, dcLocation string) (bool, *pdpb.GetDCLocationInfoResponse, error) { - if am.member.IsLeader() { + if am.IsLeader() { info, ok := am.GetDCLocationInfo(dcLocation) if !ok { return false, &pdpb.GetDCLocationInfoResponse{}, nil @@ -1200,11 +1201,11 @@ func (am *AllocatorManager) getDCLocationInfoFromLeader(ctx context.Context, dcL return ok, dcLocationInfo, nil } - leaderAddrs := am.member.GetLeaderListenUrls() - if leaderAddrs == nil || len(leaderAddrs) < 1 { + leaderAddr := am.GetLeaderAddr() + if len(leaderAddr) < 1 { return false, &pdpb.GetDCLocationInfoResponse{}, fmt.Errorf("failed to get leader client url") } - conn, err := am.getOrCreateGRPCConn(ctx, leaderAddrs[0]) + conn, err := am.getOrCreateGRPCConn(ctx, leaderAddr) if err != nil { return false, &pdpb.GetDCLocationInfoResponse{}, err } @@ -1311,3 +1312,23 @@ func (am *AllocatorManager) nextLeaderKey(dcLocation string) string { func (am *AllocatorManager) EnableLocalTSO() bool { return am.enableLocalTSO } + +// IsLeader returns whether the current member is the leader in the election group. +func (am *AllocatorManager) IsLeader() bool { + if am == nil || am.member == nil || !am.member.IsLeader() { + return false + } + return true +} + +// GetLeaderAddr returns the address of leader in the election group. 
+func (am *AllocatorManager) GetLeaderAddr() string { + if am == nil || am.member == nil { + return "" + } + leaderAddrs := am.member.GetLeaderListenUrls() + if len(leaderAddrs) < 1 { + return "" + } + return leaderAddrs[0] +} diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index 18608c1d5d78..0dbf82c2398c 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -491,7 +491,9 @@ func (gta *GlobalTSOAllocator) primaryElectionLoop() { continue } if primary != nil { - log.Info("start to watch the primary", zap.Stringer("tso-primary", primary)) + log.Info("start to watch the primary", + zap.String("campaign-tso-primary-name", gta.member.Name()), + zap.Stringer("tso-primary", primary)) // Watch will keep looping and never return unless the primary has changed. primary.Watch(gta.ctx) log.Info("the tso primary has changed, try to re-campaign a primary") diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 9370e8664d1e..596c7501f716 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -107,11 +107,15 @@ func (s *state) getKeyspaceGroupMeta( return s.ams[groupID], s.kgs[groupID] } -// getAMWithMembershipCheck returns the AllocatorManager of the given keyspace group and check -// if the keyspace is served by this keyspace group. -func (s *state) getAMWithMembershipCheck( +// getKeyspaceGroupMetaWithCheck returns the keyspace group meta of the given keyspace. +// It also checks if the keyspace is served by the given keyspace group. If not, it returns the meta +// of the keyspace group to which the keyspace currently belongs and returns NotServed (by the given +// keyspace group) error. If the keyspace doesn't belong to any keyspace group, it returns the +// NotAssigned error, which could happen because loading keyspace group meta isn't atomic when there is +// keyspace movement between keyspace groups. 
+func (s *state) getKeyspaceGroupMetaWithCheck( keyspaceID, keyspaceGroupID uint32, -) (*AllocatorManager, uint32, error) { +) (*AllocatorManager, *endpoint.KeyspaceGroup, uint32, error) { s.RLock() defer s.RUnlock() @@ -119,25 +123,33 @@ func (s *state) getAMWithMembershipCheck( kg := s.kgs[keyspaceGroupID] if kg != nil { if _, ok := kg.KeyspaceLookupTable[keyspaceID]; ok { - return am, keyspaceGroupID, nil + return am, kg, keyspaceGroupID, nil } } } // The keyspace doesn't belong to this keyspace group, we should check if it belongs to any other - // keyspace groups, and return the correct keyspace group ID to the client. + // keyspace groups, and return the correct keyspace group meta to the client. if kgid, ok := s.keyspaceLookupTable[keyspaceID]; ok { - return nil, kgid, genNotServedErr(errs.ErrGetAllocatorManager, keyspaceGroupID) + return s.ams[kgid], s.kgs[kgid], kgid, + genNotServedErr(errs.ErrGetAllocatorManager, keyspaceGroupID) } + // The keyspace doesn't belong to any keyspace group but the keyspace has been assigned to a + // keyspace group before, which means the keyspace group hasn't initialized yet. if keyspaceGroupID != mcsutils.DefaultKeyspaceGroupID { - return nil, keyspaceGroupID, errs.ErrKeyspaceNotAssigned.FastGenByArgs(keyspaceID) + return nil, nil, keyspaceGroupID, errs.ErrKeyspaceNotAssigned.FastGenByArgs(keyspaceID) } - // The keyspace doesn't belong to any keyspace group, so return the default keyspace group. - // It's for migrating the existing keyspaces which have no keyspace group assigned, so the - // the default keyspace group is used to serve the keyspaces. - return s.ams[mcsutils.DefaultKeyspaceGroupID], mcsutils.DefaultKeyspaceGroupID, nil + // For migrating the existing keyspaces which have no keyspace group assigned as configured + // in the keyspace meta. All these keyspaces will be served by the default keyspace group. 
+ if s.ams[mcsutils.DefaultKeyspaceGroupID] == nil { + return nil, nil, mcsutils.DefaultKeyspaceGroupID, + errs.ErrKeyspaceNotAssigned.FastGenByArgs(keyspaceID) + } + return s.ams[mcsutils.DefaultKeyspaceGroupID], + s.kgs[mcsutils.DefaultKeyspaceGroupID], + mcsutils.DefaultKeyspaceGroupID, nil } // KeyspaceGroupManager manages the members of the keyspace groups assigned to this host. @@ -258,14 +270,20 @@ func (kgm *KeyspaceGroupManager) Initialize() error { // Initialize the default keyspace group if it isn't configured in the storage. if !defaultKGConfigured { - keyspaces := []uint32{mcsutils.DefaultKeyspaceID} - kgm.initDefaultKeyspaceGroup(keyspaces) + log.Info("initializing default keyspace group") + group := &endpoint.KeyspaceGroup{ + ID: mcsutils.DefaultKeyspaceGroupID, + Members: []endpoint.KeyspaceGroupMember{{Address: kgm.tsoServiceID.ServiceAddr}}, + Keyspaces: []uint32{mcsutils.DefaultKeyspaceID}, + } + kgm.updateKeyspaceGroup(group) } // Watch/apply keyspace group membership/distribution meta changes dynamically. kgm.wg.Add(1) go kgm.startKeyspaceGroupsMetaWatchLoop(watchStartRevision) + log.Info("keyspace group manager initialized") return nil } @@ -301,18 +319,6 @@ func (kgm *KeyspaceGroupManager) checkInitProgress(ctx context.Context, cancel c <-done } -func (kgm *KeyspaceGroupManager) initDefaultKeyspaceGroup(keyspaces []uint32) { - log.Info("initializing default keyspace group", - zap.Int("keyspaces-length", len(keyspaces))) - - group := &endpoint.KeyspaceGroup{ - ID: mcsutils.DefaultKeyspaceGroupID, - Members: []endpoint.KeyspaceGroupMember{{Address: kgm.tsoServiceID.ServiceAddr}}, - Keyspaces: keyspaces, - } - kgm.updateKeyspaceGroup(group) -} - // initAssignment loads initial keyspace group assignment from storage and initialize the group manager. // Return watchStartRevision, the start revision for watching keyspace group membership/distribution change. 
func (kgm *KeyspaceGroupManager) initAssignment( @@ -493,15 +499,7 @@ func (kgm *KeyspaceGroupManager) watchKeyspaceGroupsMetaChange(revision int64) ( } kgm.updateKeyspaceGroup(group) case clientv3.EventTypeDelete: - if groupID == mcsutils.DefaultKeyspaceGroupID { - keyspaces := kgm.kgs[groupID].Keyspaces - kgm.deleteKeyspaceGroup(groupID) - log.Warn("removed default keyspace group meta config from the storage. " + - "now every tso node/pod will initialize it") - kgm.initDefaultKeyspaceGroup(keyspaces) - } else { - kgm.deleteKeyspaceGroup(groupID) - } + kgm.deleteKeyspaceGroup(groupID) } } // Retry the groups that are not initialized successfully before. @@ -521,11 +519,6 @@ func (kgm *KeyspaceGroupManager) watchKeyspaceGroupsMetaChange(revision int64) ( } func (kgm *KeyspaceGroupManager) isAssignedToMe(group *endpoint.KeyspaceGroup) bool { - // If the default keyspace group isn't assigned to any tso node/pod, assign it to everyone. - if group.ID == mcsutils.DefaultKeyspaceGroupID && len(group.Members) == 0 { - return true - } - for _, member := range group.Members { if member.Address == kgm.tsoServiceID.ServiceAddr { return true @@ -541,22 +534,30 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro log.Warn("keyspace group ID is invalid, ignore it", zap.Error(err)) return } - // Not assigned to me. If this host/pod owns this keyspace group, it should resign. + + // If the default keyspace group isn't assigned to any tso node/pod, assign it to everyone. + if group.ID == mcsutils.DefaultKeyspaceGroupID && len(group.Members) == 0 { + log.Warn("configured the default keyspace group but no members/distribution specified. " + + "ignore it for now and fallback to the way of every tso node/pod owning a replica") + // TODO: fill members with all tso nodes/pods. 
+ group.Members = []endpoint.KeyspaceGroupMember{{Address: kgm.tsoServiceID.ServiceAddr}} + } + if !kgm.isAssignedToMe(group) { - if group.ID == mcsutils.DefaultKeyspaceGroupID { - log.Info("resign default keyspace group membership", - zap.Any("default-keyspace-group", group)) - } - kgm.deleteKeyspaceGroup(group.ID) + // Not assigned to me. If this host/pod owns a replica of this keyspace group, + // it should resign the election membership now. + kgm.exitElectionMembership(group) return } - // If the keyspace group is already initialized, just update the meta. - if oldAM, oldGroup := kgm.state.getKeyspaceGroupMeta(group.ID); oldAM != nil { + + // If this host is already assigned a replica of this keyspace group, that is to say, it is already initialized, just update the meta. + if oldAM, oldGroup := kgm.getKeyspaceGroupMeta(group.ID); oldAM != nil { log.Info("keyspace group already initialized, so update meta only", zap.Uint32("keyspace-group-id", group.ID)) - kgm.updateKeyspaceGroupMembership(oldGroup, group) + kgm.updateKeyspaceGroupMembership(oldGroup, group, true) return } + + // If the keyspace group is not initialized, initialize it. uniqueName := fmt.Sprintf("%s-%05d", kgm.electionNamePrefix, group.ID) uniqueID := memberutil.GenerateUniqueID(uniqueName) @@ -617,10 +618,20 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro // updateKeyspaceGroupMembership updates the keyspace lookup table for the given keyspace group.
func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( - oldGroup, newGroup *endpoint.KeyspaceGroup, + oldGroup, newGroup *endpoint.KeyspaceGroup, updateWithLock bool, ) { + var ( + oldKeyspaces []uint32 + oldKeyspaceLookupTable map[uint32]struct{} + ) + + if oldGroup != nil { + oldKeyspaces = oldGroup.Keyspaces + oldKeyspaceLookupTable = oldGroup.KeyspaceLookupTable + } + groupID := newGroup.ID - oldKeyspaces, newKeyspaces := oldGroup.Keyspaces, newGroup.Keyspaces + newKeyspaces := newGroup.Keyspaces oldLen, newLen := len(oldKeyspaces), len(newKeyspaces) // Sort the keyspaces in ascending order @@ -641,12 +652,14 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( } } - kgm.Lock() - defer kgm.Unlock() + if updateWithLock { + kgm.Lock() + defer kgm.Unlock() + } if sameMembership { // The keyspace group membership is not changed. Reuse the old one. - newGroup.KeyspaceLookupTable = oldGroup.KeyspaceLookupTable + newGroup.KeyspaceLookupTable = oldKeyspaceLookupTable } else { // The keyspace group membership is changed. Update the keyspace lookup table. newGroup.KeyspaceLookupTable = make(map[uint32]struct{}) @@ -683,7 +696,7 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( } } // Check if the split is completed. - if oldGroup.IsSplitTarget() && !newGroup.IsSplitting() { + if oldGroup != nil && oldGroup.IsSplitTarget() && !newGroup.IsSplitting() { kgm.ams[groupID].GetMember().(*member.Participant).SetCampaignChecker(nil) } kgm.kgs[groupID] = newGroup @@ -693,6 +706,18 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( func (kgm *KeyspaceGroupManager) deleteKeyspaceGroup(groupID uint32) { log.Info("delete keyspace group", zap.Uint32("keyspace-group-id", groupID)) + if groupID == mcsutils.DefaultKeyspaceGroupID { + log.Info("removed default keyspace group meta config from the storage. 
" + + "now every tso node/pod will initialize it") + group := &endpoint.KeyspaceGroup{ + ID: mcsutils.DefaultKeyspaceGroupID, + Members: []endpoint.KeyspaceGroupMember{{Address: kgm.tsoServiceID.ServiceAddr}}, + Keyspaces: []uint32{mcsutils.DefaultKeyspaceID}, + } + kgm.updateKeyspaceGroup(group) + return + } + kgm.Lock() defer kgm.Unlock() @@ -717,6 +742,24 @@ func (kgm *KeyspaceGroupManager) deleteKeyspaceGroup(groupID uint32) { } } +// exitElectionMembership exits the election membership of the given keyspace group by +// deinitializing the allocator manager, but still keeps the keyspace group info. +func (kgm *KeyspaceGroupManager) exitElectionMembership(group *endpoint.KeyspaceGroup) { + log.Info("resign election membership", zap.Uint32("keyspace-group-id", group.ID)) + + kgm.Lock() + defer kgm.Unlock() + + am := kgm.ams[group.ID] + if am != nil { + am.close() + kgm.ams[group.ID] = nil + } + + oldGroup := kgm.kgs[group.ID] + kgm.updateKeyspaceGroupMembership(oldGroup, group, false) +} + // GetAllocatorManager returns the AllocatorManager of the given keyspace group func (kgm *KeyspaceGroupManager) GetAllocatorManager(keyspaceGroupID uint32) (*AllocatorManager, error) { if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { @@ -728,6 +771,18 @@ func (kgm *KeyspaceGroupManager) GetAllocatorManager(keyspaceGroupID uint32) (*A return nil, genNotServedErr(errs.ErrGetAllocatorManager, keyspaceGroupID) } +// FindGroupByKeyspaceID returns the keyspace group that contains the keyspace with the given ID. 
+func (kgm *KeyspaceGroupManager) FindGroupByKeyspaceID( + keyspaceID uint32, +) (*AllocatorManager, *endpoint.KeyspaceGroup, uint32, error) { + curAM, curKeyspaceGroup, curKeyspaceGroupID, err := + kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, mcsutils.DefaultKeyspaceGroupID) + if err != nil { + return nil, nil, curKeyspaceGroupID, err + } + return curAM, curKeyspaceGroup, curKeyspaceGroupID, nil +} + // GetElectionMember returns the election member of the given keyspace group func (kgm *KeyspaceGroupManager) GetElectionMember( keyspaceID, keyspaceGroupID uint32, @@ -735,7 +790,7 @@ func (kgm *KeyspaceGroupManager) GetElectionMember( if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { return nil, err } - am, _, err := kgm.getAMWithMembershipCheck(keyspaceID, keyspaceGroupID) + am, _, _, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) if err != nil { return nil, err } @@ -746,17 +801,17 @@ func (kgm *KeyspaceGroupManager) GetElectionMember( func (kgm *KeyspaceGroupManager) HandleTSORequest( keyspaceID, keyspaceGroupID uint32, dcLocation string, count uint32, -) (ts pdpb.Timestamp, currentKeyspaceGroupID uint32, err error) { +) (ts pdpb.Timestamp, curKeyspaceGroupID uint32, err error) { if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { return pdpb.Timestamp{}, keyspaceGroupID, err } - am, currentKeyspaceGroupID, err := kgm.getAMWithMembershipCheck(keyspaceID, keyspaceGroupID) + am, _, curKeyspaceGroupID, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) if err != nil { - return pdpb.Timestamp{}, currentKeyspaceGroupID, err + return pdpb.Timestamp{}, curKeyspaceGroupID, err } - err = kgm.checkTSOSplit(currentKeyspaceGroupID, dcLocation) + err = kgm.checkTSOSplit(curKeyspaceGroupID, dcLocation) if err != nil { - return pdpb.Timestamp{}, currentKeyspaceGroupID, err + return pdpb.Timestamp{}, curKeyspaceGroupID, err } ts, err = am.HandleRequest(dcLocation, count) return ts, keyspaceGroupID, err diff 
--git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index 3fa5bbd3fc65..c023efa02f89 100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -251,46 +251,57 @@ func (suite *keyspaceGroupManagerTestSuite) TestWatchAndDynamicallyApplyChanges( // Initialize PUT/DELETE events events := []*etcdEvent{} // Assign keyspace group 0 to this host/pod/keyspace-group-manager. - // final result: [0] + // final result: assigned [0], loaded [0] events = append(events, generateKeyspaceGroupPutEvent(0, []uint32{0}, []string{svcAddr})) // Assign keyspace group 1 to this host/pod/keyspace-group-manager. - // final result: [0,1] + // final result: assigned [0,1], loaded [0,1] events = append(events, generateKeyspaceGroupPutEvent(1, []uint32{1}, []string{"unknown", svcAddr})) // Assign keyspace group 2 to other host/pod/keyspace-group-manager. - // final result: [0,1] + // final result: assigned [0,1], loaded [0,1,2] events = append(events, generateKeyspaceGroupPutEvent(2, []uint32{2}, []string{"unknown"})) // Assign keyspace group 3 to this host/pod/keyspace-group-manager. - // final result: [0,1,3] + // final result: assigned [0,1,3], loaded [0,1,2,3] events = append(events, generateKeyspaceGroupPutEvent(3, []uint32{3}, []string{svcAddr})) // Delete keyspace group 0. Every tso node/pod now should initialize keyspace group 0. - // final result: [0,1,3] + // final result: assigned [0,1,3], loaded [0,1,2,3] events = append(events, generateKeyspaceGroupDeleteEvent(0)) // Put keyspace group 4 which doesn't belong to anyone. - // final result: [0,1,3] + // final result: assigned [0,1,3], loaded [0,1,2,3,4] events = append(events, generateKeyspaceGroupPutEvent(4, []uint32{4}, []string{})) // Put keyspace group 5 which doesn't belong to anyone. 
- // final result: [0,1,3] + // final result: assigned [0,1,3], loaded [0,1,2,3,4,5] events = append(events, generateKeyspaceGroupPutEvent(5, []uint32{5}, []string{})) // Assign keyspace group 2 to this host/pod/keyspace-group-manager. - // final result: [0,1,2,3] + // final result: assigned [0,1,2,3], loaded [0,1,2,3,4,5] events = append(events, generateKeyspaceGroupPutEvent(2, []uint32{2}, []string{svcAddr})) // Reassign keyspace group 3 to no one. - // final result: [0,1,2] + // final result: assigned [0,1,2], loaded [0,1,2,3,4,5] events = append(events, generateKeyspaceGroupPutEvent(3, []uint32{3}, []string{})) // Reassign keyspace group 4 to this host/pod/keyspace-group-manager. - // final result: [0,1,2,4] + // final result: assigned [0,1,2,4], loaded [0,1,2,3,4,5] events = append(events, generateKeyspaceGroupPutEvent(4, []uint32{4}, []string{svcAddr})) - - // Eventually, this keyspace groups manager is expected to serve the following keyspace groups. - expectedGroupIDs := []uint32{0, 1, 2, 4} + // Delete keyspace group 2. + // final result: assigned [0,1,4], loaded [0,1,3,4,5] + events = append(events, generateKeyspaceGroupDeleteEvent(2)) // Apply the keyspace group assignment change events to etcd. suite.applyEtcdEvents(re, rootPath, events) - // Verify the keyspace group assignment. + // Verify the keyspace groups assigned. + // Eventually, this keyspace groups manager is expected to serve the following keyspace groups. + expectedAssignedGroups := []uint32{0, 1, 4} testutil.Eventually(re, func() bool { - assignedGroupIDs := collectAssignedKeyspaceGroupIDs(re, mgr) - return reflect.DeepEqual(expectedGroupIDs, assignedGroupIDs) + assignedGroups := collectAssignedKeyspaceGroupIDs(re, mgr) + return reflect.DeepEqual(expectedAssignedGroups, assignedGroups) + }) + + // Verify the keyspace groups loaded. 
+ // Eventually, this keyspace groups manager is expected to load the following keyspace groups + // in which keyspace group 3, 5 aren't served by this tso node/pod. + expectedLoadedGroups := []uint32{0, 1, 3, 4, 5} + testutil.Eventually(re, func() bool { + loadedGroups := collectAllLoadedKeyspaceGroupIDs(mgr) + return reflect.DeepEqual(expectedLoadedGroups, loadedGroups) }) } @@ -362,8 +373,8 @@ func (suite *keyspaceGroupManagerTestSuite) TestInitDefaultKeyspaceGroup() { }) } -// TestGetAMWithMembershipCheck tests GetAMWithMembershipCheck. -func (suite *keyspaceGroupManagerTestSuite) TestGetAMWithMembershipCheck() { +// TestGetKeyspaceGroupMetaWithCheck tests GetKeyspaceGroupMetaWithCheck. +func (suite *keyspaceGroupManagerTestSuite) TestGetKeyspaceGroupMetaWithCheck() { re := suite.Require() mgr := suite.newUniqueKeyspaceGroupManager(1) @@ -372,6 +383,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestGetAMWithMembershipCheck() { var ( am *AllocatorManager + kg *endpoint.KeyspaceGroup kgid uint32 err error ) @@ -386,29 +398,43 @@ func (suite *keyspaceGroupManagerTestSuite) TestGetAMWithMembershipCheck() { re.NoError(err) // Should be able to get AM for keyspace 0, 1, 2 in keyspace group 0. - am, kgid, err = mgr.getAMWithMembershipCheck(0, 0) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(0, 0) re.NoError(err) re.Equal(uint32(0), kgid) re.NotNil(am) - am, kgid, err = mgr.getAMWithMembershipCheck(1, 0) + re.NotNil(kg) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(1, 0) re.NoError(err) re.Equal(uint32(0), kgid) re.NotNil(am) - am, kgid, err = mgr.getAMWithMembershipCheck(2, 0) + re.NotNil(kg) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(2, 0) re.NoError(err) re.Equal(uint32(0), kgid) re.NotNil(am) + re.NotNil(kg) // Should still succeed even keyspace 3 isn't explicitly assigned to any // keyspace group. It will be assigned to the default keyspace group. 
- am, kgid, err = mgr.getAMWithMembershipCheck(3, 0) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(3, 0) re.NoError(err) re.Equal(uint32(0), kgid) re.NotNil(am) - // Should fail because keyspace group 1 doesn't exist. - am, kgid, err = mgr.getAMWithMembershipCheck(0, 1) + re.NotNil(kg) + // Should fail because keyspace 3 isn't explicitly assigned to any keyspace + // group, and the specified group isn't the default keyspace group. + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(3, 100) re.Error(err) - re.Equal(uint32(0), kgid) + re.Equal(uint32(100), kgid) re.Nil(am) + re.Nil(kg) + // Should fail but still be able to get the meta of keyspace group 0, + // because keyspace 0 belongs to group 0, though the specified group 1 + // doesn't exist. + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(0, 1) + re.Error(err) + re.Equal(uint32(0), kgid) + re.NotNil(am) + re.NotNil(kg) } // TestDefaultMembershipRestriction tests the restriction of default keyspace always @@ -425,6 +451,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { var ( am *AllocatorManager + kg *endpoint.KeyspaceGroup kgid uint32 err error event *etcdEvent @@ -445,11 +472,12 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { re.NoError(err) // Should be able to get AM for keyspace 0 in keyspace group 0. - am, kgid, err = mgr.getAMWithMembershipCheck( + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck( mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) re.NoError(err) re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) re.NotNil(am) + re.NotNil(kg) event = generateKeyspaceGroupPutEvent( mcsutils.DefaultKeyspaceGroupID, []uint32{1, 2}, []string{svcAddr}) @@ -464,16 +492,18 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { // it will cause random failure. time.Sleep(1 * time.Second) // Should still be able to get AM for keyspace 0 in keyspace group 0. 
- am, kgid, err = mgr.getAMWithMembershipCheck( + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck( mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) re.NoError(err) re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) re.NotNil(am) - // Can't get the default keyspace from the keyspace group 3 - am, kgid, err = mgr.getAMWithMembershipCheck(mcsutils.DefaultKeyspaceID, 3) + re.NotNil(kg) + // Should fail but still be able to get the keyspace group meta from the default keyspace group + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(mcsutils.DefaultKeyspaceID, 3) re.Error(err) re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) - re.Nil(am) + re.NotNil(am) + re.NotNil(kg) } // TestHandleTSORequestWithWrongMembership tests the case that HandleTSORequest receives @@ -720,13 +750,14 @@ func collectAssignedKeyspaceGroupIDs(re *require.Assertions, kgm *KeyspaceGroupM re.Nil(kgm.ams[i], fmt.Sprintf("ksg is nil but am is not nil for id %d", i)) } else { am := kgm.ams[i] - re.NotNil(am, fmt.Sprintf("ksg is not nil but am is nil for id %d", i)) - re.Equal(i, int(am.kgID)) - re.Equal(i, int(kg.ID)) - for _, m := range kg.Members { - if m.Address == kgm.tsoServiceID.ServiceAddr { - ids = append(ids, uint32(i)) - break + if am != nil { + re.Equal(i, int(am.kgID)) + re.Equal(i, int(kg.ID)) + for _, m := range kg.Members { + if m.Address == kgm.tsoServiceID.ServiceAddr { + ids = append(ids, uint32(i)) + break + } } } } @@ -735,6 +766,21 @@ func collectAssignedKeyspaceGroupIDs(re *require.Assertions, kgm *KeyspaceGroupM return ids } +func collectAllLoadedKeyspaceGroupIDs(kgm *KeyspaceGroupManager) []uint32 { + kgm.RLock() + defer kgm.RUnlock() + + ids := []uint32{} + for i := 0; i < len(kgm.kgs); i++ { + kg := kgm.kgs[i] + if kg != nil { + ids = append(ids, uint32(i)) + } + } + + return ids +} + func (suite *keyspaceGroupManagerTestSuite) TestUpdateKeyspaceGroupMembership() { re := suite.Require() @@ -750,7 +796,7 @@ func (suite *keyspaceGroupManagerTestSuite) 
TestUpdateKeyspaceGroupMembership() keyspaceLookupTable: make(map[uint32]uint32), }} - kgm.updateKeyspaceGroupMembership(oldGroup, newGroup) + kgm.updateKeyspaceGroupMembership(oldGroup, newGroup, true) verifyLocalKeyspaceLookupTable(re, newGroup.KeyspaceLookupTable, newGroup.Keyspaces) verifyGlobalKeyspaceLookupTable(re, kgm.keyspaceLookupTable, newGroup.KeyspaceLookupTable) @@ -774,7 +820,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestUpdateKeyspaceGroupMembership() keyspacesCopy := make([]uint32, len(keyspaces)) copy(keyspacesCopy, keyspaces) newGroup = &endpoint.KeyspaceGroup{ID: groupID, Keyspaces: keyspacesCopy} - kgm.updateKeyspaceGroupMembership(oldGroup, newGroup) + kgm.updateKeyspaceGroupMembership(oldGroup, newGroup, true) verifyLocalKeyspaceLookupTable(re, newGroup.KeyspaceLookupTable, newGroup.Keyspaces) verifyGlobalKeyspaceLookupTable(re, kgm.keyspaceLookupTable, newGroup.KeyspaceLookupTable) diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index ad0b277c8f0c..e63761ca9a98 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -260,6 +260,33 @@ func TestEtcdWithHangLeaderEnableCheck(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/tikv/pd/pkg/utils/etcdutil/closeKeepAliveCheck")) } +func TestEtcdScaleInAndOutWithoutMultiPoint(t *testing.T) { + re := require.New(t) + // Start a etcd server. + cfg1 := NewTestSingleConfig(t) + etcd1, err := embed.StartEtcd(cfg1) + re.NoError(err) + ep1 := cfg1.LCUrls[0].String() + <-etcd1.Server.ReadyNotify() + + // Create two etcd clients with etcd1 as endpoint. 
+ urls, err := types.NewURLs([]string{ep1}) + re.NoError(err) + client1, err := createEtcdClient(nil, urls[0]) // execute member change operation with this client + re.NoError(err) + client2, err := createEtcdClient(nil, urls[0]) // check member change with this client + re.NoError(err) + + // Add a new member and check members + etcd2 := checkAddEtcdMember(t, cfg1, client1) + checkMembers(re, client2, []*embed.Etcd{etcd1, etcd2}) + + // scale in etcd1 + _, err = RemoveEtcdMember(client1, uint64(etcd1.Server.ID())) + re.NoError(err) + checkMembers(re, client2, []*embed.Etcd{etcd2}) +} + func checkEtcdWithHangLeader(t *testing.T) error { re := require.New(t) // Start a etcd server. diff --git a/server/apiv2/handlers/tso_keyspace_group.go b/server/apiv2/handlers/tso_keyspace_group.go index 8db553e765ac..a9f7d9d13958 100644 --- a/server/apiv2/handlers/tso_keyspace_group.go +++ b/server/apiv2/handlers/tso_keyspace_group.go @@ -35,7 +35,8 @@ func RegisterTSOKeyspaceGroup(r *gin.RouterGroup) { router.GET("", GetKeyspaceGroups) router.GET("/:id", GetKeyspaceGroupByID) router.DELETE("/:id", DeleteKeyspaceGroupByID) - router.POST("/:id/alloc", AllocNodeForKeyspaceGroup) + router.POST("/:id/alloc", AllocNodesForKeyspaceGroup) + router.POST("/:id/nodes", SetNodesForKeyspaceGroup) router.POST("/:id/split", SplitKeyspaceGroupByID) router.DELETE("/:id/split", FinishSplitKeyspaceByID) } @@ -190,13 +191,13 @@ func FinishSplitKeyspaceByID(c *gin.Context) { c.JSON(http.StatusOK, nil) } -// AllocNodeForKeyspaceGroupParams defines the params for allocating nodes for keyspace groups. -type AllocNodeForKeyspaceGroupParams struct { +// AllocNodesForKeyspaceGroupParams defines the params for allocating nodes for keyspace groups. +type AllocNodesForKeyspaceGroupParams struct { Replica int `json:"replica"` } -// AllocNodeForKeyspaceGroup allocates nodes for keyspace group. -func AllocNodeForKeyspaceGroup(c *gin.Context) { +// AllocNodesForKeyspaceGroup allocates nodes for keyspace group. 
+func AllocNodesForKeyspaceGroup(c *gin.Context) { id, err := validateKeyspaceGroupID(c) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, "invalid keyspace group id") @@ -204,14 +205,14 @@ func AllocNodeForKeyspaceGroup(c *gin.Context) { } svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) manager := svr.GetKeyspaceGroupManager() - allocParams := &AllocNodeForKeyspaceGroupParams{} + allocParams := &AllocNodesForKeyspaceGroupParams{} err = c.BindJSON(allocParams) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, errs.ErrBindJSON.Wrap(err).GenWithStackByCause()) return } - if manager.GetNodesNum() < allocParams.Replica || allocParams.Replica < 1 { - c.AbortWithStatusJSON(http.StatusBadRequest, "invalid replica, should be in [1, nodes_num]") + if manager.GetNodesCount() < allocParams.Replica || allocParams.Replica < utils.KeyspaceGroupDefaultReplicaCount { + c.AbortWithStatusJSON(http.StatusBadRequest, "invalid replica, should be in [2, nodes_num]") return } keyspaceGroup, err := manager.GetKeyspaceGroupByID(id) @@ -232,6 +233,54 @@ func AllocNodeForKeyspaceGroup(c *gin.Context) { c.JSON(http.StatusOK, nodes) } +// SetNodesForKeyspaceGroupParams defines the params for setting nodes for keyspace groups. +// Notes: it should be used carefully. +type SetNodesForKeyspaceGroupParams struct { + Nodes []string `json:"nodes"` +} + +// SetNodesForKeyspaceGroup sets nodes for keyspace group. 
+func SetNodesForKeyspaceGroup(c *gin.Context) { + id, err := validateKeyspaceGroupID(c) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, "invalid keyspace group id") + return + } + svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) + manager := svr.GetKeyspaceGroupManager() + setParams := &SetNodesForKeyspaceGroupParams{} + err = c.BindJSON(setParams) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, errs.ErrBindJSON.Wrap(err).GenWithStackByCause()) + return + } + // check if keyspace group exists + keyspaceGroup, err := manager.GetKeyspaceGroupByID(id) + if err != nil || keyspaceGroup == nil { + c.AbortWithStatusJSON(http.StatusBadRequest, "keyspace group does not exist") + return + } + // check if nodes is less than default replica count + if len(setParams.Nodes) < utils.KeyspaceGroupDefaultReplicaCount { + c.AbortWithStatusJSON(http.StatusBadRequest, "invalid num of nodes") + return + } + // check if node exists + for _, node := range setParams.Nodes { + if !manager.IsExistNode(node) { + c.AbortWithStatusJSON(http.StatusBadRequest, "node does not exist") + return + } + } + // set nodes + err = manager.SetNodesForKeyspaceGroup(id, setParams.Nodes) + if err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + return + } + c.JSON(http.StatusOK, nil) +} + func validateKeyspaceGroupID(c *gin.Context) (uint32, error) { id, err := strconv.ParseUint(c.Param("id"), 10, 64) if err != nil { diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 69710be506cc..48ddf6a8ad02 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -273,12 +273,14 @@ func (c *RaftCluster) Start(s Server) error { if cluster == nil { return nil } + if s.IsAPIServiceMode() { - err = c.keyspaceGroupManager.Bootstrap(c.ctx) + err = c.keyspaceGroupManager.Bootstrap() if err != nil { return err } } + c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts()) if 
c.opt.IsPlacementRulesEnabled() { err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels()) diff --git a/server/grpc_service.go b/server/grpc_service.go index d9883e4453fd..aac69e4b8c22 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -108,9 +108,11 @@ func (s *GrpcServer) GetClusterInfo(ctx context.Context, _ *pdpb.GetClusterInfoR }, nil } + var tsoServiceAddrs []string svcModes := make([]pdpb.ServiceMode, 0) if s.IsAPIServiceMode() { svcModes = append(svcModes, pdpb.ServiceMode_API_SVC_MODE) + tsoServiceAddrs = s.keyspaceGroupManager.GetTSOServiceAddrs() } else { svcModes = append(svcModes, pdpb.ServiceMode_PD_SVC_MODE) } @@ -118,6 +120,7 @@ func (s *GrpcServer) GetClusterInfo(ctx context.Context, _ *pdpb.GetClusterInfoR return &pdpb.GetClusterInfoResponse{ Header: s.header(), ServiceModes: svcModes, + TsoUrls: tsoServiceAddrs, }, nil } diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 5ea05bde40ed..8ada9f9d5195 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -144,7 +144,7 @@ func TestClientLeaderChange(t *testing.T) { // Check URL list. 
cli.Close() - urls := innerCli.GetServiceDiscovery().GetURLs() + urls := innerCli.GetServiceDiscovery().GetServiceURLs() sort.Strings(urls) sort.Strings(endpoints) re.Equal(endpoints, urls) @@ -256,7 +256,7 @@ func TestTSOAllocatorLeader(t *testing.T) { cli.Close() for dcLocation, url := range getTSOAllocatorServingEndpointURLs(cli.(TSOAllocatorsGetter)) { if dcLocation == tso.GlobalDCLocation { - urls := innerCli.GetServiceDiscovery().GetURLs() + urls := innerCli.GetServiceDiscovery().GetServiceURLs() sort.Strings(urls) sort.Strings(endpoints) re.Equal(endpoints, urls) diff --git a/tests/integrations/client/go.mod b/tests/integrations/client/go.mod index d847f733e81b..ba31fb49b911 100644 --- a/tests/integrations/client/go.mod +++ b/tests/integrations/client/go.mod @@ -13,7 +13,7 @@ replace google.golang.org/grpc v1.54.0 => google.golang.org/grpc v1.26.0 require ( github.com/docker/go-units v0.4.0 github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a + github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/stretchr/testify v1.8.2 github.com/tikv/pd v0.0.0-00010101000000-000000000000 diff --git a/tests/integrations/client/go.sum b/tests/integrations/client/go.sum index 05f464f862eb..1f71321d1f3b 100644 --- a/tests/integrations/client/go.sum +++ b/tests/integrations/client/go.sum @@ -385,8 +385,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a 
h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= diff --git a/tests/integrations/mcs/go.mod b/tests/integrations/mcs/go.mod index 7d135779defd..117223aadfb3 100644 --- a/tests/integrations/mcs/go.mod +++ b/tests/integrations/mcs/go.mod @@ -12,7 +12,7 @@ replace google.golang.org/grpc v1.54.0 => google.golang.org/grpc v1.26.0 require ( github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a + github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.8.2 diff --git a/tests/integrations/mcs/go.sum b/tests/integrations/mcs/go.sum index 6affa3c24d35..2b8713a062de 100644 --- a/tests/integrations/mcs/go.sum +++ b/tests/integrations/mcs/go.sum @@ -385,8 +385,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto 
v0.0.0-20230407040905-68d0eebd564a h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index 7b0c09c2a7b5..91186ca82111 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -22,9 +22,11 @@ import ( "io" "net/http" "testing" + "time" "github.com/stretchr/testify/suite" bs "github.com/tikv/pd/pkg/basicserver" + "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" @@ -80,7 +82,7 @@ func (suite *keyspaceGroupTestSuite) TearDownTest() { func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { // add three nodes. nodes := make(map[string]bs.Server) - for i := 0; i < 3; i++ { + for i := 0; i < utils.KeyspaceGroupDefaultReplicaCount+2; i++ { s, cleanup := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) defer cleanup() nodes[s.GetAddr()] = s @@ -99,55 +101,65 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { // alloc nodes for the keyspace group. 
id := 1 - params := &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 1, + params := &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount, } - code, got := suite.tryAllocNodesForKeyspaceGroup(id, params) + got, code := suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusOK, code) - suite.Equal(1, len(got)) - suite.Contains(nodes, got[0].Address) - oldNode := got[0].Address + suite.Equal(utils.KeyspaceGroupDefaultReplicaCount, len(got)) + oldMembers := make(map[string]struct{}) + for _, member := range got { + suite.Contains(nodes, member.Address) + oldMembers[member.Address] = struct{}{} + } // alloc node update to 2. - params.Replica = 2 - code, got = suite.tryAllocNodesForKeyspaceGroup(id, params) + params.Replica = utils.KeyspaceGroupDefaultReplicaCount + 1 + got, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusOK, code) - suite.Equal(2, len(got)) - suite.Contains(nodes, got[0].Address) - suite.Contains(nodes, got[1].Address) - suite.True(oldNode == got[0].Address || oldNode == got[1].Address) // the old node is also in the new result. - suite.NotEqual(got[0].Address, got[1].Address) // the two nodes are different. + suite.Equal(params.Replica, len(got)) + newMembers := make(map[string]struct{}) + for _, member := range got { + suite.Contains(nodes, member.Address) + newMembers[member.Address] = struct{}{} + } + for member := range oldMembers { + // old members should be in new members. 
+ suite.Contains(newMembers, member) + } } -func (suite *keyspaceGroupTestSuite) TestReplica() { +func (suite *keyspaceGroupTestSuite) TestAllocReplica() { nodes := make(map[string]bs.Server) - s, cleanup := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() - nodes[s.GetAddr()] = s + for i := 0; i < utils.KeyspaceGroupDefaultReplicaCount; i++ { + s, cleanup := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) + defer cleanup() + nodes[s.GetAddr()] = s + } mcs.WaitForPrimaryServing(suite.Require(), nodes) // miss replica. id := 1 - params := &handlers.AllocNodeForKeyspaceGroupParams{} - code, got := suite.tryAllocNodesForKeyspaceGroup(id, params) + params := &handlers.AllocNodesForKeyspaceGroupParams{} + got, code := suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) suite.Empty(got) - // replica is negative. - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: -1, + // replica is less than default replica. + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount - 1, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) // there is no any keyspace group. - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 1, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) - // the keyspace group is exist. + // create a keyspace group. 
kgs := &handlers.CreateKeyspaceGroupParams{KeyspaceGroups: []*endpoint.KeyspaceGroup{ { ID: uint32(id), @@ -156,55 +168,141 @@ func (suite *keyspaceGroupTestSuite) TestReplica() { }} code = suite.tryCreateKeyspaceGroup(kgs) suite.Equal(http.StatusOK, code) - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 1, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount, } - code, got = suite.tryAllocNodesForKeyspaceGroup(id, params) + got, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusOK, code) - suite.True(checkNodes(got, nodes)) + for _, member := range got { + suite.Contains(nodes, member.Address) + } // the keyspace group is exist, but the replica is more than the num of nodes. - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 2, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount + 1, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) + // the keyspace group is exist, the new replica is more than the old replica. s2, cleanup2 := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) defer cleanup2() nodes[s2.GetAddr()] = s2 mcs.WaitForPrimaryServing(suite.Require(), nodes) - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 2, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount + 1, } - code, got = suite.tryAllocNodesForKeyspaceGroup(id, params) + got, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusOK, code) - suite.True(checkNodes(got, nodes)) + for _, member := range got { + suite.Contains(nodes, member.Address) + } // the keyspace group is exist, the new replica is equal to the old replica. 
- params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 2, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount + 1, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) // the keyspace group is exist, the new replica is less than the old replica. - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 1, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) // the keyspace group is not exist. id = 2 - params = &handlers.AllocNodeForKeyspaceGroupParams{ - Replica: 1, + params = &handlers.AllocNodesForKeyspaceGroupParams{ + Replica: utils.KeyspaceGroupDefaultReplicaCount, } - code, _ = suite.tryAllocNodesForKeyspaceGroup(id, params) + _, code = suite.tryAllocNodesForKeyspaceGroup(id, params) suite.Equal(http.StatusBadRequest, code) } -func (suite *keyspaceGroupTestSuite) tryAllocNodesForKeyspaceGroup(id int, request *handlers.AllocNodeForKeyspaceGroupParams) (int, []endpoint.KeyspaceGroupMember) { +func (suite *keyspaceGroupTestSuite) TestSetNodes() { + nodes := make(map[string]bs.Server) + nodesList := []string{} + for i := 0; i < utils.KeyspaceGroupDefaultReplicaCount; i++ { + s, cleanup := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) + defer cleanup() + nodes[s.GetAddr()] = s + nodesList = append(nodesList, s.GetAddr()) + } + mcs.WaitForPrimaryServing(suite.Require(), nodes) + + // the keyspace group is not exist. 
+ id := 1 + params := &handlers.SetNodesForKeyspaceGroupParams{ + Nodes: nodesList, + } + _, code := suite.trySetNodesForKeyspaceGroup(id, params) + suite.Equal(http.StatusBadRequest, code) + + // the keyspace group is exist. + kgs := &handlers.CreateKeyspaceGroupParams{KeyspaceGroups: []*endpoint.KeyspaceGroup{ + { + ID: uint32(id), + UserKind: endpoint.Standard.String(), + }, + }} + code = suite.tryCreateKeyspaceGroup(kgs) + suite.Equal(http.StatusOK, code) + params = &handlers.SetNodesForKeyspaceGroupParams{ + Nodes: nodesList, + } + kg, code := suite.trySetNodesForKeyspaceGroup(id, params) + suite.Equal(http.StatusOK, code) + suite.Len(kg.Members, 2) + for _, member := range kg.Members { + suite.Contains(nodes, member.Address) + } + + // the keyspace group is exist, but the nodes is not exist. + params = &handlers.SetNodesForKeyspaceGroupParams{ + Nodes: append(nodesList, "pingcap.com:2379"), + } + _, code = suite.trySetNodesForKeyspaceGroup(id, params) + suite.Equal(http.StatusBadRequest, code) + + // the keyspace group is exist, but the count of nodes is less than the default replica. + params = &handlers.SetNodesForKeyspaceGroupParams{ + Nodes: []string{nodesList[0]}, + } + _, code = suite.trySetNodesForKeyspaceGroup(id, params) + suite.Equal(http.StatusBadRequest, code) + + // the keyspace group is not exist. + id = 2 + params = &handlers.SetNodesForKeyspaceGroupParams{ + Nodes: nodesList, + } + _, code = suite.trySetNodesForKeyspaceGroup(id, params) + suite.Equal(http.StatusBadRequest, code) +} + +func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { + nodes := make(map[string]bs.Server) + for i := 0; i < utils.KeyspaceGroupDefaultReplicaCount; i++ { + s, cleanup := mcs.StartSingleTSOTestServer(suite.ctx, suite.Require(), suite.backendEndpoints, tempurl.Alloc()) + defer cleanup() + nodes[s.GetAddr()] = s + } + mcs.WaitForPrimaryServing(suite.Require(), nodes) + + // the default keyspace group is exist. 
+ time.Sleep(2 * time.Second) + kg, code := suite.tryGetKeyspaceGroup(utils.DefaultKeyspaceGroupID) + suite.Equal(http.StatusOK, code) + suite.Equal(utils.DefaultKeyspaceGroupID, kg.ID) + suite.Len(kg.Members, utils.KeyspaceGroupDefaultReplicaCount) + for _, member := range kg.Members { + suite.Contains(nodes, member.Address) + } +} + +func (suite *keyspaceGroupTestSuite) tryAllocNodesForKeyspaceGroup(id int, request *handlers.AllocNodesForKeyspaceGroupParams) ([]endpoint.KeyspaceGroupMember, int) { data, err := json.Marshal(request) suite.NoError(err) httpReq, err := http.NewRequest(http.MethodPost, suite.server.GetAddr()+keyspaceGroupsPrefix+fmt.Sprintf("/%d/alloc", id), bytes.NewBuffer(data)) @@ -218,7 +316,7 @@ func (suite *keyspaceGroupTestSuite) tryAllocNodesForKeyspaceGroup(id int, reque suite.NoError(err) suite.NoError(json.Unmarshal(bodyBytes, &nodes)) } - return resp.StatusCode, nodes + return nodes, resp.StatusCode } func (suite *keyspaceGroupTestSuite) tryCreateKeyspaceGroup(request *handlers.CreateKeyspaceGroupParams) int { @@ -232,14 +330,31 @@ func (suite *keyspaceGroupTestSuite) tryCreateKeyspaceGroup(request *handlers.Cr return resp.StatusCode } -func checkNodes(nodes []endpoint.KeyspaceGroupMember, servers map[string]bs.Server) bool { - if len(nodes) != len(servers) { - return false +func (suite *keyspaceGroupTestSuite) tryGetKeyspaceGroup(id uint32) (*endpoint.KeyspaceGroup, int) { + httpReq, err := http.NewRequest(http.MethodGet, suite.server.GetAddr()+keyspaceGroupsPrefix+fmt.Sprintf("/%d", id), nil) + suite.NoError(err) + resp, err := suite.dialClient.Do(httpReq) + suite.NoError(err) + defer resp.Body.Close() + kg := &endpoint.KeyspaceGroup{} + if resp.StatusCode == http.StatusOK { + bodyBytes, err := io.ReadAll(resp.Body) + suite.NoError(err) + suite.NoError(json.Unmarshal(bodyBytes, kg)) } - for _, node := range nodes { - if _, ok := servers[node.Address]; !ok { - return false - } + return kg, resp.StatusCode +} + +func (suite 
*keyspaceGroupTestSuite) trySetNodesForKeyspaceGroup(id int, request *handlers.SetNodesForKeyspaceGroupParams) (*endpoint.KeyspaceGroup, int) { + data, err := json.Marshal(request) + suite.NoError(err) + httpReq, err := http.NewRequest(http.MethodPost, suite.server.GetAddr()+keyspaceGroupsPrefix+fmt.Sprintf("/%d/nodes", id), bytes.NewBuffer(data)) + suite.NoError(err) + resp, err := suite.dialClient.Do(httpReq) + suite.NoError(err) + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, resp.StatusCode } - return true + return suite.tryGetKeyspaceGroup(uint32(id)) } diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 81b85d314f9c..9cd46ad2d41c 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -26,6 +26,7 @@ import ( pd "github.com/tikv/pd/client" "github.com/tikv/pd/client/testutil" "github.com/tikv/pd/pkg/election" + mcsutils "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/storage/endpoint" tsopkg "github.com/tikv/pd/pkg/tso" @@ -86,6 +87,46 @@ func cleanupKeyspaceGroups(re *require.Assertions, server *tests.TestServer) { } } +func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByDefaultKeyspaceGroup() { + // There is only default keyspace group. Any keyspace, which hasn't been assigned to + // a keyspace group before, will be served by the default keyspace group. 
+ re := suite.Require() + testutil.Eventually(re, func() bool { + for _, server := range suite.tsoCluster.GetServers() { + allServed := true + for _, keyspaceID := range []uint32{0, 1, 2} { + if server.IsKeyspaceServing(keyspaceID, mcsutils.DefaultKeyspaceGroupID) { + tam, err := server.GetTSOAllocatorManager(mcsutils.DefaultKeyspaceGroupID) + re.NoError(err) + re.NotNil(tam) + } else { + allServed = false + } + } + return allServed + } + return false + }, testutil.WithWaitFor(5*time.Second), testutil.WithTickInterval(50*time.Millisecond)) + + // Any keyspace that was assigned to a keyspace group before, except default keyspace, + // won't be served at this time. Default keyspace will be served by default keyspace group + // all the time. + for _, server := range suite.tsoCluster.GetServers() { + server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + for _, keyspaceGroupID := range []uint32{1, 2, 3} { + server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, keyspaceGroupID) + server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, keyspaceGroupID) + for _, keyspaceID := range []uint32{1, 2, 3} { + if server.IsKeyspaceServing(keyspaceID, keyspaceGroupID) { + tam, err := server.GetTSOAllocatorManager(keyspaceGroupID) + re.NoError(err) + re.NotNil(tam) + } + } + } + } +} + func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupSplit() { re := suite.Require() // Create the keyspace group 1 with keyspaces [111, 222, 333]. 
diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go index 0ff65575cc22..96a5f054dee7 100644 --- a/tests/integrations/mcs/tso/server_test.go +++ b/tests/integrations/mcs/tso/server_test.go @@ -77,6 +77,7 @@ func (suite *tsoServerTestSuite) SetupSuite() { leaderName := suite.cluster.WaitLeader() suite.pdLeader = suite.cluster.GetServer(leaderName) suite.backendEndpoints = suite.pdLeader.GetAddr() + suite.NoError(suite.pdLeader.BootstrapCluster()) } func (suite *tsoServerTestSuite) TearDownSuite() { @@ -175,6 +176,7 @@ func checkTSOPath(re *require.Assertions, isAPIServiceMode bool) { pdLeader := cluster.GetServer(leaderName) re.NoError(pdLeader.BootstrapCluster()) backendEndpoints := pdLeader.GetAddr() + re.NoError(pdLeader.BootstrapCluster()) client := pdLeader.GetEtcdClient() if isAPIServiceMode { re.Equal(0, getEtcdTimestampKeyNum(re, client)) @@ -272,7 +274,7 @@ func (suite *APIServerForwardTestSuite) TestForwardTSORelated() { func (suite *APIServerForwardTestSuite) TestForwardTSOWhenPrimaryChanged() { re := suite.Require() - tc, err := mcs.NewTestTSOCluster(suite.ctx, 3, suite.backendEndpoints) + tc, err := mcs.NewTestTSOCluster(suite.ctx, 2, suite.backendEndpoints) re.NoError(err) defer tc.Destroy() tc.WaitForDefaultPrimaryServing(re) diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index 0bde4f1a18b2..2d41aad4b84a 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -187,7 +187,10 @@ func (suite *tsoClientTestSuite) TestRandomResignLeader() { wg.Add(1) go func() { defer wg.Done() - n := r.Intn(2) + 1 + // After https://github.com/tikv/pd/issues/6376 is fixed, we can use a smaller number here. + // currently, the time to discover tso service is usually a little longer than 1s, compared + // to the previous time taken < 1s. 
+ n := r.Intn(2) + 3 time.Sleep(time.Duration(n) * time.Second) if !suite.legacy { suite.tsoCluster.ResignPrimary() @@ -214,7 +217,10 @@ func (suite *tsoClientTestSuite) TestRandomShutdown() { wg.Add(1) go func() { defer wg.Done() - n := r.Intn(2) + 1 + // After https://github.com/tikv/pd/issues/6376 is fixed, we can use a smaller number here. + // currently, the time to discover tso service is usually a little longer than 1s, compared + // to the previous time taken < 1s. + n := r.Intn(2) + 3 time.Sleep(time.Duration(n) * time.Second) if !suite.legacy { suite.tsoCluster.WaitForDefaultPrimaryServing(re).Close() diff --git a/tests/integrations/tso/go.mod b/tests/integrations/tso/go.mod index 1b93f8e8eee9..7678db9b5ddd 100644 --- a/tests/integrations/tso/go.mod +++ b/tests/integrations/tso/go.mod @@ -13,7 +13,7 @@ replace google.golang.org/grpc v1.54.0 => google.golang.org/grpc v1.26.0 require ( github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a + github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be github.com/stretchr/testify v1.8.2 github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 diff --git a/tests/integrations/tso/go.sum b/tests/integrations/tso/go.sum index 6aad523861ca..6eb433c74a36 100644 --- a/tests/integrations/tso/go.sum +++ b/tests/integrations/tso/go.sum @@ -383,8 +383,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= 
-github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go index aced6750a11f..d10de75d0691 100644 --- a/tests/pdctl/scheduler/scheduler_test.go +++ b/tests/pdctl/scheduler/scheduler_test.go @@ -460,7 +460,15 @@ func TestScheduler(t *testing.T) { checkSchedulerWithStatusCommand(nil, "paused", []string{ "balance-leader-scheduler", }) - checkSchedulerDescribeCommand("balance-leader-scheduler", "paused", "") + result := make(map[string]interface{}) + testutil.Eventually(re, func() bool { + mightExec([]string{"-u", pdAddr, "scheduler", "describe", "balance-leader-scheduler"}, &result) + return len(result) != 0 + }, testutil.WithTickInterval(50*time.Millisecond)) + + testutil.Eventually(re, func() bool { + return result["status"] == "paused" && result["summary"] == "" + }, testutil.WithTickInterval(50*time.Millisecond)) mustUsage([]string{"-u", pdAddr, "scheduler", "resume", "balance-leader-scheduler", "60"}) mustExec([]string{"-u", pdAddr, "scheduler", "resume", "balance-leader-scheduler"}, nil) diff --git a/tools/pd-tso-bench/go.sum b/tools/pd-tso-bench/go.sum index 66ea008a8971..ab85155db70b 100644 --- a/tools/pd-tso-bench/go.sum +++ b/tools/pd-tso-bench/go.sum @@ -851,8 +851,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTm 
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a h1:PWkMSJSDaOuLNKCV84K3tQ9stZuZPN8E148jRPD9TcA= -github.com/pingcap/kvproto v0.0.0-20230407040905-68d0eebd564a/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be h1:eHtwHgPzzm8aIZ4x8o7zg1b23cjUl0AikW+SDLpqf3E= +github.com/pingcap/kvproto v0.0.0-20230426023724-d90a321b46be/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=