From eec48508be965a167afae55dc7b2bdd1ff50292a Mon Sep 17 00:00:00 2001
From: husharp
Date: Thu, 7 Sep 2023 15:44:46 +0800
Subject: [PATCH 1/4] fix

Signed-off-by: husharp
---
 server/api/min_resolved_ts.go |  9 ++++
 server/api/store.go           |  4 ++
 server/grpc_service.go        |  4 +-
 server/handler.go             | 92 ++++++++++++++++++++++++++++++++---
 server/server.go              | 60 +++++++++++++++--------
 5 files changed, 141 insertions(+), 28 deletions(-)

diff --git a/server/api/min_resolved_ts.go b/server/api/min_resolved_ts.go
index ef05e91b9f7..efda01c70da 100644
--- a/server/api/min_resolved_ts.go
+++ b/server/api/min_resolved_ts.go
@@ -20,6 +20,7 @@ import (
     "strings"

     "github.com/gorilla/mux"
+    "github.com/tikv/pd/pkg/errs"
     "github.com/tikv/pd/pkg/utils/typeutil"
     "github.com/tikv/pd/server"
     "github.com/unrolled/render"
@@ -54,6 +55,10 @@ type minResolvedTS struct {
 // @Router /min-resolved-ts/{store_id} [get]
 func (h *minResolvedTSHandler) GetStoreMinResolvedTS(w http.ResponseWriter, r *http.Request) {
     c := h.svr.GetRaftCluster()
+    if c == nil {
+        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.FastGenByArgs().Error())
+        return
+    }
     idStr := mux.Vars(r)["store_id"]
     storeID, err := strconv.ParseUint(idStr, 10, 64)
     if err != nil {
@@ -85,6 +90,10 @@ func (h *minResolvedTSHandler) GetStoreMinResolvedTS(w http.ResponseWriter, r *h
 // @Router /min-resolved-ts [get]
 func (h *minResolvedTSHandler) GetMinResolvedTS(w http.ResponseWriter, r *http.Request) {
     c := h.svr.GetRaftCluster()
+    if c == nil {
+        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.FastGenByArgs().Error())
+        return
+    }
     scopeMinResolvedTS := c.GetMinResolvedTS()
     persistInterval := c.GetPDServerConfig().MinResolvedTSPersistenceInterval
diff --git a/server/api/store.go b/server/api/store.go
index a3e8c4518a2..61973f34918 100644
--- a/server/api/store.go
+++ b/server/api/store.go
@@ -428,6 +428,10 @@ func (h *storeHandler) SetStoreWeight(w http.ResponseWriter, r *http.Request) {
 // @Router /store/{id}/limit [post]
 func (h *storeHandler) SetStoreLimit(w http.ResponseWriter, r *http.Request) {
     rc := getCluster(r)
+    if rc == nil {
+        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.GenWithStackByArgs().Error())
+        return
+    }
     vars := mux.Vars(r)
     storeID, errParse := apiutil.ParseUint64VarsField(vars, "id")
     if errParse != nil {
diff --git a/server/grpc_service.go b/server/grpc_service.go
index 0563371cdc3..973c45a622f 100644
--- a/server/grpc_service.go
+++ b/server/grpc_service.go
@@ -1600,7 +1600,6 @@ func (s *GrpcServer) ReportBatchSplit(ctx context.Context, request *pdpb.ReportB
     if rc == nil {
         return &pdpb.ReportBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil
     }
-
     _, err := rc.HandleBatchReportSplit(request)
     if err != nil {
         return &pdpb.ReportBatchSplitResponse{
@@ -2089,6 +2088,9 @@ func (s *GrpcServer) SplitAndScatterRegions(ctx context.Context, request *pdpb.S
         return rsp.(*pdpb.SplitAndScatterRegionsResponse), err
     }
     rc := s.GetRaftCluster()
+    if rc == nil {
+        return &pdpb.SplitAndScatterRegionsResponse{Header: s.notBootstrappedHeader()}, nil
+    }
     splitFinishedPercentage, newRegionIDs := rc.GetRegionSplitter().SplitRegions(ctx, request.GetSplitKeys(), int(request.GetRetryLimit()))
     scatterFinishedPercentage, err := scatterRegions(rc, newRegionIDs, request.GetGroup(), int(request.GetRetryLimit()), false)
     if err != nil {
diff --git a/server/handler.go b/server/handler.go
index 2e4b88b20e2..785d78a99f7 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -114,6 +114,9 @@ func (h *Handler) IsSchedulerPaused(name string) (bool, error) {
     if err != nil {
         return false, err
     }
+    if rc == nil {
+        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerPaused(name)
 }
@@ -123,6 +126,9 @@ func (h *Handler) IsSchedulerDisabled(name string) (bool, error) {
     if err != nil {
         return false, err
     }
+    if rc == nil {
+        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerDisabled(name)
 }
@@ -132,6 +138,9 @@ func (h *Handler) IsSchedulerExisted(name string) (bool, error) {
     if err != nil {
         return false, err
     }
+    if rc == nil {
+        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerExisted(name)
 }
@@ -146,6 +155,9 @@ func (h *Handler) GetSchedulers() ([]string, error) {
     if err != nil {
         return nil, err
     }
+    if c == nil {
+        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return c.GetSchedulers(), nil
 }
@@ -155,6 +167,9 @@ func (h *Handler) IsCheckerPaused(name string) (bool, error) {
     if err != nil {
         return false, err
     }
+    if rc == nil {
+        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return rc.GetCoordinator().IsCheckerPaused(name)
 }
@@ -180,7 +195,7 @@ func (h *Handler) GetStores() ([]*core.StoreInfo, error) {
 // GetHotWriteRegions gets all hot write regions stats.
 func (h *Handler) GetHotWriteRegions() *statistics.StoreHotPeersInfos {
     c, err := h.GetRaftCluster()
-    if err != nil {
+    if err != nil || c == nil {
         return nil
     }
     return c.GetHotWriteRegions()
@@ -189,7 +204,7 @@ func (h *Handler) GetHotWriteRegions() *statistics.StoreHotPeersInfos {
 // GetHotBuckets returns all hot buckets stats.
 func (h *Handler) GetHotBuckets(regionIDs ...uint64) map[uint64][]*buckets.BucketStat {
     c, err := h.GetRaftCluster()
-    if err != nil {
+    if err != nil || c == nil {
         return nil
     }
     degree := c.GetOpts().GetHotRegionCacheHitsThreshold()
@@ -199,7 +214,7 @@ func (h *Handler) GetHotBuckets(regionIDs ...uint64) map[uint64][]*buckets.Bucke
 // GetHotReadRegions gets all hot read regions stats.
 func (h *Handler) GetHotReadRegions() *statistics.StoreHotPeersInfos {
     c, err := h.GetRaftCluster()
-    if err != nil {
+    if err != nil || c == nil {
         return nil
     }
     return c.GetHotReadRegions()
@@ -230,6 +245,9 @@ func (h *Handler) AddScheduler(name string, args ...string) error {
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     s, err := schedulers.CreateScheduler(name, c.GetOperatorController(), h.s.storage, schedulers.ConfigSliceDecoder(name, args),
         c.GetCoordinator().GetSchedulersController().RemoveScheduler)
     if err != nil {
@@ -258,6 +276,9 @@ func (h *Handler) RemoveScheduler(name string) error {
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     if !h.s.IsAPIServiceMode() {
         if err = c.RemoveScheduler(name); err != nil {
             log.Error("can not remove scheduler", zap.String("scheduler-name", name), errs.ZapError(err))
@@ -288,6 +309,9 @@ func (h *Handler) PauseOrResumeScheduler(name string, t int64) error {
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     if err = c.PauseOrResumeScheduler(name, t); err != nil {
         if t == 0 {
             log.Error("can not resume scheduler", zap.String("scheduler-name", name), errs.ZapError(err))
@@ -312,6 +336,9 @@ func (h *Handler) PauseOrResumeChecker(name string, t int64) error {
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     if err = c.PauseOrResumeChecker(name, t); err != nil {
         if t == 0 {
             log.Error("can not resume checker", zap.String("checker-name", name), errs.ZapError(err))
@@ -526,6 +553,9 @@ func (h *Handler) SetAllStoresLimit(ratePerMin float64, limitType storelimit.Typ
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return c.SetAllStoresLimit(limitType, ratePerMin)
 }
@@ -535,6 +565,9 @@ func (h *Handler) SetAllStoresLimitTTL(ratePerMin float64, limitType storelimit.
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     c.SetAllStoresLimitTTL(limitType, ratePerMin, ttl)
     return nil
 }
@@ -545,6 +578,9 @@ func (h *Handler) SetLabelStoresLimit(ratePerMin float64, limitType storelimit.T
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     for _, store := range c.GetStores() {
         for _, label := range labels {
             for _, sl := range store.GetLabels() {
@@ -564,6 +600,9 @@ func (h *Handler) GetAllStoresLimit(limitType storelimit.Type) (map[uint64]sc.St
     if err != nil {
         return nil, err
     }
+    if c == nil {
+        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return c.GetAllStoresLimit(), nil
 }
@@ -573,6 +612,9 @@ func (h *Handler) SetStoreLimit(storeID uint64, ratePerMin float64, limitType st
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     return c.SetStoreLimit(storeID, limitType, ratePerMin)
 }
@@ -582,6 +624,9 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -610,6 +655,9 @@ func (h *Handler) AddTransferRegionOperator(regionID uint64, storeIDs map[uint64
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -654,6 +702,9 @@ func (h *Handler) AddTransferPeerOperator(regionID uint64, fromStoreID, toStoreI
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -687,6 +738,9 @@ func (h *Handler) checkAdminAddPeerOperator(regionID uint64, toStoreID uint64) (
     if err != nil {
         return nil, nil, err
     }
+    if c == nil {
+        return nil, nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -752,6 +806,9 @@ func (h *Handler) AddRemovePeerOperator(regionID uint64, fromStoreID uint64) err
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -779,6 +836,9 @@ func (h *Handler) AddMergeRegionOperator(regionID uint64, targetID uint64) error
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -821,6 +881,9 @@ func (h *Handler) AddSplitRegionOperator(regionID uint64, policyStr string, keys
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -860,6 +923,9 @@ func (h *Handler) AddScatterRegionOperator(regionID uint64, group string) error
     if err != nil {
         return err
     }
+    if c == nil {
+        return errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -890,6 +956,9 @@ func (h *Handler) AddScatterRegionsOperators(regionIDs []uint64, startRawKey, en
     if err != nil {
         return 0, err
     }
+    if c == nil {
+        return 0, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     opsCount := 0
     var failures map[uint64]error
     // If startKey and endKey are both defined, use them first.
@@ -934,6 +1003,9 @@ func (h *Handler) GetSchedulerConfigHandler() (http.Handler, error) {
     if err != nil {
         return nil, err
     }
+    if c == nil {
+        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
+    }
     mux := http.NewServeMux()
     for name, handler := range c.GetSchedulerHandlers() {
         prefix := path.Join(pdRootPath, SchedulerConfigHandlerPath, name)
@@ -961,14 +1033,20 @@ func (h *Handler) ResetTS(ts uint64, ignoreSmaller, skipUpperBoundCheck bool, _

 // SetStoreLimitScene sets the limit values for different scenes
 func (h *Handler) SetStoreLimitScene(scene *storelimit.Scene, limitType storelimit.Type) {
-    cluster := h.s.GetRaftCluster()
-    cluster.GetStoreLimiter().ReplaceStoreLimitScene(scene, limitType)
+    rc := h.s.GetRaftCluster()
+    if rc == nil {
+        return
+    }
+    rc.GetStoreLimiter().ReplaceStoreLimitScene(scene, limitType)
 }

 // GetStoreLimitScene returns the limit values for different scenes
 func (h *Handler) GetStoreLimitScene(limitType storelimit.Type) *storelimit.Scene {
-    cluster := h.s.GetRaftCluster()
-    return cluster.GetStoreLimiter().StoreLimitScene(limitType)
+    rc := h.s.GetRaftCluster()
+    if rc == nil {
+        return nil
+    }
+    return rc.GetStoreLimiter().StoreLimitScene(limitType)
 }

 // GetProgressByID returns the progress details for a given store ID.
diff --git a/server/server.go b/server/server.go
index a72c1c23f0c..55ca2e88f4d 100644
--- a/server/server.go
+++ b/server/server.go
@@ -1023,18 +1023,18 @@ func (s *Server) SetReplicationConfig(cfg sc.ReplicationConfig) error {
     }
     old := s.persistOptions.GetReplicationConfig()
     if cfg.EnablePlacementRules != old.EnablePlacementRules {
-        raftCluster := s.GetRaftCluster()
-        if raftCluster == nil {
+        rc := s.GetRaftCluster()
+        if rc == nil {
             return errs.ErrNotBootstrapped.GenWithStackByArgs()
         }
         if cfg.EnablePlacementRules {
             // initialize rule manager.
-            if err := raftCluster.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels); err != nil {
+            if err := rc.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels); err != nil {
                 return err
             }
         } else {
             // NOTE: can be removed after placement rules feature is enabled by default.
-            for _, s := range raftCluster.GetStores() {
+            for _, s := range rc.GetStores() {
                 if !s.IsRemoved() && s.IsTiFlash() {
                     return errors.New("cannot disable placement rules with TiFlash nodes")
                 }
@@ -1044,8 +1044,12 @@ func (s *Server) SetReplicationConfig(cfg sc.ReplicationConfig) error {

     var rule *placement.Rule
     if cfg.EnablePlacementRules {
+        rc := s.GetRaftCluster()
+        if rc == nil {
+            return errs.ErrNotBootstrapped.GenWithStackByArgs()
+        }
         // replication.MaxReplicas won't work when placement rule is enabled and not only have one default rule.
-        defaultRule := s.GetRaftCluster().GetRuleManager().GetRule("pd", "default")
+        defaultRule := rc.GetRuleManager().GetRule("pd", "default")
         CheckInDefaultRule := func() error {
             // replication config won't work when placement rule is enabled and exceeds one default rule
@@ -1071,7 +1075,11 @@ func (s *Server) SetReplicationConfig(cfg sc.ReplicationConfig) error {
     if rule != nil {
         rule.Count = int(cfg.MaxReplicas)
         rule.LocationLabels = cfg.LocationLabels
-        if err := s.GetRaftCluster().GetRuleManager().SetRule(rule); err != nil {
+        rc := s.GetRaftCluster()
+        if rc == nil {
+            return errs.ErrNotBootstrapped.GenWithStackByArgs()
+        }
+        if err := rc.GetRuleManager().SetRule(rule); err != nil {
             log.Error("failed to update rule count", errs.ZapError(err))
             return err
@@ -1083,7 +1091,11 @@ func (s *Server) SetReplicationConfig(cfg sc.ReplicationConfig) error {
         s.persistOptions.SetReplicationConfig(old)
         if rule != nil {
             rule.Count = int(old.MaxReplicas)
-            if e := s.GetRaftCluster().GetRuleManager().SetRule(rule); e != nil {
+            rc := s.GetRaftCluster()
+            if rc == nil {
+                return errs.ErrNotBootstrapped.GenWithStackByArgs()
+            }
+            if e := rc.GetRuleManager().SetRule(rule); e != nil {
                 log.Error("failed to roll back count of rule when update replication config", errs.ZapError(e))
             }
         }
@@ -1371,18 +1383,18 @@ func (s *Server) GetServerOption() *config.PersistOptions {

 // GetMetaRegions gets meta regions from cluster.
 func (s *Server) GetMetaRegions() []*metapb.Region {
-    cluster := s.GetRaftCluster()
-    if cluster != nil {
-        return cluster.GetMetaRegions()
+    rc := s.GetRaftCluster()
+    if rc != nil {
+        return rc.GetMetaRegions()
     }
     return nil
 }

 // GetRegions gets regions from cluster.
 func (s *Server) GetRegions() []*core.RegionInfo {
-    cluster := s.GetRaftCluster()
-    if cluster != nil {
-        return cluster.GetRegions()
+    rc := s.GetRaftCluster()
+    if rc != nil {
+        return rc.GetRegions()
     }
     return nil
 }
@@ -1519,9 +1531,9 @@ func (s *Server) SetReplicationModeConfig(cfg config.ReplicationModeConfig) erro
     }
     log.Info("replication mode config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old))

-    cluster := s.GetRaftCluster()
-    if cluster != nil {
-        err := cluster.GetReplicationMode().UpdateConfig(cfg)
+    rc := s.GetRaftCluster()
+    if rc != nil {
+        err := rc.GetReplicationMode().UpdateConfig(cfg)
         if err != nil {
             log.Warn("failed to update replication mode", errs.ZapError(err))
             // revert to old config
@@ -1992,7 +2004,11 @@ func (s *Server) RecoverAllocID(ctx context.Context, id uint64) error {

 // GetExternalTS returns external timestamp.
 func (s *Server) GetExternalTS() uint64 {
-    return s.GetRaftCluster().GetExternalTS()
+    rc := s.GetRaftCluster()
+    if rc == nil {
+        return 0
+    }
+    return rc.GetExternalTS()
 }

 // SetExternalTS returns external timestamp.
@@ -2002,14 +2018,18 @@ func (s *Server) SetExternalTS(externalTS, globalTS uint64) error {
         log.Error(desc, zap.Uint64("request timestamp", externalTS), zap.Uint64("global ts", globalTS))
         return errors.New(desc)
     }
-    currentExternalTS := s.GetRaftCluster().GetExternalTS()
+    c := s.GetRaftCluster()
+    if c == nil {
+        return errs.ErrNotBootstrapped.FastGenByArgs()
+    }
+    currentExternalTS := c.GetExternalTS()
     if tsoutil.CompareTimestampUint64(externalTS, currentExternalTS) != 1 {
         desc := "the external timestamp should be larger than current external timestamp"
         log.Error(desc, zap.Uint64("request", externalTS), zap.Uint64("current", currentExternalTS))
         return errors.New(desc)
     }
-    s.GetRaftCluster().SetExternalTS(externalTS)
-    return nil
+
+    return c.SetExternalTS(externalTS)
 }

 // IsLocalTSOEnabled returns if the local TSO is enabled.

From 539d4dcc718018b6bd8eca928164f0ee7f9be224 Mon Sep 17 00:00:00 2001
From: husharp
Date: Fri, 8 Sep 2023 17:01:40 +0800
Subject: [PATCH 2/4] add test

Signed-off-by: husharp
---
 server/handler.go            | 78 ++----------------------------------
 tests/server/api/api_test.go | 40 ++++++++++++++++++
 2 files changed, 43 insertions(+), 75 deletions(-)

diff --git a/server/handler.go b/server/handler.go
index 785d78a99f7..adc1e8ecd31 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -114,9 +114,6 @@ func (h *Handler) IsSchedulerPaused(name string) (bool, error) {
     if err != nil {
         return false, err
     }
-    if rc == nil {
-        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerPaused(name)
 }
@@ -126,9 +123,6 @@ func (h *Handler) IsSchedulerDisabled(name string) (bool, error) {
     if err != nil {
         return false, err
     }
-    if rc == nil {
-        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerDisabled(name)
 }
@@ -138,9 +132,6 @@ func (h *Handler) IsSchedulerExisted(name string) (bool, error) {
     if err != nil {
         return false, err
     }
-    if rc == nil {
-        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return rc.GetCoordinator().GetSchedulersController().IsSchedulerExisted(name)
 }
@@ -155,9 +146,6 @@ func (h *Handler) GetSchedulers() ([]string, error) {
     if err != nil {
         return nil, err
     }
-    if c == nil {
-        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return c.GetSchedulers(), nil
 }
@@ -167,9 +155,6 @@ func (h *Handler) IsCheckerPaused(name string) (bool, error) {
     if err != nil {
         return false, err
     }
-    if rc == nil {
-        return false, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return rc.GetCoordinator().IsCheckerPaused(name)
 }
@@ -195,7 +180,7 @@ func (h *Handler) GetStores() ([]*core.StoreInfo, error) {
 // GetHotWriteRegions gets all hot write regions stats.
 func (h *Handler) GetHotWriteRegions() *statistics.StoreHotPeersInfos {
     c, err := h.GetRaftCluster()
-    if err != nil || c == nil {
+    if err != nil {
         return nil
     }
     return c.GetHotWriteRegions()
@@ -204,7 +189,7 @@ func (h *Handler) GetHotWriteRegions() *statistics.StoreHotPeersInfos {
 // GetHotBuckets returns all hot buckets stats.
 func (h *Handler) GetHotBuckets(regionIDs ...uint64) map[uint64][]*buckets.BucketStat {
     c, err := h.GetRaftCluster()
-    if err != nil || c == nil {
+    if err != nil {
         return nil
     }
     degree := c.GetOpts().GetHotRegionCacheHitsThreshold()
@@ -214,7 +199,7 @@ func (h *Handler) GetHotBuckets(regionIDs ...uint64) map[uint64][]*buckets.Bucke
 // GetHotReadRegions gets all hot read regions stats.
 func (h *Handler) GetHotReadRegions() *statistics.StoreHotPeersInfos {
     c, err := h.GetRaftCluster()
-    if err != nil || c == nil {
+    if err != nil {
         return nil
     }
     return c.GetHotReadRegions()
@@ -245,9 +230,6 @@ func (h *Handler) AddScheduler(name string, args ...string) error {
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     s, err := schedulers.CreateScheduler(name, c.GetOperatorController(), h.s.storage, schedulers.ConfigSliceDecoder(name, args),
         c.GetCoordinator().GetSchedulersController().RemoveScheduler)
     if err != nil {
@@ -276,9 +258,6 @@ func (h *Handler) RemoveScheduler(name string) error {
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     if !h.s.IsAPIServiceMode() {
         if err = c.RemoveScheduler(name); err != nil {
             log.Error("can not remove scheduler", zap.String("scheduler-name", name), errs.ZapError(err))
@@ -309,9 +288,6 @@ func (h *Handler) PauseOrResumeScheduler(name string, t int64) error {
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     if err = c.PauseOrResumeScheduler(name, t); err != nil {
         if t == 0 {
             log.Error("can not resume scheduler", zap.String("scheduler-name", name), errs.ZapError(err))
@@ -336,9 +312,6 @@ func (h *Handler) PauseOrResumeChecker(name string, t int64) error {
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     if err = c.PauseOrResumeChecker(name, t); err != nil {
         if t == 0 {
             log.Error("can not resume checker", zap.String("checker-name", name), errs.ZapError(err))
@@ -553,9 +526,6 @@ func (h *Handler) SetAllStoresLimit(ratePerMin float64, limitType storelimit.Typ
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return c.SetAllStoresLimit(limitType, ratePerMin)
 }
@@ -565,9 +535,6 @@ func (h *Handler) SetAllStoresLimitTTL(ratePerMin float64, limitType storelimit.
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     c.SetAllStoresLimitTTL(limitType, ratePerMin, ttl)
     return nil
 }
@@ -578,9 +545,6 @@ func (h *Handler) SetLabelStoresLimit(ratePerMin float64, limitType storelimit.T
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     for _, store := range c.GetStores() {
         for _, label := range labels {
             for _, sl := range store.GetLabels() {
@@ -600,9 +564,6 @@ func (h *Handler) GetAllStoresLimit(limitType storelimit.Type) (map[uint64]sc.St
     if err != nil {
         return nil, err
     }
-    if c == nil {
-        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return c.GetAllStoresLimit(), nil
 }
@@ -612,9 +573,6 @@ func (h *Handler) SetStoreLimit(storeID uint64, ratePerMin float64, limitType st
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     return c.SetStoreLimit(storeID, limitType, ratePerMin)
 }
@@ -624,9 +582,6 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -655,9 +610,6 @@ func (h *Handler) AddTransferRegionOperator(regionID uint64, storeIDs map[uint64
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -702,9 +654,6 @@ func (h *Handler) AddTransferPeerOperator(regionID uint64, fromStoreID, toStoreI
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -738,9 +687,6 @@ func (h *Handler) checkAdminAddPeerOperator(regionID uint64, toStoreID uint64) (
     if err != nil {
         return nil, nil, err
     }
-    if c == nil {
-        return nil, nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -806,9 +752,6 @@ func (h *Handler) AddRemovePeerOperator(regionID uint64, fromStoreID uint64) err
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -836,9 +779,6 @@ func (h *Handler) AddMergeRegionOperator(regionID uint64, targetID uint64) error
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -881,9 +821,6 @@ func (h *Handler) AddSplitRegionOperator(regionID uint64, policyStr string, keys
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -923,9 +860,6 @@ func (h *Handler) AddScatterRegionOperator(regionID uint64, group string) error
     if err != nil {
         return err
     }
-    if c == nil {
-        return errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }

     region := c.GetRegion(regionID)
     if region == nil {
@@ -956,9 +890,6 @@ func (h *Handler) AddScatterRegionsOperators(regionIDs []uint64, startRawKey, en
     if err != nil {
         return 0, err
     }
-    if c == nil {
-        return 0, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     opsCount := 0
     var failures map[uint64]error
     // If startKey and endKey are both defined, use them first.
@@ -1003,9 +934,6 @@ func (h *Handler) GetSchedulerConfigHandler() (http.Handler, error) {
     if err != nil {
         return nil, err
     }
-    if c == nil {
-        return nil, errs.ErrNotBootstrapped.GenWithStackByArgs()
-    }
     mux := http.NewServeMux()
     for name, handler := range c.GetSchedulerHandlers() {
         prefix := path.Join(pdRootPath, SchedulerConfigHandlerPath, name)
diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go
index 623af9b0a82..61d47d7790c 100644
--- a/tests/server/api/api_test.go
+++ b/tests/server/api/api_test.go
@@ -807,6 +807,46 @@ func TestRemovingProgress(t *testing.T) {
     re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"))
 }

+func TestSendApiWhenRestartRaftCluster(t *testing.T) {
+    re := require.New(t)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+    cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) {
+        conf.Replication.MaxReplicas = 1
+    })
+    re.NoError(err)
+    defer cluster.Destroy()
+
+    err = cluster.RunInitialServers()
+    re.NoError(err)
+    leader := cluster.GetServer(cluster.WaitLeader())
+
+    grpcPDClient := testutil.MustNewGrpcClient(re, leader.GetAddr())
+    clusterID := leader.GetClusterID()
+    req := &pdpb.BootstrapRequest{
+        Header: testutil.NewRequestHeader(clusterID),
+        Store:  &metapb.Store{Id: 1, Address: "127.0.0.1:0"},
+        Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}},
+    }
+    resp, err := grpcPDClient.Bootstrap(context.Background(), req)
+    re.NoError(err)
+    re.Nil(resp.GetHeader().GetError())
+
+    // Mock restart raft cluster
+    rc := leader.GetRaftCluster()
+    re.NotNil(rc)
+    rc.Stop()
+
+    // Mock client-go will still send request
+    output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/min-resolved-ts", http.MethodGet, http.StatusInternalServerError)
+    re.Contains(string(output), "TiKV cluster not bootstrapped, please start TiKV first")
+
+    err = rc.Start(leader.GetServer())
+    re.NoError(err)
+    rc = leader.GetRaftCluster()
+    re.NotNil(rc)
+}
+
 func TestPreparingProgress(t *testing.T) {
     re := require.New(t)
     re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`))

From 79ddabf027a24bc873cb31f15cc75f38f3bdc5f7 Mon Sep 17 00:00:00 2001
From: husharp
Date: Fri, 8 Sep 2023 17:46:06 +0800
Subject: [PATCH 3/4] address comment

Signed-off-by: husharp
---
 server/api/min_resolved_ts.go | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/server/api/min_resolved_ts.go b/server/api/min_resolved_ts.go
index efda01c70da..1edf924370f 100644
--- a/server/api/min_resolved_ts.go
+++ b/server/api/min_resolved_ts.go
@@ -20,7 +20,6 @@ import (
     "strings"

     "github.com/gorilla/mux"
-    "github.com/tikv/pd/pkg/errs"
     "github.com/tikv/pd/pkg/utils/typeutil"
     "github.com/tikv/pd/server"
     "github.com/unrolled/render"
@@ -54,11 +53,7 @@ type minResolvedTS struct {
 // @Failure 500 {string} string "PD server failed to proceed the request."
 // @Router /min-resolved-ts/{store_id} [get]
 func (h *minResolvedTSHandler) GetStoreMinResolvedTS(w http.ResponseWriter, r *http.Request) {
-    c := h.svr.GetRaftCluster()
-    if c == nil {
-        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.FastGenByArgs().Error())
-        return
-    }
+    c := getCluster(r)
     idStr := mux.Vars(r)["store_id"]
     storeID, err := strconv.ParseUint(idStr, 10, 64)
     if err != nil {
@@ -89,11 +84,7 @@ func (h *minResolvedTSHandler) GetStoreMinResolvedTS(w http.ResponseWriter, r *h
 // @Failure 500 {string} string "PD server failed to proceed the request."
 // @Router /min-resolved-ts [get]
 func (h *minResolvedTSHandler) GetMinResolvedTS(w http.ResponseWriter, r *http.Request) {
-    c := h.svr.GetRaftCluster()
-    if c == nil {
-        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.FastGenByArgs().Error())
-        return
-    }
+    c := getCluster(r)
     scopeMinResolvedTS := c.GetMinResolvedTS()
     persistInterval := c.GetPDServerConfig().MinResolvedTSPersistenceInterval

From 2f5117e2109c38cea4b911fc11700296f735c7d1 Mon Sep 17 00:00:00 2001
From: husharp
Date: Fri, 8 Sep 2023 18:02:02 +0800
Subject: [PATCH 4/4] remove redundant code

Signed-off-by: husharp
---
 server/api/store.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/server/api/store.go b/server/api/store.go
index 61973f34918..a3e8c4518a2 100644
--- a/server/api/store.go
+++ b/server/api/store.go
@@ -428,10 +428,6 @@ func (h *storeHandler) SetStoreWeight(w http.ResponseWriter, r *http.Request) {
 // @Router /store/{id}/limit [post]
 func (h *storeHandler) SetStoreLimit(w http.ResponseWriter, r *http.Request) {
     rc := getCluster(r)
-    if rc == nil {
-        h.rd.JSON(w, http.StatusInternalServerError, errs.ErrNotBootstrapped.GenWithStackByArgs().Error())
-        return
-    }
     vars := mux.Vars(r)
     storeID, errParse := apiutil.ParseUint64VarsField(vars, "id")
     if errParse != nil {
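The guard this series converges on: wherever the raft cluster is fetched directly (h.svr.GetRaftCluster(), s.GetRaftCluster(), getCluster(r)) rather than through a getter that already returns an error, the caller checks the result for nil before use and surfaces ErrNotBootstrapped. A minimal sketch of that shape in Go, with a hypothetical method name (doSomethingWithCluster) used purely for illustration:

    // Illustration only: guard a direct GetRaftCluster() call against a cluster
    // that is stopped or not yet bootstrapped, instead of dereferencing nil.
    func (s *Server) doSomethingWithCluster() error {
        rc := s.GetRaftCluster()
        if rc == nil {
            return errs.ErrNotBootstrapped.GenWithStackByArgs()
        }
        // rc is safe to use from here on.
        return nil
    }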