fallback to follower when leader is busy #916
```diff
@@ -371,38 +371,75 @@ func (state *accessKnownLeader) onNoLeader(selector *replicaSelector) {
 // the leader will be updated to replicas[0] and give it another chance.
 type tryFollower struct {
     stateBase
-    leaderIdx AccessIndex
-    lastIdx   AccessIndex
+    // if the leader is unavailable, but it still holds the leadership, fallbackFromLeader is true and replica read is enabled.
+    fallbackFromLeader bool
+    leaderIdx          AccessIndex
+    lastIdx            AccessIndex
+    labels             []*metapb.StoreLabel
 }
```
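The new `fallbackFromLeader` flag is what later flips the retried request into replica-read mode through the selector's `contextPatcher`. The sketch below illustrates that override pattern with simplified stand-in types (they are not client-go's): an optional `*bool` distinguishes "force on", "force off", and "leave the request as built".

```go
package main

import "fmt"

// kvContext is a simplified stand-in for the kvrpcpb.Context carried by a request.
type kvContext struct {
    ReplicaRead bool
    StaleRead   bool
}

// contextPatcher mirrors the idea of an optional override applied to the
// request context just before sending. A nil field means "leave the
// original value alone".
type contextPatcher struct {
    replicaRead *bool
    staleRead   *bool
}

func (p *contextPatcher) applyTo(ctx *kvContext) {
    if p.replicaRead != nil {
        ctx.ReplicaRead = *p.replicaRead
    }
    if p.staleRead != nil {
        ctx.StaleRead = *p.staleRead
    }
}

func main() {
    ctx := kvContext{} // built as a plain leader read
    replicaRead := true
    patch := contextPatcher{replicaRead: &replicaRead}
    patch.applyTo(&ctx)
    fmt.Printf("%+v\n", ctx) // {ReplicaRead:true StaleRead:false}
}
```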
```diff
 func (state *tryFollower) next(bo *retry.Backoffer, selector *replicaSelector) (*RPCContext, error) {
-    var targetReplica *replica
-    // Search replica that is not attempted from the last accessed replica
-    for i := 1; i < len(selector.replicas); i++ {
-        idx := AccessIndex((int(state.lastIdx) + i) % len(selector.replicas))
-        if idx == state.leaderIdx {
-            continue
-        }
-        targetReplica = selector.replicas[idx]
-        // Each follower is only tried once
-        if !targetReplica.isExhausted(1) && targetReplica.store.getLivenessState() != unreachable {
-            state.lastIdx = idx
-            selector.targetIdx = idx
-            break
-        }
-    }
+    filterReplicas := func(fn func(*replica) bool) (AccessIndex, *replica) {
+        for i := 0; i < len(selector.replicas); i++ {
+            idx := AccessIndex((int(state.lastIdx) + i) % len(selector.replicas))
+            if idx == state.leaderIdx {
+                continue
+            }
+            selectReplica := selector.replicas[idx]
+            if fn(selectReplica) && selectReplica.store.getLivenessState() != unreachable {
+                return idx, selectReplica
+            }
+        }
+        return -1, nil
+    }
+
+    if len(state.labels) > 0 {
+        idx, selectReplica := filterReplicas(func(selectReplica *replica) bool {
+            return selectReplica.store.IsLabelsMatch(state.labels)
+        })
+        if selectReplica != nil && idx >= 0 {
+            state.lastIdx = idx
+            selector.targetIdx = idx
+        }
+        // labels only take effect for first try.
+        state.labels = nil
+    }
+    if selector.targetIdx < 0 {
+        // Search replica that is not attempted from the last accessed replica
+        idx, selectReplica := filterReplicas(func(selectReplica *replica) bool {
+            return !selectReplica.isExhausted(1)
+        })
+        if selectReplica != nil && idx >= 0 {
+            state.lastIdx = idx
+            selector.targetIdx = idx
+        }
+    }
 
     // If all followers are tried and fail, backoff and retry.
     if selector.targetIdx < 0 {
         metrics.TiKVReplicaSelectorFailureCounter.WithLabelValues("exhausted").Inc()
         selector.invalidateRegion()
         return nil, nil
     }
-    return selector.buildRPCContext(bo)
+    rpcCtx, err := selector.buildRPCContext(bo)
+    if err != nil || rpcCtx == nil {
+        return rpcCtx, err
+    }
+    if state.fallbackFromLeader {
+        replicaRead := true
+        rpcCtx.contextPatcher.replicaRead = &replicaRead
+    }
+    return rpcCtx, nil
 }
```

Review note on the label-matching pass: the replica may be exhausted by data-is-not-ready, which does not affect follower read, so this first pass intentionally omits the `isExhausted` check.
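The reworked `next` makes two passes over the followers: the first pass honors only the request's store labels (and, per the review note above, skips the attempt-count check, since a replica exhausted by data-is-not-ready can still serve a follower read), while the second pass takes any reachable follower that has not been tried yet. A self-contained sketch of that selection order, using simplified types rather than the client-go ones:

```go
package main

import "fmt"

type replica struct {
    addr      string
    labels    map[string]string
    attempts  int
    reachable bool
}

func (r *replica) matches(want map[string]string) bool {
    for k, v := range want {
        if r.labels[k] != v {
            return false
        }
    }
    return true
}

// pickFollower mirrors the diff's two-pass selection: round-robin from lastIdx,
// skipping the leader; the first pass filters by labels only (an exhausted
// replica may still serve a follower read), the second takes any untried one.
func pickFollower(replicas []*replica, leaderIdx, lastIdx int, labels map[string]string) int {
    filter := func(fn func(*replica) bool) int {
        for i := 0; i < len(replicas); i++ {
            idx := (lastIdx + i) % len(replicas)
            if idx == leaderIdx {
                continue
            }
            if r := replicas[idx]; fn(r) && r.reachable {
                return idx
            }
        }
        return -1
    }
    if len(labels) > 0 {
        if idx := filter(func(r *replica) bool { return r.matches(labels) }); idx >= 0 {
            return idx
        }
    }
    return filter(func(r *replica) bool { return r.attempts == 0 })
}

func main() {
    replicas := []*replica{
        {addr: "s1", reachable: true}, // leader
        {addr: "s2", labels: map[string]string{"zone": "z2"}, reachable: true},
        {addr: "s3", labels: map[string]string{"zone": "z1"}, reachable: true},
    }
    // Prefers the zone-z1 follower; prints 2.
    fmt.Println(pickFollower(replicas, 0, 0, map[string]string{"zone": "z1"}))
}
```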
```diff
 func (state *tryFollower) onSendSuccess(selector *replicaSelector) {
-    if !selector.region.switchWorkLeaderToPeer(selector.targetReplica().peer) {
-        panic("the store must exist")
+    if !state.fallbackFromLeader {
+        peer := selector.targetReplica().peer
+        if !selector.region.switchWorkLeaderToPeer(peer) {
+            logutil.BgLogger().Warn("the store must exist",
+                zap.Uint64("store", peer.StoreId),
+                zap.Uint64("peer", peer.Id))
+        }
     }
 }
```
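The new guard in `onSendSuccess` matters because in the fallback case the leader still holds leadership: the follower merely served a replica read, so promoting it to the region's cached work leader would misroute subsequent requests. A minimal sketch of that decision with hypothetical simplified types (the `storeKnown` flag stands in for whatever makes `switchWorkLeaderToPeer` fail):

```go
package main

import "log"

type peer struct{ storeID, peerID uint64 }

type region struct{ workLeader peer }

// switchWorkLeader stands in for region.switchWorkLeaderToPeer; it reports
// false when the peer's store is unknown (e.g. a stale region cache).
func (r *region) switchWorkLeader(p peer, storeKnown bool) bool {
    if !storeKnown {
        return false
    }
    r.workLeader = p
    return true
}

func onSendSuccess(r *region, target peer, fallbackFromLeader, storeKnown bool) {
    if fallbackFromLeader {
        // The leader is alive but busy; the follower only answered a replica
        // read, which says nothing about leadership. Keep the cached leader.
        return
    }
    if !r.switchWorkLeader(target, storeKnown) {
        // Warn instead of panicking: a concurrent cache update can make the
        // store momentarily unknown, which is not fatal.
        log.Printf("the store must exist, store=%d peer=%d", target.storeID, target.peerID)
    }
}

func main() {
    r := &region{workLeader: peer{storeID: 1, peerID: 10}}
    onSendSuccess(r, peer{storeID: 2, peerID: 20}, true, true)  // fallback: leader stays store 1
    onSendSuccess(r, peer{storeID: 2, peerID: 20}, false, true) // normal tryFollower: switch to store 2
    log.Printf("work leader is now store %d", r.workLeader.storeID)
}
```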
```diff
@@ -888,6 +925,27 @@ func (s *replicaSelector) updateLeader(leader *metapb.Peer) {
     s.region.invalidate(StoreNotFound)
 }
 
+// For some reason, the leader is unreachable by now, try followers instead.
+func (s *replicaSelector) fallback2Follower(ctx *RPCContext) bool {
+    if ctx == nil || s == nil || s.state == nil {
+        return false
+    }
+    state, ok := s.state.(*accessFollower)
+    if !ok {
+        return false
+    }
+    if state.lastIdx != state.leaderIdx {
+        return false
+    }
+    s.state = &tryFollower{
+        fallbackFromLeader: true,
+        leaderIdx:          state.leaderIdx,
+        lastIdx:            state.leaderIdx,
+        labels:             state.option.labels,
+    }
+    return true
+}
+
 func (s *replicaSelector) invalidateRegion() {
     if s.region != nil {
         s.region.invalidate(Other)
```

Review thread on `fallback2Follower`:
Q: For now, is this the only situation in which it would be used?
A: Yes. When falling back to a replica from the leader, it is a follower read request, not a stale read.
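`fallback2Follower` only fires when the selector is in the `accessFollower` state and the previous attempt was routed to the leader (`lastIdx == leaderIdx`); otherwise a follower was already tried and there is nothing to fall back to. A condensed sketch of that transition with simplified stand-in types:

```go
package main

import "fmt"

type accessIndex int

type selectorState interface{ name() string }

type accessFollower struct {
    leaderIdx, lastIdx accessIndex
    labels             map[string]string
}

func (*accessFollower) name() string { return "accessFollower" }

type tryFollower struct {
    fallbackFromLeader bool
    leaderIdx, lastIdx accessIndex
    labels             map[string]string
}

func (*tryFollower) name() string { return "tryFollower" }

// fallbackToFollower mirrors the PR's transition: it applies only when the
// current state is accessFollower and the last attempt was routed to the leader.
func fallbackToFollower(state selectorState) (selectorState, bool) {
    s, ok := state.(*accessFollower)
    if !ok || s.lastIdx != s.leaderIdx {
        return state, false
    }
    return &tryFollower{
        fallbackFromLeader: true,
        leaderIdx:          s.leaderIdx,
        lastIdx:            s.leaderIdx,
        labels:             s.labels,
    }, true
}

func main() {
    st := selectorState(&accessFollower{leaderIdx: 0, lastIdx: 0})
    st, changed := fallbackToFollower(st)
    fmt.Println(st.name(), changed) // tryFollower true
}
```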
```diff
@@ -1566,6 +1624,10 @@ func (s *RegionRequestSender) onRegionError(bo *retry.Backoffer, ctx *RPCContext
     logutil.BgLogger().Warn("tikv reports `ServerIsBusy` retry later",
         zap.String("reason", regionErr.GetServerIsBusy().GetReason()),
         zap.Stringer("ctx", ctx))
+    if s.replicaSelector.fallback2Follower(ctx) {
+        // immediately retry on followers.
+        return true, nil
+    }
     if ctx != nil && ctx.Store != nil && ctx.Store.storeType.IsTiFlashRelatedType() {
         err = bo.Backoff(retry.BoTiFlashServerBusy, errors.Errorf("server is busy, ctx: %v", ctx))
     } else {
```
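The placement inside the `ServerIsBusy` branch is the point: first try to downgrade the read to a follower and retry immediately, and only pay the backoff when no fallback is possible. A schematic of that control flow; the function and its parameters are placeholders, not the client-go API:

```go
package main

import (
    "fmt"
    "time"
)

// handleServerIsBusy sketches the retry decision: try the follower fallback
// first, and only back off when no fallback is possible. canFallback stands
// in for replicaSelector.fallback2Follower.
func handleServerIsBusy(canFallback func() bool, backoff func(time.Duration)) (retry bool) {
    if canFallback() {
        // Fallback succeeded: retry immediately on a follower, no backoff.
        return true
    }
    // No follower to fall back to: wait before retrying the busy store.
    backoff(200 * time.Millisecond)
    return true
}

func main() {
    retry := handleServerIsBusy(
        func() bool { return true },
        func(d time.Duration) { fmt.Println("backing off", d) },
    )
    fmt.Println("retry:", retry) // prints "retry: true" with no backoff
}
```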
Review thread on the change from `panic` to a warning log in `onSendSuccess`:
Q: Is this an extra fix?
A: Yes, a possible data race.