// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord
import (
"context"
"fmt"
"io"
"math"
"sort"
"sync"
"time"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/iterutil"
"github.com/cockroachdb/cockroach/pkg/util/limit"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/pprofutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
)
type singleRangeInfo struct {
rs roachpb.RSpan
startAfter hlc.Timestamp
token rangecache.EvictionToken
}
var useDedicatedRangefeedConnectionClass = settings.RegisterBoolSetting(
settings.TenantReadOnly,
"kv.rangefeed.use_dedicated_connection_class.enabled",
"uses dedicated connection when running rangefeeds",
util.ConstantWithMetamorphicTestBool(
"kv.rangefeed.use_dedicated_connection_class.enabled", false),
)
var catchupScanConcurrency = settings.RegisterIntSetting(
settings.TenantWritable,
"kv.rangefeed.catchup_scan_concurrency",
"number of catchup scans that a single rangefeed can execute concurrently; 0 implies unlimited",
8,
settings.NonNegativeInt,
)
var rangefeedRangeStuckThreshold = settings.RegisterDurationSetting(
settings.TenantWritable,
"kv.rangefeed.range_stuck_threshold",
"restart rangefeeds if they don't emit anything for the specified threshold; 0 disables (kv.rangefeed.closed_timestamp_refresh_interval takes precedence)",
time.Minute,
settings.NonNegativeDuration,
settings.WithPublic)
// LaggingRangesCheckFrequency is the frequency at which the rangefeed will
// check for ranges which have fallen behind.
var LaggingRangesCheckFrequency = settings.RegisterDurationSetting(
settings.TenantWritable,
"kv.rangefeed.lagging_ranges_frequency",
"controls the frequency at which a rangefeed checks for ranges which have fallen behind",
1*time.Minute,
settings.NonNegativeDuration,
settings.WithPublic,
)
// LaggingRangesThreshold is how far behind the present a range must be to be
// considered 'lagging' in metrics.
var LaggingRangesThreshold = settings.RegisterDurationSetting(
settings.TenantWritable,
"kv.rangefeed.lagging_ranges_threshold",
"controls how far behind a range must be from the present to be considered as 'lagging' behind in metrics",
3*time.Minute,
settings.NonNegativeDuration,
settings.WithPublic,
)
func maxConcurrentCatchupScans(sv *settings.Values) int {
l := catchupScanConcurrency.Get(sv)
if l == 0 {
return math.MaxInt
}
return int(l)
}
type rangeFeedConfig struct {
useMuxRangeFeed bool
overSystemTable bool
withDiff bool
withLaggingRangesUpdate func(int64)
knobs struct {
// onRangefeedEvent is invoked on each rangefeed event.
// It returns a boolean indicating whether the event should be skipped, or an
// error indicating that the rangefeed should terminate.
// streamID is set only for mux rangefeeds.
onRangefeedEvent func(ctx context.Context, s roachpb.Span, muxStreamID int64, event *kvpb.RangeFeedEvent) (skip bool, _ error)
// metrics overrides rangefeed metrics to use.
metrics *DistSenderRangeFeedMetrics
// captureMuxRangeFeedRequestSender is a callback invoked when mux
// rangefeed establishes connection to the node.
captureMuxRangeFeedRequestSender func(nodeID roachpb.NodeID, sender func(req *kvpb.RangeFeedRequest) error)
}
}
// RangeFeedOption configures a RangeFeed.
type RangeFeedOption interface {
set(*rangeFeedConfig)
}
type optionFunc func(*rangeFeedConfig)
func (o optionFunc) set(c *rangeFeedConfig) { o(c) }
// WithMuxRangeFeed configures range feed to use MuxRangeFeed RPC.
func WithMuxRangeFeed() RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.useMuxRangeFeed = true
})
}
// WithSystemTablePriority is used for system-internal rangefeeds; it uses a
// higher admission priority during catch-up scans.
func WithSystemTablePriority() RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.overSystemTable = true
})
}
// WithDiff turns on "diff" option for the rangefeed.
func WithDiff() RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.withDiff = true
})
}
// WithLaggingRangesUpdate registers a callback which is called periodically
// with the number of lagging ranges. The frequency and strictness of this check
// are determined by cluster settings in this package.
func WithLaggingRangesUpdate(f func(int64)) RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.withLaggingRangesUpdate = f
})
}
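// For illustration only (a hypothetical caller-side sketch; the gauge name and
// wiring are assumptions, not part of this file): exporting the lagging-ranges
// count as a gauge might look roughly like this.
//
//	laggingRangesGauge := metric.NewGauge(metric.Metadata{Name: "rangefeed.lagging_ranges"})
//	opt := kvcoord.WithLaggingRangesUpdate(func(n int64) {
//		laggingRangesGauge.Update(n)
//	})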
// A "kill switch" to disable multiplexing rangefeed if severe issues discovered with new implementation.
var enableMuxRangeFeed = envutil.EnvOrDefaultBool("COCKROACH_ENABLE_MULTIPLEXING_RANGEFEED", true)
// RangeFeed divides a RangeFeed request on range boundaries and establishes a
// RangeFeed to each of the individual ranges. It streams back results on the
// provided channel.
//
// Note that the timestamps in RangeFeedCheckpoint events that are streamed back
// may be lower than the timestamp given here.
//
// Rangefeeds do not support inline (unversioned) values, and may omit them or
// error on them. Similarly, rangefeeds will error if MVCC history is mutated
// via e.g. ClearRange. Do not use rangefeeds across such key spans.
//
// NB: the given startAfter timestamp is exclusive, i.e. the first possible
// emitted event (including catchup scans) will be at startAfter.Next().
func (ds *DistSender) RangeFeed(
ctx context.Context,
spans []roachpb.Span,
startAfter hlc.Timestamp, // exclusive
eventCh chan<- RangeFeedMessage,
opts ...RangeFeedOption,
) error {
timedSpans := make([]SpanTimePair, 0, len(spans))
for _, sp := range spans {
timedSpans = append(timedSpans, SpanTimePair{
Span: sp,
StartAfter: startAfter,
})
}
return ds.RangeFeedSpans(ctx, timedSpans, eventCh, opts...)
}
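// A minimal caller sketch (assumptions: ctx, ds, span and startTS are provided
// by the caller, and the consumer loop below is only illustrative):
//
//	eventCh := make(chan kvcoord.RangeFeedMessage)
//	g := ctxgroup.WithContext(ctx)
//	g.GoCtx(func(ctx context.Context) error {
//		// Blocks until the rangefeed terminates or ctx is cancelled.
//		return ds.RangeFeed(ctx, []roachpb.Span{span}, startTS, eventCh, kvcoord.WithDiff())
//	})
//	g.GoCtx(func(ctx context.Context) error {
//		for {
//			select {
//			case msg := <-eventCh:
//				_ = msg // consume checkpoints and values here
//			case <-ctx.Done():
//				return ctx.Err()
//			}
//		}
//	})
//	return g.Wait()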
// SpanTimePair is a span along with its starting time. The starting
// time is exclusive, i.e. the first possible emitted event (including catchup
// scans) will be at startAfter.Next().
type SpanTimePair struct {
Span roachpb.Span
StartAfter hlc.Timestamp // exclusive
}
func (p SpanTimePair) String() string {
return fmt.Sprintf("%s@%s", p.Span, p.StartAfter)
}
// RangeFeedSpans is similar to RangeFeed but allows specification of different
// starting time for each span.
func (ds *DistSender) RangeFeedSpans(
ctx context.Context,
spans []SpanTimePair,
eventCh chan<- RangeFeedMessage,
opts ...RangeFeedOption,
) error {
if len(spans) == 0 {
return errors.AssertionFailedf("expected at least 1 span, got none")
}
var cfg rangeFeedConfig
for _, opt := range opts {
opt.set(&cfg)
}
metrics := &ds.metrics.DistSenderRangeFeedMetrics
if cfg.knobs.metrics != nil {
metrics = cfg.knobs.metrics
}
ctx = ds.AnnotateCtx(ctx)
ctx, sp := tracing.EnsureChildSpan(ctx, ds.AmbientContext.Tracer, "dist sender")
defer sp.Finish()
rr := newRangeFeedRegistry(ctx, cfg.withDiff)
ds.activeRangeFeeds.Store(rr, nil)
defer ds.activeRangeFeeds.Delete(rr)
catchupSem := limit.MakeConcurrentRequestLimiter(
"distSenderCatchupLimit", maxConcurrentCatchupScans(&ds.st.SV))
if ds.st.Version.IsActive(ctx, clusterversion.TODODelete_V22_2RangefeedUseOneStreamPerNode) &&
enableMuxRangeFeed && cfg.useMuxRangeFeed {
return muxRangeFeed(ctx, cfg, spans, ds, rr, &catchupSem, eventCh)
}
// Goroutine that processes subdivided ranges and creates a rangefeed for
// each.
g := ctxgroup.WithContext(ctx)
rangeCh := make(chan singleRangeInfo, 16)
g.GoCtx(func(ctx context.Context) error {
for {
select {
case sri := <-rangeCh:
// Spawn a child goroutine to process this feed.
g.GoCtx(func(ctx context.Context) error {
return ds.partialRangeFeed(ctx, rr, sri.rs, sri.startAfter,
sri.token, &catchupSem, rangeCh, eventCh, cfg, metrics)
})
case <-ctx.Done():
return ctx.Err()
}
}
})
if cfg.withLaggingRangesUpdate != nil {
g.GoCtx(func(ctx context.Context) error {
return ds.monitorLaggingRanges(ctx, rr, cfg.withLaggingRangesUpdate)
})
}
// Kick off the initial set of ranges.
divideAllSpansOnRangeBoundaries(spans, sendSingleRangeInfo(rangeCh), ds, &g)
return g.Wait()
}
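// A sketch of resuming from per-span checkpoints (hypothetical caller-side
// names; checkpoint bookkeeping is assumed to live with the caller):
//
//	pairs := []kvcoord.SpanTimePair{
//		{Span: spanA, StartAfter: checkpointA}, // events strictly after checkpointA
//		{Span: spanB, StartAfter: checkpointB},
//	}
//	err := ds.RangeFeedSpans(ctx, pairs, eventCh, kvcoord.WithDiff())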
func (ds *DistSender) monitorLaggingRanges(
ctx context.Context, rr *rangeFeedRegistry, updateLaggingRanges func(int64),
) error {
// If we are getting shut down, we should reset this metric.
defer func() {
updateLaggingRanges(0)
}()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(LaggingRangesCheckFrequency.Get(&ds.st.SV)):
count := int64(0)
thresholdTS := timeutil.Now().Add(-1 * LaggingRangesThreshold.Get(&ds.st.SV))
i := 0
if err := rr.ForEachPartialRangefeed(func(rfCtx RangeFeedContext, feed PartialRangeFeed) error {
// The resolved timestamp of a range is the timestamp up to which it has
// caught up. It is not set during catchup scans, so in that case we use
// the time the partial rangefeed was created as its starting time.
ts := hlc.Timestamp{WallTime: feed.CreatedTime.UnixNano()}
if !feed.Resolved.EqOrdering(hlc.Timestamp{}) {
ts = feed.Resolved
}
i += 1
if ts.Less(hlc.Timestamp{WallTime: thresholdTS.UnixNano()}) {
count += 1
}
return nil
}, true); err != nil {
return err
}
updateLaggingRanges(count)
}
}
}
// divideAllSpansOnRangeBoundaries divides all spans on range boundaries and invokes
// provided onRange function for each range. Resolution happens concurrently using provided
// context group.
func divideAllSpansOnRangeBoundaries(
spans []SpanTimePair, onRange onRangeFn, ds *DistSender, g *ctxgroup.Group,
) {
// Sort input spans based on their start time -- older spans first.
// Starting a rangefeed over a large number of spans is an expensive
// proposition, since it involves initiating a catch-up scan for each span.
// These operations are throttled (both on the client and on the server), so
// it is possible that only some portion of the spans makes it past the
// catch-up phase. If the caller maintains checkpoints and then restarts the
// rangefeed (for any reason), we will restart against the same list of spans
// -- but this time some spans may be substantially ahead of the rest. We
// simply sort the input spans so that the oldest spans get the first chance
// to complete their catch-up scans.
sort.Slice(spans, func(i, j int) bool {
return spans[i].StartAfter.Less(spans[j].StartAfter)
})
for _, s := range spans {
func(stp SpanTimePair) {
g.GoCtx(func(ctx context.Context) error {
rs, err := keys.SpanAddr(stp.Span)
if err != nil {
return err
}
return divideSpanOnRangeBoundaries(ctx, ds, rs, stp.StartAfter, onRange)
})
}(s)
}
}
// RangeFeedContext is the structure containing arguments passed to
// RangeFeed call. It functions as a kind of key for an active range feed.
type RangeFeedContext struct {
ID int64 // unique ID identifying range feed.
CtxTags string // context tags
// WithDiff option passed to the RangeFeed call.
WithDiff bool
}
// PartialRangeFeed describes the state of a currently executing partial range feed.
type PartialRangeFeed struct {
Span roachpb.Span
StartAfter hlc.Timestamp // exclusive
NodeID roachpb.NodeID
RangeID roachpb.RangeID
CreatedTime time.Time
LastValueReceived time.Time
Resolved hlc.Timestamp
NumErrs int
LastErr error
}
// ActiveRangeFeedIterFn is an iterator function which is passed a PartialRangeFeed structure.
// The iterator function may return an iterutil.StopIteration sentinel error to stop iteration
// early.
type ActiveRangeFeedIterFn func(rfCtx RangeFeedContext, feed PartialRangeFeed) error
const continueIter = true
const stopIter = false
// ForEachActiveRangeFeed invokes provided function for each active rangefeed.
func (ds *DistSender) ForEachActiveRangeFeed(fn ActiveRangeFeedIterFn) (iterErr error) {
ds.activeRangeFeeds.Range(func(k, v interface{}) bool {
r := k.(*rangeFeedRegistry)
iterErr = r.ForEachPartialRangefeed(fn, false)
return iterErr == nil
})
return iterutil.Map(iterErr)
}
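// A hypothetical observability sketch (caller-side, not from this file).
// Returning iterutil.StopIteration() ends the walk early and is surfaced as a
// nil error, since ForEachActiveRangeFeed passes the result through
// iterutil.Map.
//
//	err := ds.ForEachActiveRangeFeed(func(rfCtx kvcoord.RangeFeedContext, feed kvcoord.PartialRangeFeed) error {
//		if feed.NumErrs > 0 {
//			log.Infof(ctx, "feed %d: range %s last error: %v", rfCtx.ID, feed.Span, feed.LastErr)
//		}
//		if enoughSeen { // placeholder condition
//			return iterutil.StopIteration()
//		}
//		return nil
//	})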
// ForEachPartialRangefeed invokes the provided function for each partial rangefeed. Set manageIterationErrs
// if fn uses iterutil.StopIteration to stop iteration early.
func (r *rangeFeedRegistry) ForEachPartialRangefeed(
fn ActiveRangeFeedIterFn, manageIterationErrs bool,
) (iterErr error) {
partialRangeFeed := func(active *activeRangeFeed) PartialRangeFeed {
active.Lock()
defer active.Unlock()
return active.PartialRangeFeed
}
r.ranges.Range(func(k, v interface{}) bool {
active := k.(*activeRangeFeed)
if err := fn(r.RangeFeedContext, partialRangeFeed(active)); err != nil {
iterErr = err
return stopIter
}
return continueIter
})
if manageIterationErrs {
return iterutil.Map(iterErr)
}
return iterErr
}
// activeRangeFeed is a thread safe PartialRangeFeed.
type activeRangeFeed struct {
release func()
syncutil.Mutex
PartialRangeFeed
}
func (a *activeRangeFeed) onRangeEvent(
nodeID roachpb.NodeID, rangeID roachpb.RangeID, event *kvpb.RangeFeedEvent,
) {
a.Lock()
defer a.Unlock()
if event.Val != nil || event.SST != nil {
a.LastValueReceived = timeutil.Now()
} else if event.Checkpoint != nil {
a.Resolved = event.Checkpoint.ResolvedTS
}
a.NodeID = nodeID
a.RangeID = rangeID
}
func (a *activeRangeFeed) setLastError(err error) {
now := timeutil.Now()
a.Lock()
defer a.Unlock()
a.LastErr = errors.Wrapf(err, "disconnect at %s: checkpoint %s/-%s",
redact.SafeString(now.Format(time.RFC3339)), a.Resolved, now.Sub(a.Resolved.GoTime()))
a.NumErrs++
}
// rangeFeedRegistry is responsible for keeping track of currently executing
// range feeds.
type rangeFeedRegistry struct {
RangeFeedContext
ranges sync.Map // map[*activeRangeFeed]nil
}
func newRangeFeedRegistry(ctx context.Context, withDiff bool) *rangeFeedRegistry {
rr := &rangeFeedRegistry{
RangeFeedContext: RangeFeedContext{WithDiff: withDiff},
}
rr.ID = *(*int64)(unsafe.Pointer(&rr))
if b := logtags.FromContext(ctx); b != nil {
rr.CtxTags = b.String()
}
return rr
}
func sendSingleRangeInfo(rangeCh chan<- singleRangeInfo) onRangeFn {
return func(ctx context.Context, rs roachpb.RSpan, startAfter hlc.Timestamp, token rangecache.EvictionToken) error {
select {
case rangeCh <- singleRangeInfo{
rs: rs,
startAfter: startAfter,
token: token,
}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
}
type onRangeFn func(
ctx context.Context, rs roachpb.RSpan, startAfter hlc.Timestamp, token rangecache.EvictionToken,
) error
func divideSpanOnRangeBoundaries(
ctx context.Context,
ds *DistSender,
rs roachpb.RSpan,
startAfter hlc.Timestamp,
onRange onRangeFn,
) error {
// As RangeIterator iterates, it can return overlapping descriptors (and
// during splits, this happens frequently), but divideSpanOnRangeBoundaries
// intends to split up the input into non-overlapping spans aligned to range
// boundaries. So, as we go, keep track of the remaining uncovered part of
// `rs` in `nextRS`.
nextRS := rs
ri := MakeRangeIterator(ds)
for ri.Seek(ctx, nextRS.Key, Ascending); ri.Valid(); ri.Next(ctx) {
desc := ri.Desc()
partialRS, err := nextRS.Intersect(desc.RSpan())
if err != nil {
return err
}
nextRS.Key = partialRS.EndKey
if err := onRange(ctx, partialRS, startAfter, ri.Token()); err != nil {
return err
}
if !ri.NeedAnother(nextRS) {
break
}
}
return ri.Error()
}
// newActiveRangeFeed registers an active rangefeed with the rangeFeedRegistry.
// The caller must call active.release() in order to clean up.
func newActiveRangeFeed(
span roachpb.Span, startAfter hlc.Timestamp, rr *rangeFeedRegistry, c *metric.Gauge,
) *activeRangeFeed {
// Register partial range feed with registry.
active := &activeRangeFeed{
PartialRangeFeed: PartialRangeFeed{
Span: span,
StartAfter: startAfter,
CreatedTime: timeutil.Now(),
},
}
active.release = func() {
rr.ranges.Delete(active)
c.Dec(1)
}
rr.ranges.Store(active, nil)
c.Inc(1)
return active
}
// partialRangeFeed establishes a RangeFeed to the range specified by desc. It
// manages lifecycle events of the range in order to maintain the RangeFeed
// connection; this may involve instructing higher-level functions to retry
// this rangefeed, or subdividing the range further in the event of a split.
func (ds *DistSender) partialRangeFeed(
ctx context.Context,
rr *rangeFeedRegistry,
rs roachpb.RSpan,
startAfter hlc.Timestamp,
token rangecache.EvictionToken,
catchupSem *limit.ConcurrentRequestLimiter,
rangeCh chan<- singleRangeInfo,
eventCh chan<- RangeFeedMessage,
cfg rangeFeedConfig,
metrics *DistSenderRangeFeedMetrics,
) error {
// Bound the partial rangefeed to the partial span.
span := rs.AsRawSpanWithNoLocals()
// Register partial range feed with registry.
active := newActiveRangeFeed(span, startAfter, rr, metrics.RangefeedRanges)
defer active.release()
// Start a retry loop for establishing the rangefeed to the range.
for r := retry.StartWithCtx(ctx, ds.rpcRetryOptions); r.Next(); {
// If we've cleared the descriptor on a send failure, re-lookup.
if !token.Valid() {
var err error
ri, err := ds.getRoutingInfo(ctx, rs.Key, rangecache.EvictionToken{}, false)
if err != nil {
log.VErrEventf(ctx, 1, "range descriptor re-lookup failed: %s", err)
if !rangecache.IsRangeLookupErrorRetryable(err) {
return err
}
continue
}
token = ri
}
// Establish a RangeFeed for a single Range.
if log.V(1) {
log.Infof(ctx, "RangeFeed starting for range %d@%s (%s)", token.Desc().RangeID, startAfter, span)
}
maxTS, err := ds.singleRangeFeed(
ctx, span, startAfter, token.Desc(),
catchupSem, eventCh, active.onRangeEvent, cfg, metrics)
// Forward the timestamp in case we end up sending it again.
startAfter.Forward(maxTS)
if log.V(1) {
log.Infof(ctx, "RangeFeed %s@%s disconnected with last checkpoint %s ago: %v",
active.Span, active.StartAfter, timeutil.Since(active.Resolved.GoTime()), err)
}
active.setLastError(err)
errInfo, err := handleRangefeedError(ctx, err)
if err != nil {
return err
}
metrics.RangefeedRestartRanges.Inc(1)
if errInfo.evict {
token.Evict(ctx)
token = rangecache.EvictionToken{}
}
if errInfo.resolveSpan {
return divideSpanOnRangeBoundaries(ctx, ds, rs, startAfter, sendSingleRangeInfo(rangeCh))
}
}
return ctx.Err()
}
type rangefeedErrorInfo struct {
resolveSpan bool // true if the span resolution needs to be performed, and rangefeed restarted.
evict bool // true if routing info needs to be updated prior to retry.
}
// handleRangefeedError handles an error that occurred while running rangefeed.
// Returns rangefeedErrorInfo describing how the error should be handled for the
// range. Returns an error if the entire rangefeed should terminate.
func handleRangefeedError(ctx context.Context, err error) (rangefeedErrorInfo, error) {
if err == nil {
return rangefeedErrorInfo{}, nil
}
switch {
case errors.Is(err, io.EOF):
// If we got an EOF, treat it as a signal to restart single range feed.
return rangefeedErrorInfo{}, nil
case errors.HasType(err, (*kvpb.StoreNotFoundError)(nil)) ||
errors.HasType(err, (*kvpb.NodeUnavailableError)(nil)):
// These errors are likely to be unique to the replica that
// reported them, so no action is required before the next
// retry.
return rangefeedErrorInfo{}, nil
case errors.Is(err, errRestartStuckRange):
// Stuck ranges indicate a bug somewhere in the system. We are being
// defensive and attempt to restart this rangefeed. Usually, any
// stuck-ness is cleared out if we just attempt to re-resolve range
// descriptor and retry.
//
// The error contains the replica which we were waiting for.
log.Warningf(ctx, "restarting stuck rangefeed: %s", err)
return rangefeedErrorInfo{evict: true}, nil
case IsSendError(err), errors.HasType(err, (*kvpb.RangeNotFoundError)(nil)):
return rangefeedErrorInfo{evict: true}, nil
case errors.HasType(err, (*kvpb.RangeKeyMismatchError)(nil)):
return rangefeedErrorInfo{evict: true, resolveSpan: true}, nil
case errors.HasType(err, (*kvpb.RangeFeedRetryError)(nil)):
var t *kvpb.RangeFeedRetryError
if ok := errors.As(err, &t); !ok {
return rangefeedErrorInfo{}, errors.AssertionFailedf("wrong error type: %T", err)
}
switch t.Reason {
case kvpb.RangeFeedRetryError_REASON_REPLICA_REMOVED,
kvpb.RangeFeedRetryError_REASON_RAFT_SNAPSHOT,
kvpb.RangeFeedRetryError_REASON_LOGICAL_OPS_MISSING,
kvpb.RangeFeedRetryError_REASON_SLOW_CONSUMER,
kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED:
// Try again with same descriptor. These are transient
// errors that should not show up again.
return rangefeedErrorInfo{}, nil
case kvpb.RangeFeedRetryError_REASON_RANGE_SPLIT,
kvpb.RangeFeedRetryError_REASON_RANGE_MERGED,
kvpb.RangeFeedRetryError_REASON_NO_LEASEHOLDER:
return rangefeedErrorInfo{evict: true, resolveSpan: true}, nil
default:
return rangefeedErrorInfo{}, errors.AssertionFailedf("unrecognized retryable error type: %T", err)
}
default:
return rangefeedErrorInfo{}, err
}
}
// catchupAlloc is a catchup scan allocation.
type catchupAlloc func()
// Release implements limit.Reservation
func (a catchupAlloc) Release() {
a()
}
func acquireCatchupScanQuota(
ctx context.Context,
sv *settings.Values,
catchupSem *limit.ConcurrentRequestLimiter,
metrics *DistSenderRangeFeedMetrics,
) (catchupAlloc, error) {
// Indicate that a catchup scan is starting; before potentially blocking on the
// semaphore, take the opportunity to update the semaphore limit.
catchupSem.SetLimit(maxConcurrentCatchupScans(sv))
res, err := catchupSem.Begin(ctx)
if err != nil {
return nil, err
}
metrics.RangefeedCatchupRanges.Inc(1)
return func() {
metrics.RangefeedCatchupRanges.Dec(1)
res.Release()
}, nil
}
// newTransportForRange returns a Transport for the specified range descriptor.
func newTransportForRange(
ctx context.Context, desc *roachpb.RangeDescriptor, ds *DistSender,
) (Transport, error) {
var latencyFn LatencyFunc
if ds.rpcContext != nil {
latencyFn = ds.rpcContext.RemoteClocks.Latency
}
replicas, err := NewReplicaSlice(ctx, ds.nodeDescs, desc, nil, AllExtantReplicas)
if err != nil {
return nil, err
}
replicas.OptimizeReplicaOrder(ds.getNodeID(), latencyFn, ds.locality)
opts := SendOptions{class: connectionClass(&ds.st.SV)}
return ds.transportFactory(opts, ds.nodeDialer, replicas)
}
// onRangeEventCb is invoked for each non-error range event.
// nodeID identifies the node which generated the event.
type onRangeEventCb func(nodeID roachpb.NodeID, rangeID roachpb.RangeID, event *kvpb.RangeFeedEvent)
// makeRangeFeedRequest constructs a kvpb.RangeFeedRequest for the specified span
// and rangeID. The request is constructed to watch events after the specified
// timestamp, with an optional diff. If the request corresponds to a system
// range, it receives a higher admission priority.
func makeRangeFeedRequest(
span roachpb.Span,
rangeID roachpb.RangeID,
isSystemRange bool,
startAfter hlc.Timestamp,
withDiff bool,
) kvpb.RangeFeedRequest {
admissionPri := admissionpb.BulkNormalPri
if isSystemRange {
admissionPri = admissionpb.NormalPri
}
return kvpb.RangeFeedRequest{
Span: span,
Header: kvpb.Header{
Timestamp: startAfter,
RangeID: rangeID,
},
WithDiff: withDiff,
AdmissionHeader: kvpb.AdmissionHeader{
// NB: AdmissionHeader is used only at the start of the range feed
// stream since the initial catch-up scan is expensive.
Priority: int32(admissionPri),
CreateTime: timeutil.Now().UnixNano(),
Source: kvpb.AdmissionHeader_FROM_SQL,
NoMemoryReservedAtSource: true,
},
}
}
func defaultStuckRangeThreshold(st *cluster.Settings) func() time.Duration {
return func() time.Duration {
// Before the introduction of kv.rangefeed.range_stuck_threshold = 1m,
// clusters may already have kv.closed_timestamp.side_transport_interval or
// kv.rangefeed.closed_timestamp_refresh_interval set to >1m. This would
// cause rangefeeds to continually restart. We therefore conservatively use
// the highest value, with a 1.2 safety factor.
threshold := rangefeedRangeStuckThreshold.Get(&st.SV)
if threshold > 0 {
interval := kvserverbase.RangeFeedRefreshInterval.Get(&st.SV)
if i := closedts.SideTransportCloseInterval.Get(&st.SV); i > interval {
interval = i
}
if t := time.Duration(math.Round(1.2 * float64(interval))); t > threshold {
threshold = t
}
}
return threshold
}
}
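// Worked example (hypothetical values): with
// kv.rangefeed.range_stuck_threshold = 1m and
// kv.rangefeed.closed_timestamp_refresh_interval = 3m (and a smaller side
// transport close interval), the effective threshold is max(1m, 1.2*3m) =
// 3.6m, so a silent rangefeed is declared stuck only after 3m36s instead of
// restarting every minute.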
// singleRangeFeed gathers and rearranges the replicas, and makes a RangeFeed
// RPC call. Results will be sent on the provided channel. Returns the timestamp
// of the maximum rangefeed checkpoint seen, which can be used to re-establish
// the rangefeed with a larger starting timestamp, reflecting the fact that all
// values up to the last checkpoint have already been observed. Returns the
// request's timestamp if no checkpoints are seen.
func (ds *DistSender) singleRangeFeed(
ctx context.Context,
span roachpb.Span,
startAfter hlc.Timestamp,
desc *roachpb.RangeDescriptor,
catchupSem *limit.ConcurrentRequestLimiter,
eventCh chan<- RangeFeedMessage,
onRangeEvent onRangeEventCb,
cfg rangeFeedConfig,
metrics *DistSenderRangeFeedMetrics,
) (_ hlc.Timestamp, retErr error) {
// Ensure context is cancelled on all errors, to prevent gRPC stream leaks.
ctx, cancelFeed := context.WithCancel(ctx)
defer func() {
if log.V(1) {
log.Infof(ctx, "singleRangeFeed terminating with err=%v", retErr)
}
cancelFeed()
}()
args := makeRangeFeedRequest(span, desc.RangeID, cfg.overSystemTable, startAfter, cfg.withDiff)
transport, err := newTransportForRange(ctx, desc, ds)
if err != nil {
return args.Timestamp, err
}
defer transport.Release()
// Indicate that a catchup scan is starting; before potentially blocking on the
// semaphore, take the opportunity to update the semaphore limit.
catchupRes, err := acquireCatchupScanQuota(ctx, &ds.st.SV, catchupSem, metrics)
if err != nil {
return hlc.Timestamp{}, err
}
finishCatchupScan := func() {
if catchupRes != nil {
catchupRes.Release()
catchupRes = nil
}
}
// cleanup catchup reservation in case of early termination.
defer finishCatchupScan()
stuckWatcher := newStuckRangeFeedCanceler(cancelFeed, defaultStuckRangeThreshold(ds.st))
defer stuckWatcher.stop()
var streamCleanup func()
maybeCleanupStream := func() {
if streamCleanup != nil {
streamCleanup()
streamCleanup = nil
}
}
defer maybeCleanupStream()
for {
stuckWatcher.stop() // if timer is running from previous iteration, stop it now
if transport.IsExhausted() {
return args.Timestamp, newSendError(errors.New("sending to all replicas failed"))
}
maybeCleanupStream()
args.Replica = transport.NextReplica()
client, err := transport.NextInternalClient(ctx)
if err != nil {
log.VErrEventf(ctx, 2, "RPC error: %s", err)
continue
}
log.VEventf(ctx, 3, "attempting to create a RangeFeed over replica %s", args.Replica)
ctx := ds.AnnotateCtx(ctx)
ctx = logtags.AddTag(ctx, "dest_n", args.Replica.NodeID)
ctx = logtags.AddTag(ctx, "dest_s", args.Replica.StoreID)
ctx = logtags.AddTag(ctx, "dest_r", args.RangeID)
ctx, restore := pprofutil.SetProfilerLabelsFromCtxTags(ctx)
streamCleanup = restore
stream, err := client.RangeFeed(ctx, &args)
if err != nil {
restore()
log.VErrEventf(ctx, 2, "RPC error: %s", err)
if grpcutil.IsAuthError(err) {
// Authentication or authorization error. Propagate.
return args.Timestamp, err
}
continue
}
var event *kvpb.RangeFeedEvent
for {
if err := stuckWatcher.do(func() (err error) {
event, err = stream.Recv()
return err
}); err != nil {
log.VErrEventf(ctx, 2, "RPC error: %s", err)
if stuckWatcher.stuck() {
afterCatchUpScan := catchupRes == nil
return args.Timestamp, handleStuckEvent(&args, afterCatchUpScan, stuckWatcher.threshold(), metrics)
}
return args.Timestamp, err
}
if cfg.knobs.onRangefeedEvent != nil {
skip, err := cfg.knobs.onRangefeedEvent(ctx, span, 0 /*streamID */, event)
if err != nil {
return args.Timestamp, err
}
if skip {
continue
}
}
msg := RangeFeedMessage{RangeFeedEvent: event, RegisteredSpan: span}
switch t := event.GetValue().(type) {
case *kvpb.RangeFeedCheckpoint:
if t.Span.Contains(args.Span) {
// If we see the first non-empty checkpoint, we know we're done with the catchup scan.
if !t.ResolvedTS.IsEmpty() && catchupRes != nil {
finishCatchupScan()
}
// Note that this timestamp means that all rows in the span with
// writes at or before the timestamp have now been seen. The
// Timestamp field in the request is exclusive, meaning if we send
// the request with exactly the ResolvedTS, we'll see only rows after
// that timestamp.
args.Timestamp.Forward(t.ResolvedTS)
}
case *kvpb.RangeFeedError:
log.VErrEventf(ctx, 2, "RangeFeedError: %s", t.Error.GoError())
if catchupRes != nil {
metrics.RangefeedErrorCatchup.Inc(1)
}
if stuckWatcher.stuck() {
// When the stuck watcher fired, and the rangefeed call is local,
// the remote might notice the cancellation first and return from
// Recv with an error that we need to special-case here.
afterCatchUpScan := catchupRes == nil
return args.Timestamp, handleStuckEvent(&args, afterCatchUpScan, stuckWatcher.threshold(), metrics)
}
return args.Timestamp, t.Error.GoError()
}
onRangeEvent(args.Replica.NodeID, desc.RangeID, event)
select {
case eventCh <- msg:
case <-ctx.Done():
return args.Timestamp, ctx.Err()
}
}
}
}
func connectionClass(sv *settings.Values) rpc.ConnectionClass {
if useDedicatedRangefeedConnectionClass.Get(sv) {
return rpc.RangefeedClass
}
return rpc.DefaultClass
}
func handleStuckEvent(
args *kvpb.RangeFeedRequest,
afterCatchupScan bool,
threshold time.Duration,
m *DistSenderRangeFeedMetrics,
) error {
m.RangefeedRestartStuck.Inc(1)
if afterCatchupScan {
telemetry.Count("rangefeed.stuck.after-catchup-scan")
} else {
telemetry.Count("rangefeed.stuck.during-catchup-scan")
}
return errors.Wrapf(errRestartStuckRange, "waiting for r%d %s [threshold %s]", args.RangeID, args.Replica, threshold)
}
// errRestartStuckRange is a sentinel error returned when cancelling a rangefeed that is stuck.
var errRestartStuckRange = errors.New("rangefeed restarting due to inactivity")
// TestingWithOnRangefeedEvent returns a test-only option to modify rangefeed events.
func TestingWithOnRangefeedEvent(
fn func(ctx context.Context, s roachpb.Span, streamID int64, event *kvpb.RangeFeedEvent) (skip bool, _ error),
) RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.knobs.onRangefeedEvent = fn
})
}
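// A test-only sketch (hypothetical; targetSpan is assumed to be defined by the
// test): skip every event observed for one span, or return an error to
// terminate the rangefeed.
//
//	opt := kvcoord.TestingWithOnRangefeedEvent(
//		func(ctx context.Context, s roachpb.Span, streamID int64, event *kvpb.RangeFeedEvent) (bool, error) {
//			return s.Equal(targetSpan), nil
//		})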
// TestingWithRangeFeedMetrics returns a test-only option to specify the metrics to
// use while executing this rangefeed.
func TestingWithRangeFeedMetrics(m *DistSenderRangeFeedMetrics) RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.knobs.metrics = m
})
}
// TestingWithMuxRangeFeedRequestSenderCapture returns a test-only option to specify a callback
// that will be invoked when the mux rangefeed establishes a connection to a node.
func TestingWithMuxRangeFeedRequestSenderCapture(
fn func(nodeID roachpb.NodeID, capture func(request *kvpb.RangeFeedRequest) error),
) RangeFeedOption {
return optionFunc(func(c *rangeFeedConfig) {
c.knobs.captureMuxRangeFeedRequestSender = fn
})
}
// TestingMakeRangeFeedMetrics exposes makeDistSenderRangeFeedMetrics for test use.
var TestingMakeRangeFeedMetrics = makeDistSenderRangeFeedMetrics