sqlccl: rate limit presplitRanges in RESTORE
On clusters of more than 1 node, dumping all 16000 splits at once (more
or less) would consistently get the cluster stuck in a state it never
got out of. Given that our normal codepaths probably weren't designed
with this sort of abuse in mind, spread the splits out a bit.

For cockroachdb#14792.
danhhz committed Apr 11, 2017
1 parent 19a8486 commit 4f92ff4
Showing 1 changed file: pkg/ccl/sqlccl/restore.go (13 additions, 0 deletions).
@@ -12,6 +12,7 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
+	"golang.org/x/time/rate"
 
 	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
 	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
@@ -479,14 +480,26 @@ func presplitRanges(baseCtx context.Context, db client.DB, input []roachpb.Key)
 
 	ctx, span := tracing.ChildSpan(baseCtx, "presplitRanges")
 	defer tracing.FinishSpan(span)
+	log.Infof(ctx, "presplitting %d ranges", len(input))
 
 	if len(input) == 0 {
 		return nil
 	}
 
+	// 100 was picked because it's small enough to work with on a 3-node cluster
+	// on my laptop and large enough that it only takes a couple minutes to
+	// presplit for a ~16000 range dataset.
+	// TODO(dan): See if there's some better solution #14798.
+	const splitsPerSecond, splitsBurst = 100, 1
+	limiter := rate.NewLimiter(splitsPerSecond, splitsBurst)
+
 	g, ctx := errgroup.WithContext(ctx)
 	var splitFn func([]roachpb.Key) error
 	splitFn = func(splitPoints []roachpb.Key) error {
+		if err := limiter.Wait(ctx); err != nil {
+			return err
+		}
+
 		// Pick the index such that it's 0 if len(splitPoints) == 1.
 		splitIdx := len(splitPoints) / 2
 		// AdminSplit requires that the key be a valid table key, which means
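For reference, a minimal standalone sketch of the pattern this commit introduces: a token-bucket limiter from golang.org/x/time/rate gating work fanned out through an errgroup, using the same rate of 100 per second and burst of 1. The doSplit stub and the loop bound here are hypothetical stand-ins for the AdminSplit calls in restore.go, not the actual restore code.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/time/rate"
)

// doSplit is a hypothetical stand-in for the AdminSplit work in restore.go.
func doSplit(i int) error {
	fmt.Println("split", i)
	return nil
}

func main() {
	// Same constants as the commit: 100 tokens per second, burst of 1.
	limiter := rate.NewLimiter(100, 1)

	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 10; i++ {
		i := i // capture loop variable (needed before Go 1.22)
		g.Go(func() error {
			// Wait blocks until a token is available (or ctx is canceled),
			// so the goroutines collectively never exceed the rate cap.
			if err := limiter.Wait(ctx); err != nil {
				return err
			}
			return doSplit(i)
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}

With a burst of 1, Wait hands out tokens roughly 10ms apart, so a ~16000-range dataset takes on the order of 16000 / 100 = 160 seconds to presplit, matching the "couple minutes" estimate in the code comment.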
