storage: flow control throttling replica operations
Repurposing #13869.

The leader maintains a pool of "proposal quota". Before proposing a Raft
command, we acquire 1 unit of proposal quota. When all of the healthy
followers have committed an entry, that unit of proposal quota is
returned to the pool. The proposal quota pool size is hard-coded to 1000,
which allows fairly deep pipelining of Raft commands.

We only consider followers that have "healthy" RPC connections when
determining if a unit of quota should be returned to the pool.
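
In outline, the leader-side flow looks roughly like the following sketch (not the code in this commit; propose and onAllHealthyCommitted are hypothetical stand-ins for the replica's Raft plumbing, written against the quotaPool added below):

// Sketch only: threading the quota pool through a proposal. propose and
// onAllHealthyCommitted are hypothetical stand-ins, not this commit's code.
func proposeWithQuota(
	ctx context.Context, qp *quotaPool, propose func() error, onAllHealthyCommitted func(func()),
) error {
	// Block until one unit of proposal quota is free (or ctx is canceled).
	if err := qp.acquire(ctx); err != nil {
		return err
	}
	if err := propose(); err != nil {
		qp.add(1) // a failed proposal returns its quota immediately
		return err
	}
	// The unit is returned only once every follower with a healthy RPC
	// connection has committed the entry; until then, up to the pool size
	// (1000) proposals may be in flight.
	onAllHealthyCommitted(func() { qp.add(1) })
	return nil
}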
irfansharif committed May 15, 2017
1 parent 9785785 commit 188b45d
Showing 4 changed files with 383 additions and 2 deletions.
116 changes: 116 additions & 0 deletions pkg/storage/client_raft_test.go
@@ -1678,6 +1678,122 @@ func TestReplicateRemoveAndAdd(t *testing.T) {
	testReplicaAddRemove(t, false)
}

// TestQuotaPool verifies that writes get throttled in the case where we have
// two fast-moving replicas with sufficiently fast-growing raft logs and a
// slower replica catching up. By throttling write throughput we avoid having
// to constantly catch up the slower node via snapshots. See #8659.
func TestQuotaPool(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const quota = 1
	const numReplicas = 3
	const rangeID = 1
	sc := storage.TestStoreConfig(nil)
	// Suppress timeout-based elections to avoid leadership changes in ways
	// this test doesn't expect.
	sc.RaftElectionTimeoutTicks = 100000
	mtc := &multiTestContext{storeConfig: &sc}
	mtc.Start(t, numReplicas)
	defer mtc.Stop()

	mtc.replicateRange(rangeID, 1, 2)

	// Log truncation requests generate raft log entries and consequently
	// acquire quota. To deterministically simulate a fixed number of quota
	// acquisitions we deactivate the raft log queue on each replica.
	for _, store := range mtc.stores {
		store.SetRaftLogQueueActive(false)
	}

	// Heartbeats (for node liveness) generate raft log entries and
	// consequently acquire quota. To deterministically simulate a fixed number
	// of quota acquisitions we pause heartbeats on each replica.
	for _, nl := range mtc.nodeLivenesses {
		nl.PauseHeartbeat(true)
	}

	leaderRepl := mtc.getRaftLeader(rangeID)
	leaderRepl.SetQuotaPool(quota)

	followerRepl := func() *storage.Replica {
		for _, store := range mtc.stores {
			repl, err := store.GetReplica(rangeID)
			if err != nil {
				t.Fatal(err)
			}
			if repl == leaderRepl {
				continue
			}
			return repl
		}
		return nil
	}()
	if followerRepl == nil {
		t.Fatal("could not get a handle on a follower replica")
	}

	followerDesc, err := followerRepl.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	// Block the follower replica. NB: See TestRaftBlockedReplica/#9914 for
	// why we call RaftLock from a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		followerRepl.RaftLock()
		wg.Done()
	}()
	wg.Wait()

	// We can write up to 'quota' number of keys before writes get throttled.
	// We verify this by writing this many keys and ensuring the next write is
	// blocked.
	//
	// NB: This can block if some other moving part of the system gets a
	// proposal in. At the time of writing the only moving parts are the node
	// liveness heartbeats and raft log truncations, both of which are disabled
	// for the purposes of this test.
	//
	// TODO(irfansharif): Once we move to quota acquisitions based on the size
	// (in bytes) of the generated raft log entry this will have to be
	// revisited.
	incArgs := incrementArgs([]byte("k"), 1)
	for i := 0; i < quota; i++ {
		if _, err := client.SendWrapped(context.Background(), leaderRepl, incArgs); err != nil {
			t.Fatal(err)
		}
	}

	ch := make(chan error, 1)
	go func() {
		defer close(ch)
		// NB: pErr is nil once the throttled write is eventually unblocked
		// and succeeds; GoError converts it to a plain (possibly nil) error.
		_, pErr := client.SendWrapped(context.Background(), leaderRepl, incArgs)
		ch <- pErr.GoError()
	}()

	select {
	case err := <-ch:
		t.Fatalf("write not throttled by the quota pool: err=%v", err)
	case <-time.After(15 * time.Millisecond):
	}

	expected := []int64{quota, quota, quota}
	expected[followerDesc.ReplicaID-1] = 0
	mtc.waitForValues(roachpb.Key("k"), expected)

	followerRepl.RaftUnlock()

	mtc.waitForValues(roachpb.Key("k"), []int64{quota + 1, quota + 1, quota + 1})

	select {
	case err := <-ch:
		if err != nil {
			t.Fatal(err)
		}
	case <-time.After(15 * time.Millisecond):
		t.Fatal("throttled write not unblocked")
	}
}

// TestRaftHeartbeats verifies that coalesced heartbeats are correctly
// suppressing elections in an idle cluster.
func TestRaftHeartbeats(t *testing.T) {
13 changes: 13 additions & 0 deletions pkg/storage/helpers_test.go
@@ -254,6 +254,19 @@ func (r *Replica) GetLease() (roachpb.Lease, *roachpb.Lease) {
	return r.getLease()
}

// SetQuotaPool allows the caller to set a replica's quota pool initialized to
// a given quota. Only safe to call on the leader replica.
func (r *Replica) SetQuotaPool(quota int64) {
	r.mu.Lock()
	defer r.mu.Unlock()

	r.mu.proposalQuotaBaseIndex = r.mu.lastIndex
	if r.mu.proposalQuota != nil {
		r.mu.proposalQuota.close()
	}
	r.mu.proposalQuota = newQuotaPool(quota)
}

// GetTimestampCacheLowWater returns the timestamp cache low water mark.
func (r *Replica) GetTimestampCacheLowWater() hlc.Timestamp {
	r.store.tsCacheMu.Lock()
126 changes: 126 additions & 0 deletions pkg/storage/quota_pool.go
@@ -0,0 +1,126 @@
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Irfan Sharif ([email protected])
//
// The code below is a simplified version of a similar structure found in
// grpc-go (github.com/grpc/grpc-go/blob/b2fae0c/transport/control.go).

/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

package storage

import (
	"errors"

	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
	"golang.org/x/net/context"
)

type quotaPool struct {
	syncutil.Mutex

	// We use a channel to 'park' our quota value for easier composition with
	// context cancellation and leadership changes (see quotaPool.acquire).
	//
	// NB: A value of '0' is never allowed to be parked in the channel; the
	// lack of quota is represented by an empty channel. Quota additions push
	// a value into the channel whereas acquisitions wait on the channel
	// itself.
	quota chan int64
	done  chan struct{}
}

// newQuotaPool returns a new quota pool initialized with a given quota;
// newQuotaPool(0) is disallowed.
func newQuotaPool(q int64) *quotaPool {
	qp := &quotaPool{
		quota: make(chan int64, 1),
		done:  make(chan struct{}),
	}
	qp.quota <- q
	return qp
}

// add adds the specified quota back to the pool. Safe for concurrent use.
func (qp *quotaPool) add(v int64) {
	if v == 0 {
		return
	}

	qp.Lock()
	select {
	case q := <-qp.quota:
		v += q
	default:
	}
	qp.quota <- v
	qp.Unlock()
}

// acquire acquires a single unit of quota from the pool. On success, nil is
// returned and the caller must call add(1) or otherwise arrange for the quota
// to be returned to the pool. Safe for concurrent use.
func (qp *quotaPool) acquire(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case q := <-qp.quota:
		if q > 1 {
			qp.add(q - 1)
		}
		return nil
	case <-qp.done:
		return errors.New("quota pool no longer in use")
	}
}

// close signals to all ongoing and subsequent acquisitions that the quota
// pool is no longer in use. Idempotent and safe for concurrent use.
func (qp *quotaPool) close() {
	qp.Lock()
	defer qp.Unlock()
	select {
	case <-qp.done:
		// Already closed.
	default:
		close(qp.done)
	}
}
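
For illustration (not part of this commit; exampleQuotaPoolUsage is a hypothetical helper), a short usage sketch of the pool: additions merge into the single parked value, and acquisitions compose with context cancellation.

// Usage sketch only; not part of this commit.
func exampleQuotaPoolUsage(ctx context.Context) error {
	qp := newQuotaPool(2)
	defer qp.close()

	// Two acquisitions drain the parked value; the channel is now empty
	// and a third acquire would block.
	for i := 0; i < 2; i++ {
		if err := qp.acquire(ctx); err != nil {
			return err
		}
	}

	// Returning the two units one at a time merges them into a single
	// parked value of 2.
	qp.add(1)
	qp.add(1)

	// This acquire takes one unit and re-parks the remainder (1). It
	// returns early with an error if ctx is canceled or close is called.
	return qp.acquire(ctx)
}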
