Skip to content

Commit

Permalink
storage: exit process when disks are stalled
Browse files Browse the repository at this point in the history
We've had two recent incidents in which we saw clusters with disks
stalled on a subset of their nodes. This is a fairly treacherous
failure mode since

- the symptoms are nondescript: from the UI it often looks like a Raft
problem, logspy will freeze up as well, and so you waste some time until
you end up looking at the goroutine dump and notice the writes stuck in
syscall
- the node is left in a semi-live state that borders on byzantine and
can cause further trouble for the unaffected part of the cluster (we
have some mitigations in place, but not enough, and we need to improve
our defense mechanisms).
- it's sudden and often can't be gleaned from the logs (since everything
is fine and then nothing ever completes so no "alertable" metrics are
emitted).

This commit introduces a simple mechanism that periodically checks for
these conditions (both on the engines and logging) and invokes a fatal
error if necessary.

The accompanying roachtest exercises both a data and a logging disk
stall.

Fixes #7882.
Fixes #32736.

Touches #7646.

Release note (bug fix): CockroachDB will error with a fatal exit when
data or logging partitions become unresponsive. Previously, the process
would remain running, though in an unresponsive state.
  • Loading branch information
tbg committed Jan 3, 2019
1 parent 4f701b9 commit d591a49
Show file tree
Hide file tree
Showing 11 changed files with 273 additions and 202 deletions.
3 changes: 1 addition & 2 deletions pkg/cli/interactive_tests/test_missing_log_output.tcl
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,7 @@ start_test "Check that a broken log file prints a message to stderr."
system "mkdir -p logs"
system "touch logs/broken"
send "$argv start -s=path=logs/db --log-dir=logs/broken --insecure --logtostderr\r"
eexpect "log: exiting because of error: log: cannot create log: open"
eexpect "not a directory"
eexpect "unable to create log directory"
eexpect ":/# "
end_test

Expand Down
14 changes: 9 additions & 5 deletions pkg/cli/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -1004,6 +1004,11 @@ func setupAndInitializeLoggingAndProfiling(ctx context.Context) (*stop.Stopper,
}
}

// Make sure the path exists.
if err := os.MkdirAll(logDir, 0755); err != nil {
return nil, errors.Wrap(err, "unable to create log directory")
}

// Note that we configured the --log-dir flag to set
// startContext.logDir. This is the point at which we set log-dir for the
// util/log package. We don't want to set it earlier to avoid spuriously
Expand All @@ -1014,11 +1019,10 @@ func setupAndInitializeLoggingAndProfiling(ctx context.Context) (*stop.Stopper,
return nil, err
}

// Make sure the path exists.
if err := os.MkdirAll(logDir, 0755); err != nil {
return nil, err
}
log.Eventf(ctx, "created log directory %s", logDir)
// NB: this message is a crutch until #33458 is addressed. Without it,
// the calls to log.Shout below can be the first use of logging, hitting
// the bug described in the issue.
log.Infof(ctx, "logging to directory %s", logDir)

// Start the log file GC daemon to remove files that make the log
// directory too large.
Expand Down
141 changes: 141 additions & 0 deletions pkg/cmd/roachtest/disk_stall.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
// Copyright 2018 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package main

import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strings"
"time"

"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

// registerDiskStalledDetection registers four variants of the disk-stall
// roachtest: every combination of a stalled/healthy log directory and a
// stalled/healthy data directory (the all-healthy variant acts as a sanity
// check that the detection does not fire spuriously).
func registerDiskStalledDetection(r *registry) {
	for _, stallLog := range []bool{false, true} {
		for _, stallData := range []bool{false, true} {
			// Copy the loop variables: the Run closure below must capture
			// this iteration's values, not the shared loop variables.
			stallLog, stallData := stallLog, stallData
			r.Add(testSpec{
				Name: fmt.Sprintf(
					"disk-stalled/log=%t,data=%t",
					stallLog, stallData,
				),
				MinVersion: `v2.2.0`,
				Nodes:      nodes(1),
				Run: func(ctx context.Context, t *test, c *cluster) {
					runDiskStalledDetection(ctx, t, c, stallLog, stallData)
				},
			})
		}
	}
}

// runDiskStalledDetection mounts a charybdefs fault-injection filesystem on
// the node, starts a cockroach server whose log and/or data directory live
// on the faulty mount (per affectsLogDir/affectsDataDir), injects a per-
// syscall delay, and asserts that the server dies with the expected
// "disk stall detected" message — or, when neither directory is affected,
// that it survives for the full run duration.
func runDiskStalledDetection(
	ctx context.Context, t *test, c *cluster, affectsLogDir bool, affectsDataDir bool,
) {
	n := c.Node(1)
	// NOTE(review): tmpDir is created and removed but never otherwise used in
	// this function — it looks like dead code; confirm before removing.
	tmpDir, err := ioutil.TempDir("", "stalled")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = os.RemoveAll(tmpDir)
	}()

	c.Put(ctx, cockroach, "./cockroach")
	// Clean up any faulty mount left behind by a previous run, then create
	// the real backing directory and the faulty mount point.
	c.Run(ctx, n, "sudo umount -f {store-dir}/faulty || true")
	c.Run(ctx, n, "mkdir -p {store-dir}/{real,faulty} || true")
	// Make sure the actual logs are downloaded as artifacts.
	c.Run(ctx, n, "rm -f logs/real && ln -s {store-dir}/real/logs logs/real || true")

	t.Status("setting up charybdefs")

	if err := execCmd(ctx, t.l, roachprod, "install", c.makeNodes(n), "charybdefs"); err != nil {
		t.Fatal(err)
	}
	// Mount charybdefs so that {store-dir}/faulty is a fault-injectable view
	// of {store-dir}/real.
	c.Run(ctx, n, "sudo charybdefs {store-dir}/faulty -oallow_other,modules=subdir,subdir={store-dir}/real && chmod 777 {store-dir}/{real,faulty}")
	l, err := t.l.ChildLogger("cockroach")
	if err != nil {
		t.Fatal(err)
	}
	// result carries the server's exit error and combined output from the
	// goroutine below back to the main test goroutine.
	type result struct {
		err error
		out string
	}
	errCh := make(chan result)

	// NB: charybdefs' delay nemesis introduces 50ms per syscall. It would
	// be nicer to introduce a longer delay, but this works.
	tooShortSync := 40 * time.Millisecond

	// Point the affected directory/ies at the faulty mount and lower the
	// corresponding max-sync threshold below the injected delay so the stall
	// detector fires; unaffected ones use the real dir and an effectively
	// infinite (1h) threshold.
	maxLogSync := time.Hour
	logDir := "real/logs"
	if affectsLogDir {
		logDir = "faulty/logs"
		maxLogSync = tooShortSync
	}
	maxDataSync := time.Hour
	dataDir := "real"
	if affectsDataDir {
		maxDataSync = tooShortSync
		dataDir = "faulty"
	}

	tStarted := timeutil.Now()
	// In the no-fault sanity variant the server is expected to survive, so
	// cap the run at 30s instead of 10m.
	dur := 10 * time.Minute
	if !affectsDataDir && !affectsLogDir {
		dur = 30 * time.Second
	}

	go func() {
		t.WorkerStatus("running server")
		// `timeout --signal 9` bounds the server's lifetime at dur, so this
		// RunWithBuffer always returns with an error (SIGKILL or the stall
		// detector's fatal exit).
		out, err := c.RunWithBuffer(ctx, l, n,
			fmt.Sprintf("timeout --signal 9 %ds env COCKROACH_ENGINE_MAX_SYNC_DURATION=%s COCKROACH_LOG_MAX_SYNC_DURATION=%s "+
				"./cockroach start --insecure --logtostderr=INFO --store {store-dir}/%s --log-dir {store-dir}/%s",
				int(dur.Seconds()), maxDataSync, maxLogSync, dataDir, logDir,
			),
		)
		errCh <- result{err, string(out)}
	}()

	// Let the server run normally for a random 0-4s before injecting faults.
	time.Sleep(time.Duration(rand.Intn(5)) * time.Second)

	t.Status("blocking storage")
	c.Run(ctx, n, "charybdefs-nemesis --delay")

	res := <-errCh
	if res.err == nil {
		t.Fatalf("expected an error: %s", res.out)
	}

	// This test can also run in sanity check mode to make sure it doesn't fail
	// due to the aggressive env vars above.
	expectMsg := affectsDataDir || affectsLogDir

	// The output must contain the detector's message exactly when a stall was
	// injected; in the sanity variant the process must also have lived for
	// the full duration (i.e. was killed by `timeout`, not a premature exit).
	if expectMsg != strings.Contains(res.out, "disk stall detected") {
		t.Fatalf("unexpected output: %v %s", res.err, res.out)
	} else if elapsed := timeutil.Since(tStarted); !expectMsg && elapsed < dur {
		t.Fatalf("no disk stall injected, but process terminated too early after %s (expected >= %s)", elapsed, dur)
	}

	c.Run(ctx, n, "charybdefs-nemesis --clear")
	c.Run(ctx, n, "sudo umount {store-dir}/faulty")
}
1 change: 1 addition & 0 deletions pkg/cmd/roachtest/registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ func registerTests(r *registry) {
registerDecommission(r)
registerDiskUsage(r)
registerDiskFull(r)
registerDiskStalledDetection(r)
registerDrop(r)
registerElectionAfterRestart(r)
registerEncryption(r)
Expand Down
2 changes: 2 additions & 0 deletions pkg/server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -1309,6 +1309,8 @@ func (s *Server) Start(ctx context.Context) error {
}
s.stopper.AddCloser(&s.engines)

startAssertEngineHealth(ctx, s.stopper, s.engines)

// Write listener info files early in the startup sequence. `listenerInfo` has a comment.
listenerFiles := listenerInfo{
advertise: s.cfg.AdvertiseAddr,
Expand Down
72 changes: 72 additions & 0 deletions pkg/server/server_engine_health.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
// Copyright 2018 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package server

import (
"context"
"fmt"
"time"

"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

var maxSyncDuration = envutil.EnvOrDefaultDuration("COCKROACH_ENGINE_MAX_SYNC_DURATION", 10*time.Second)

// startAssertEngineHealth starts a goroutine that periodically verifies that
// syncing the engines is possible within maxSyncDuration. If not,
// the process is terminated (with an attempt at a descriptive message).
// startAssertEngineHealth spawns a stopper-managed worker that verifies,
// every 10 seconds, that a synchronous no-op write to each engine completes
// within maxSyncDuration. If a write stalls, the process is terminated with
// a descriptive fatal error (see assertEngineHealth). The worker exits when
// the stopper quiesces.
func startAssertEngineHealth(ctx context.Context, stopper *stop.Stopper, engines []engine.Engine) {
	stopper.RunWorker(ctx, func(ctx context.Context) {
		timer := timeutil.NewTimer()
		// Fire immediately so the first health check runs at startup.
		timer.Reset(0)

		for {
			select {
			case <-stopper.ShouldQuiesce():
				return
			case <-timer.C:
				timer.Read = true
				timer.Reset(10 * time.Second)
				assertEngineHealth(ctx, engines, maxSyncDuration)
			}
		}
	})
}

// guaranteedExitFatal logs a FATAL message in a way that guarantees process
// termination even if the logging subsystem itself is stalled.
func guaranteedExitFatal(ctx context.Context, format string, args ...interface{}) {
	// NB: log.Shout sets up a timer that guarantees process termination.
	message := fmt.Sprintf(format, args...)
	log.Shout(ctx, log.Severity_FATAL, message)
}

// assertEngineHealth performs a synchronous no-op write to each engine and
// terminates the process with a fatal error if any write does not complete
// within maxDuration.
//
// Each engine's check runs in its own closure so that the deferred
// t.Stop() disarms that engine's timer as soon as its write returns,
// rather than at the end of the whole loop.
func assertEngineHealth(ctx context.Context, engines []engine.Engine, maxDuration time.Duration) {
	for _, eng := range engines {
		func() {
			t := time.AfterFunc(maxDuration, func() {
				// NB: the disk-stall-detected roachtest matches on this message.
				//
				// Report maxDuration — the threshold that actually armed this
				// timer — rather than the package-level maxSyncDuration, which
				// may differ from what the caller passed in.
				guaranteedExitFatal(ctx, "disk stall detected: unable to write to %s within %s",
					eng, maxDuration,
				)
			})
			defer t.Stop()
			// A failed (as opposed to stalled) write is fatal too, but goes
			// through the regular fatal path.
			if err := engine.WriteSyncNoop(ctx, eng); err != nil {
				log.Fatal(ctx, err)
			}
		}()
	}
}
19 changes: 16 additions & 3 deletions pkg/storage/engine/engine.go
Original file line number Diff line number Diff line change
Expand Up @@ -242,9 +242,7 @@ type Writer interface {
// Put sets the given key to the value provided.
Put(key MVCCKey, value []byte) error
// LogData adds the specified data to the RocksDB WAL. The data is
// uninterpreted by RocksDB (i.e. not added to the memtable or
// sstables). Currently only used for performance testing of appending to the
// RocksDB WAL.
// uninterpreted by RocksDB (i.e. not added to the memtable or sstables).
LogData(data []byte) error
// LogLogicalOp logs the specified logical mvcc operation with the provided
// details to the writer, if it has logical op logging enabled. For most
Expand Down Expand Up @@ -455,3 +453,18 @@ func Scan(engine Reader, start, end MVCCKey, max int64) ([]MVCCKeyValue, error)
})
return kvs, err
}

// WriteSyncNoop carries out a synchronous no-op write to the engine.
func WriteSyncNoop(ctx context.Context, eng Engine) error {
batch := eng.NewBatch()
defer batch.Close()

if err := batch.LogData(nil); err != nil {
return err
}

if err := batch.Commit(true /* sync */); err != nil {
return err
}
return nil
}
15 changes: 0 additions & 15 deletions pkg/storage/node_liveness.go
Original file line number Diff line number Diff line change
Expand Up @@ -767,21 +767,6 @@ func (nl *NodeLiveness) updateLiveness(
if err := ctx.Err(); err != nil {
return err
}
for _, eng := range nl.engines {
// Synchronously writing to all disks before updating node liveness because
// we don't want any excessively slow disks to prevent the lease from
// shifting to other nodes. If the disk is slow, batch.Commit() will block.
batch := eng.NewBatch()
defer batch.Close()

if err := batch.LogData(nil); err != nil {
return errors.Wrapf(err, "couldn't update node liveness because LogData to disk fails")
}

if err := batch.Commit(true /* sync */); err != nil {
return errors.Wrapf(err, "couldn't update node liveness because Commit to disk fails")
}
}
if err := nl.updateLivenessAttempt(ctx, update, oldLiveness, handleCondFailed); err != nil {
// Intentionally don't errors.Cause() the error, or we'd hop past errRetryLiveness.
if _, ok := err.(*errRetryLiveness); ok {
Expand Down
Loading

0 comments on commit d591a49

Please sign in to comment.