Merge #41018 #41020
41018: c-deps: bump rocksdb for unique cache IDs on Windows r=ajkr a=ajkr

Picks up cockroachdb/rocksdb#58.

We found a corruption caused by multiple FAT32 files being assigned the
same block cache key prefix. We don't know the extent to which this
problem affects other filesystems or other Windows file ID generation
mechanisms, so we decided to stop relying on the filesystem to generate
cache keys on Windows. Instead, we randomize the cache key prefix per
table reader. This incurs a performance penalty for use cases that open
multiple table readers per file, but I believe cockroach is not such a
use case.
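
To illustrate the trade-off, here is a minimal Go sketch of the two
cache-key strategies (the real change lives in RocksDB's C++ Windows
port, port/win/io_win.cc; all names below are hypothetical):

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

// fileIDPrefix derives a block cache key prefix from a filesystem file
// ID. If the filesystem hands out duplicate IDs (as observed on FAT32),
// two distinct files share a prefix and their cached blocks collide.
func fileIDPrefix(fileID uint64) [8]byte {
	var p [8]byte
	binary.LittleEndian.PutUint64(p[:], fileID)
	return p
}

// randomPrefix draws a fresh prefix per table reader instead. Collisions
// become vanishingly unlikely, at the cost of losing cache hits across
// multiple table readers opened for the same file.
func randomPrefix() [8]byte {
	var p [8]byte
	if _, err := rand.Read(p[:]); err != nil {
		panic(err) // reads from crypto/rand should not fail here
	}
	return p
}

func main() {
	// Two different files that the filesystem reported under the same ID:
	a, b := fileIDPrefix(42), fileIDPrefix(42)
	fmt.Println("file-ID prefixes collide:", a == b) // true
	fmt.Println("random prefixes collide:", randomPrefix() == randomPrefix()) // false (with overwhelming probability)
}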

Fixes #40918, fixes #40950.

Release justification: Prevents corruption on some Windows filesystems

Release note: None

41020: util/log: fix GC of secondary loggers r=petermattis a=knz

Fixes #40974.

This is a subset of #40993 suitable for 19.2 and for backporting to 19.1.

Release justification: bug fix

Release note (bug fix): CockroachDB will now properly remove excess
secondary log files (SQL audit logging, statement execution logging,
and RocksDB events).
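
The root cause is a classic Go pitfall: methods on the logger type read
the package-level singleton (logging) instead of their receiver, so a
secondary logger rotated its files but notified, and listed files for,
the primary logger; the two one-line hunks below fix exactly that. A
minimal sketch of the pattern, with hypothetical names:

package main

import "fmt"

type loggingT struct {
	name   string
	logDir string
}

// Package-level singleton, analogous to logging in pkg/util/log.
var logging = loggingT{name: "primary", logDir: "/logs/primary"}

// Buggy: consults the global's directory, so a secondary logger lists
// (and garbage-collects) the primary logger's files.
func (l *loggingT) logDirBuggy() string { return logging.logDir }

// Fixed: uses the receiver.
func (l *loggingT) logDirFixed() string { return l.logDir }

func main() {
	secondary := loggingT{name: "sql-audit", logDir: "/logs/audit"}
	fmt.Println(secondary.logDirBuggy()) // /logs/primary (wrong logger)
	fmt.Println(secondary.logDirFixed()) // /logs/audit
}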

Co-authored-by: Andrew Kryczka <[email protected]>
Co-authored-by: Raphael 'kena' Poss <[email protected]>
3 people committed Sep 24, 2019
3 parents ded2b6c + ece7b8b + 70be7eb commit 83eb885
Showing 5 changed files with 195 additions and 87 deletions.
2 changes: 1 addition & 1 deletion c-deps/rocksdb
Submodule rocksdb updated 1 file
+11 −54 port/win/io_win.cc
2 changes: 1 addition & 1 deletion pkg/util/log/clog.go
@@ -1161,7 +1161,7 @@ func (sb *syncBuffer) rotateFile(now time.Time) error {
}

select {
- case logging.gcNotify <- struct{}{}:
+ case sb.logger.gcNotify <- struct{}{}:
default:
}
return nil
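For context on the hunk above: the select with a default case is Go's
standard non-blocking send idiom. gcNotify is a buffered channel; if a
GC wake-up is already pending, the new signal is coalesced rather than
blocking the file rotation path. A self-contained sketch (buffer size 1
is an assumption of this sketch, not taken from clog.go):

package main

import "fmt"

func main() {
	// Buffered with capacity 1: one pending wake-up is enough.
	gcNotify := make(chan struct{}, 1)

	notify := func() {
		select {
		case gcNotify <- struct{}{}: // daemon not yet poked: wake it up
		default: // wake-up already pending: drop (coalesce) the signal
		}
	}

	notify()
	notify() // coalesced; neither queued nor blocking
	fmt.Println("pending wake-ups:", len(gcNotify)) // 1
}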
84 changes: 0 additions & 84 deletions pkg/util/log/clog_test.go
@@ -27,7 +27,6 @@ import (
"regexp"
"strings"
"sync"
"sync/atomic"
"testing"
"time"

@@ -559,89 +558,6 @@ func TestRollover(t *testing.T) {
}
}

func TestGC(t *testing.T) {
s := ScopeWithoutShowLogs(t)
defer s.Close(t)

logging.mu.Lock()
logging.disableDaemons = true
defer func(previous bool) {
logging.mu.Lock()
logging.disableDaemons = previous
logging.mu.Unlock()
}(logging.disableDaemons)
logging.mu.Unlock()

setFlags()

const newLogFiles = 20

// Prevent writes to stderr from being sent to log files which would screw up
// the expected number of log file calculation below.
logging.noStderrRedirect = true

// Create 1 log file to figure out its size.
Infof(context.Background(), "0")

allFilesOriginal, err := ListLogFiles()
if err != nil {
t.Fatal(err)
}
if e, a := 1, len(allFilesOriginal); e != a {
t.Fatalf("expected %d files, but found %d", e, a)
}
dir, err := logging.logDir.get()
if err != nil {
t.Fatal(err)
}
f, err := os.Open(filepath.Join(dir, allFilesOriginal[0].Name))
if err != nil {
t.Fatal(err)
}
stat, err := f.Stat()
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
// logFileSize is the size of the first log file we wrote to.
logFileSize := stat.Size()
const expectedFilesAfterGC = 2
// Pick a max total size that's between 2 and 3 log files in size.
maxTotalLogFileSize := logFileSize*expectedFilesAfterGC + logFileSize/2

defer func(previous int64) { LogFileMaxSize = previous }(LogFileMaxSize)
LogFileMaxSize = 1 // ensure rotation on every log write
defer func(previous int64) {
atomic.StoreInt64(&LogFilesCombinedMaxSize, previous)
}(LogFilesCombinedMaxSize)
atomic.StoreInt64(&LogFilesCombinedMaxSize, maxTotalLogFileSize)

for i := 1; i < newLogFiles; i++ {
Infof(context.Background(), "%d", i)
Flush()
}

allFilesBefore, err := ListLogFiles()
if err != nil {
t.Fatal(err)
}
if e, a := newLogFiles, len(allFilesBefore); e != a {
t.Fatalf("expected %d files, but found %d", e, a)
}

logging.gcOldFiles()

allFilesAfter, err := ListLogFiles()
if err != nil {
t.Fatal(err)
}
if e, a := expectedFilesAfterGC, len(allFilesAfter); e != a {
t.Fatalf("expected %d files, but found %d", e, a)
}
}

func TestLogBacktraceAt(t *testing.T) {
s := ScopeWithoutShowLogs(t)
defer s.Close(t)
2 changes: 1 addition & 1 deletion pkg/util/log/file.go
@@ -277,7 +277,7 @@ func ListLogFiles() ([]FileInfo, error) {

func (l *loggingT) listLogFiles() ([]FileInfo, error) {
var results []FileInfo
- dir, err := logging.logDir.get()
+ dir, err := l.logDir.get()
if err != nil {
// No log directory configured: simply indicate that there are no
// log files.
192 changes: 192 additions & 0 deletions pkg/util/log/log_gc_test.go
@@ -0,0 +1,192 @@
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package log

import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync/atomic"
"testing"
"time"

"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

func TestGC(t *testing.T) {
s := ScopeWithoutShowLogs(t)
defer s.Close(t)

setFlags()

testLogGC(t, &logging, Infof)
}

func TestSecondaryGC(t *testing.T) {
s := ScopeWithoutShowLogs(t)
defer s.Close(t)

setFlags()

tmpDir, err := ioutil.TempDir(logging.logDir.String(), "gctest")
if err != nil {
t.Fatal(err)
}
defer func() {
if !t.Failed() {
// If the test failed, we'll want to keep the artifacts for
// troubleshooting.
_ = os.RemoveAll(tmpDir)
}
}()
tmpDirName := DirName{name: tmpDir}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l := NewSecondaryLogger(ctx, &tmpDirName, "woo", false /*enableGc*/, false /*syncWrites*/, true /*msgCount*/)

testLogGC(t, &l.logger, l.Logf)
}

func testLogGC(
t *testing.T,
logger *loggingT,
logFn func(ctx context.Context, format string, args ...interface{}),
) {
logging.mu.Lock()
logging.disableDaemons = true
defer func(previous bool) {
logging.mu.Lock()
logging.disableDaemons = previous
logging.mu.Unlock()
}(logging.disableDaemons)
logging.mu.Unlock()

const newLogFiles = 20

// Prevent writes to stderr from being sent to log files, which would
// throw off the expected log file count computed below.
logging.noStderrRedirect = true

// Ensure the main log has at least one entry. This serves two
// purposes. One is to serve as baseline for the file size. The
// other is to ensure that the TestLogScope does not erase the
// temporary directory, preventing further investigation.
Infof(context.Background(), "0")

// Make an entry in the target logger. This ensures that there is at
// least one file in the target directory for the logger being
// tested.
logFn(context.Background(), "0")

// Check that the file was created.
allFilesOriginal, err := logger.listLogFiles()
if err != nil {
t.Fatal(err)
}
if e, a := 1, len(allFilesOriginal); e != a {
t.Fatalf("expected %d files, but found %d", e, a)
}

// Check that the file exists, and also measure its size.
// We'll use this as base value for the maximum combined size
// below, to force GC.
dir, err := logger.logDir.get()
if err != nil {
t.Fatal(err)
}
stat, err := os.Stat(filepath.Join(dir, allFilesOriginal[0].Name))
if err != nil {
t.Fatal(err)
}
// logFileSize is the size of the first log file we wrote to.
logFileSize := stat.Size()
const expectedFilesAfterGC = 2
// Pick a max total size that's between 2 and 3 log files in size.
maxTotalLogFileSize := logFileSize*expectedFilesAfterGC + logFileSize/2

// We want to create multiple log files below. For this we need to
// override the size/number limits first to the values suitable for
// the test.
defer func(previous int64) { LogFileMaxSize = previous }(LogFileMaxSize)
LogFileMaxSize = 1 // ensure rotation on every log write
defer func(previous int64) {
atomic.StoreInt64(&LogFilesCombinedMaxSize, previous)
}(LogFilesCombinedMaxSize)
atomic.StoreInt64(&LogFilesCombinedMaxSize, maxTotalLogFileSize)

// Create the number of expected log files.
for i := 1; i < newLogFiles; i++ {
logFn(context.Background(), "%d", i)
Flush()
}

allFilesBefore, err := logger.listLogFiles()
if err != nil {
t.Fatal(err)
}
if e, a := newLogFiles, len(allFilesBefore); e != a {
t.Fatalf("expected %d files, but found %d", e, a)
}

// Re-enable GC, so that the GC daemon can pick up the files.
logger.mu.Lock()
logger.disableDaemons = false
logger.mu.Unlock()
// Start the GC daemon, using a context that will terminate it
// at the end of the test.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go logger.gcDaemon(ctx)

// Emit a log line which will rotate the files and trigger GC.
logFn(context.Background(), "final")
Flush()

succeedsSoon(t, func() error {
allFilesAfter, err := logger.listLogFiles()
if err != nil {
return err
}
if e, a := expectedFilesAfterGC, len(allFilesAfter); e != a {
return fmt.Errorf("expected %d files, but found %d", e, a)
}
return nil
})
}

// succeedsSoon is a simplified version of testutils.SucceedsSoon.
// The main implementation cannot be used here because of
// an import cycle.
func succeedsSoon(t *testing.T, fn func() error) {
t.Helper()
tBegin := timeutil.Now()
deadline := tBegin.Add(5 * time.Second)
var lastErr error
// Retry with exponential backoff, starting at 1ns and capped at 1s below.
for wait := time.Duration(1); timeutil.Now().Before(deadline); wait *= 2 {
lastErr = fn()
if lastErr == nil {
return
}
if wait > time.Second {
wait = time.Second
}
if timeutil.Since(tBegin) > 3*time.Second {
InfofDepth(context.Background(), 2, "SucceedsSoon: %+v", lastErr)
}
time.Sleep(wait)
}
if lastErr != nil {
t.Fatalf("condition failed to evalute: %+v", lastErr)
}
}
