65633: log: create crdb-v2 log entry parser and integrate it into debug merge-logs r=knz,ajwerner a=cameronnunez

Fixes #65504.

The crdb-v2 log file format is not recognized by some tools like debug merge-logs.
This makes it difficult or impossible to merge log files properly.
This patch creates a new parser and integrates it into debug merge-logs.

Release note (cli change): the tool debug merge-logs is now able to recognize 
crdb-v2 formatted log files.
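
For example, a hypothetical invocation exercising the new `--format` flag added by this patch (the file glob is a placeholder):

```shell
cockroach debug merge-logs --format=crdb-v2 'nodes/*/logs/cockroach*.log'
```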

66001: sql: re-enable flaky show_trace_nonmetamorphic test r=cucaroach a=cucaroach

Clean up some commented-out tests.

Resolves #58202

Release note: none

67198: sql: support expression indexes in CREATE TABLE ... LIKE r=mgartner a=mgartner

This commit adds support for copying tables with expression indexes
via a `CREATE TABLE ... LIKE` statement.

There is no release note because expression-based indexes are not
enabled by default. They require the
`experimental_enable_expression_indexes` session setting until they are
fully supported.

Release note: None
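
As an illustration, the kind of schema this now copies correctly — a hedged sketch with hypothetical table and column names, using the session setting named above:

```sql
SET experimental_enable_expression_indexes = true;

CREATE TABLE users (
    id INT PRIMARY KEY,
    email STRING,
    INDEX ((lower(email)))  -- an expression index
);

-- With this change, the expression index is copied along with the rest.
CREATE TABLE users_copy (LIKE users INCLUDING INDEXES);
```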


Co-authored-by: Cameron Nunez <[email protected]>
Co-authored-by: Tommy Reilly <[email protected]>
Co-authored-by: Marcus Gartner <[email protected]>
4 people committed Jul 14, 2021
4 parents: cc4ef82, 72e4e86, 2de304c, f7cc3e6 · commit 00f9b26
Showing 82 changed files with 1,385 additions and 267 deletions.
pkg/cli/debug.go — 11 changes: 7 additions & 4 deletions
@@ -1267,7 +1267,7 @@ func removeDeadReplicas(
 	return batch, nil
 }

-var debugMergeLogsCommand = &cobra.Command{
+var debugMergeLogsCmd = &cobra.Command{
 	Use:   "merge-logs <log file globs>",
 	Short: "merge multiple log files from different machines into a single stream",
 	Long: `
@@ -1295,6 +1295,7 @@ var debugMergeLogsOpts = struct {
 	prefix         string
 	keepRedactable bool
 	redactInput    bool
+	format         string
 }{
 	program: nil, // match everything
 	file:    regexp.MustCompile(log.FilePattern),
@@ -1308,7 +1309,7 @@ func runDebugMergeLogs(cmd *cobra.Command, args []string) error {
 	inputEditMode := log.SelectEditMode(o.redactInput, o.keepRedactable)

 	s, err := newMergedStreamFromPatterns(context.Background(),
-		args, o.file, o.program, o.from, o.to, inputEditMode)
+		args, o.file, o.program, o.from, o.to, inputEditMode, o.format)
 	if err != nil {
 		return err
 	}
@@ -1342,7 +1343,7 @@ var debugCmds = append(DebugCmdsForRocksDB,
 	debugUnsafeRemoveDeadReplicasCmd,
 	debugEnvCmd,
 	debugZipCmd,
-	debugMergeLogsCommand,
+	debugMergeLogsCmd,
 	debugListFilesCmd,
 	debugResetQuorumCmd,
 )
@@ -1440,7 +1441,7 @@ func init() {
 	f.IntSliceVar(&removeDeadReplicasOpts.deadStoreIDs, "dead-store-ids", nil,
 		"list of dead store IDs")

-	f = debugMergeLogsCommand.Flags()
+	f = debugMergeLogsCmd.Flags()
 	f.Var(flagutil.Time(&debugMergeLogsOpts.from), "from",
 		"time before which messages should be filtered")
 	// TODO(knz): the "to" should be named "until" - it's a time boundary, not a space boundary.
@@ -1459,6 +1460,8 @@ func init() {
 		"keep the output log file redactable")
 	f.BoolVar(&debugMergeLogsOpts.redactInput, "redact", debugMergeLogsOpts.redactInput,
 		"redact the input files to remove sensitive information")
+	f.StringVar(&debugMergeLogsOpts.format, "format", "",
+		"log format of the input files")

 	f = debugDecodeProtoCmd.Flags()
 	f.StringVar(&debugDecodeProtoName, "schema", "cockroach.sql.sqlbase.Descriptor",
pkg/cli/debug_merge_logs.go — 39 changes: 29 additions & 10 deletions
@@ -170,6 +170,7 @@ func newMergedStreamFromPatterns(
 	filePattern, programFilter *regexp.Regexp,
 	from, to time.Time,
 	editMode log.EditSensitiveData,
+	format string,
 ) (logStream, error) {
 	paths, err := expandPatterns(patterns)
 	if err != nil {
@@ -180,7 +181,7 @@
 	if err != nil {
 		return nil, err
 	}
-	return newMergedStream(ctx, files, from, to, editMode)
+	return newMergedStream(ctx, files, from, to, editMode, format)
 }

 func groupIndex(re *regexp.Regexp, groupName string) int {
@@ -193,7 +194,11 @@ func groupIndex(re *regexp.Regexp, groupName string) int {
 }

 func newMergedStream(
-	ctx context.Context, files []fileInfo, from, to time.Time, editMode log.EditSensitiveData,
+	ctx context.Context,
+	files []fileInfo,
+	from, to time.Time,
+	editMode log.EditSensitiveData,
+	format string,
 ) (*mergedStream, error) {
 	// TODO(ajwerner): think about clock movement and PID
 	const maxConcurrentFiles = 256 // should be far less than the FD limit
@@ -204,7 +209,7 @@
 		return func() error {
 			sem <- struct{}{}
 			defer func() { <-sem }()
-			s, err := newFileLogStream(files[i], from, to, editMode)
+			s, err := newFileLogStream(files[i], from, to, editMode, format)
 			if s != nil {
 				res[i] = s
 			}
@@ -439,9 +444,10 @@ type fileLogStream struct {
 	prevTime int64
 	fi       fileInfo
 	f        *os.File
-	d        *log.EntryDecoder
+	d        log.EntryDecoder
 	read     bool
 	editMode log.EditSensitiveData
+	format   string

 	e   logpb.Entry
 	err error
@@ -454,13 +460,14 @@
 // file is always closed before returning from this constructor so the initial
 // peek does not consume resources.
 func newFileLogStream(
-	fi fileInfo, from, to time.Time, editMode log.EditSensitiveData,
+	fi fileInfo, from, to time.Time, editMode log.EditSensitiveData, format string,
 ) (logStream, error) {
 	s := &fileLogStream{
 		fi:       fi,
 		from:     from,
 		to:       to,
 		editMode: editMode,
+		format:   format,
 	}
 	if _, ok := s.peek(); !ok {
 		if err := s.error(); err != io.EOF {
@@ -483,10 +490,13 @@ func (s *fileLogStream) open() bool {
 	if s.f, s.err = os.Open(s.fi.path); s.err != nil {
 		return false
 	}
-	if s.err = seekToFirstAfterFrom(s.f, s.from, s.editMode); s.err != nil {
+	if s.err = seekToFirstAfterFrom(s.f, s.from, s.editMode, s.format); s.err != nil {
 		return false
 	}
-	s.d = log.NewEntryDecoder(bufio.NewReaderSize(s.f, readBufSize), s.editMode)
+	var err error
+	if s.d, err = log.NewEntryDecoderWithFormat(bufio.NewReaderSize(s.f, readBufSize), s.editMode, s.format); err != nil {
+		panic(err)
+	}
 	return true
 }

@@ -541,7 +551,9 @@ func (s *fileLogStream) error() error { return s.err }

 // seekToFirstAfterFrom uses binary search to seek to an offset after all
 // entries which occur before from.
-func seekToFirstAfterFrom(f *os.File, from time.Time, editMode log.EditSensitiveData) (err error) {
+func seekToFirstAfterFrom(
+	f *os.File, from time.Time, editMode log.EditSensitiveData, format string,
+) (err error) {
 	if from.IsZero() {
 		return nil
 	}
@@ -560,8 +572,11 @@
 			panic(err)
 		}
 		var e logpb.Entry
-		err := log.NewEntryDecoder(f, editMode).Decode(&e)
+		d, err := log.NewEntryDecoderWithFormat(f, editMode, format)
 		if err != nil {
+			panic(err)
+		}
+		if err := d.Decode(&e); err != nil {
 			if err == io.EOF {
 				return true
 			}
@@ -573,7 +588,11 @@ func seekToFirstAfterFrom(f *os.File, from time.Time, editMode log.EditSensitive
 		return err
 	}
 	var e logpb.Entry
-	if err := log.NewEntryDecoder(f, editMode).Decode(&e); err != nil {
+	d, err := log.NewEntryDecoderWithFormat(f, editMode, format)
+	if err != nil {
+		return err
+	}
+	if err := d.Decode(&e); err != nil {
 		return err
 	}
 	_, err = f.Seek(int64(offset), io.SeekStart)
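
The comment on seekToFirstAfterFrom describes a binary search over byte offsets: probe an offset, decode the first complete entry at or after it, and compare its timestamp to `from`. Below is a minimal sketch of that pattern, assuming the repo's pkg/util/log and logpb packages and using only the NewEntryDecoderWithFormat signature visible in the diff; the helper name and error plumbing are illustrative, not the actual implementation.

```go
import (
	"io"
	"os"
	"sort"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
)

// firstOffsetAtOrAfter (hypothetical) returns the smallest byte offset whose
// first decodable entry has a timestamp >= from, via the sort.Search pattern.
func firstOffsetAtOrAfter(
	f *os.File, size int64, from time.Time,
	editMode log.EditSensitiveData, format string,
) (int64, error) {
	var probeErr error
	offset := sort.Search(int(size), func(i int) bool {
		// Seek to the probe offset; the decoder skips any partial entry
		// and decodes the first complete entry that follows.
		if _, err := f.Seek(int64(i), io.SeekStart); err != nil {
			probeErr = err
			return true
		}
		d, err := log.NewEntryDecoderWithFormat(f, editMode, format)
		if err != nil {
			probeErr = err
			return true
		}
		var e logpb.Entry
		if err := d.Decode(&e); err != nil {
			// Ran off the end of the file: every entry is before from.
			return err == io.EOF
		}
		return e.Time >= from.UnixNano()
	})
	return int64(offset), probeErr
}
```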
(Diffs for the remaining 80 changed files are not shown.)
