sql: created a scheduled logger to capture index usage stats
This change introduces a scheduled logger that runs background processes intended to emit logs on a time interval.

Release note (sql change): Initial implementation of a scheduled logger, used to capture index usage statistics to the telemetry logging channel.
Thomas Hardy committed on Feb 23, 2022
1 parent: 5b3067f · commit: 7efa752
Showing 10 changed files with 556 additions and 0 deletions.
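The emitter added in this commit plugs into a small registry: it implements Emit, Interval, IsEnabled, and IsRunning, and registers itself via RegisterLogEmitter in an init function. The scheduled_logger.go file that defines the ScheduledLogEmitter interface and the registry is part of this commit but is not reproduced in this excerpt, so the following is only a minimal sketch of what those declarations might look like, inferred from the call sites below; the registry map and the concrete emitter-type constant are illustrative assumptions, not the committed code.

package scheduledlogging

import (
  "context"
  "time"

  "github.com/cockroachdb/cockroach/pkg/settings/cluster"
  "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
  "github.com/cockroachdb/cockroach/pkg/util/stop"
)

// ScheduledLogEmitterType identifies a registered emitter. The concrete
// value below is assumed; the real declaration lives in scheduled_logger.go.
type ScheduledLogEmitterType int

// CaptureIndexUsageStatsEmitterType is assumed to be one such value.
const CaptureIndexUsageStatsEmitterType ScheduledLogEmitterType = 0

// ScheduledLogEmitter is a hypothetical reconstruction of the interface
// that CaptureIndexUsageStatsEmitter (below) satisfies.
type ScheduledLogEmitter interface {
  // Emit runs one collection pass and writes structured events.
  Emit(ctx context.Context, ie sqlutil.InternalExecutor, stopper *stop.Stopper) error
  // Interval is how often the scheduled logger invokes Emit.
  Interval() time.Duration
  // IsEnabled gates the emitter on a cluster setting.
  IsEnabled(cs *cluster.Settings) bool
  // IsRunning reports whether a previous emission is still in flight.
  IsRunning() bool
}

// registry holds emitters keyed by type, populated from init() functions.
var registry = map[ScheduledLogEmitterType]ScheduledLogEmitter{}

// RegisterLogEmitter records an emitter so the scheduled logger can poll it.
func RegisterLogEmitter(typ ScheduledLogEmitterType, e ScheduledLogEmitter) {
  registry[typ] = e
}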
pkg/sql/scheduledlogging/BUILD.bazel
@@ -0,0 +1,44 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") | ||
|
||
go_library( | ||
name = "scheduledlogging", | ||
srcs = [ | ||
"captured_index_usage_stats.go", | ||
"scheduled_logger.go", | ||
], | ||
importpath = "github.com/cockroachdb/cockroach/pkg/sql/scheduledlogging", | ||
visibility = ["//visibility:public"], | ||
deps = [ | ||
"//pkg/kv", | ||
"//pkg/roachpb", | ||
"//pkg/security", | ||
"//pkg/settings", | ||
"//pkg/settings/cluster", | ||
"//pkg/sql/sem/tree", | ||
"//pkg/sql/sessiondata", | ||
"//pkg/sql/sqlutil", | ||
"//pkg/util/log", | ||
"//pkg/util/log/eventpb", | ||
"//pkg/util/stop", | ||
"//pkg/util/syncutil", | ||
"@com_github_cockroachdb_errors//:errors", | ||
], | ||
) | ||
|
||
go_test( | ||
name = "scheduledlogging_test", | ||
srcs = [ | ||
"captured_index_usage_stats_test.go", | ||
"scheduled_logger_test.go", | ||
], | ||
embed = [":scheduledlogging"], | ||
deps = [ | ||
"//pkg/base", | ||
"//pkg/testutils/serverutils", | ||
"//pkg/testutils/sqlutils", | ||
"//pkg/util/leaktest", | ||
"//pkg/util/log", | ||
"//pkg/util/log/channel", | ||
"//pkg/util/log/logconfig", | ||
], | ||
) |
pkg/sql/scheduledlogging/captured_index_usage_stats.go
@@ -0,0 +1,228 @@
// Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package scheduledlogging

import (
  "context"
  "fmt"
  "time"

  "github.com/cockroachdb/cockroach/pkg/roachpb"
  "github.com/cockroachdb/cockroach/pkg/security"
  "github.com/cockroachdb/cockroach/pkg/settings"
  "github.com/cockroachdb/cockroach/pkg/settings/cluster"
  "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
  "github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
  "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
  "github.com/cockroachdb/cockroach/pkg/util/log"
  "github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
  "github.com/cockroachdb/cockroach/pkg/util/stop"
  "github.com/cockroachdb/cockroach/pkg/util/syncutil"
  "github.com/cockroachdb/errors"
)

var telemetryCaptureIndexUsageStatsEnabled = settings.RegisterBoolSetting(
  settings.TenantWritable,
  "sql.telemetry.capture_index_usage_stats.enabled",
  "enable/disable capturing index usage statistics to the telemetry logging channel",
  true,
)

const captureIndexUsageStatsScheduleInterval = 8 * time.Second

// CaptureIndexUsageStatsEmitter type implements the ScheduledLogEmitter interface.
type CaptureIndexUsageStatsEmitter struct {
  syncutil.Mutex
  isRunning bool
}

// Emit implements the ScheduledLogEmitter interface.
func (s *CaptureIndexUsageStatsEmitter) Emit(
  ctx context.Context, ie sqlutil.InternalExecutor, stopper *stop.Stopper,
) error {

  allDatabaseNames, err := getAllDatabaseNames(ctx, ie)
  if err != nil {
    return err
  }

  // Capture index usage statistics for each database.
  var ok bool
  expectedNumDatums := 10
  var allCapturedIndexUsageStats []eventpb.EventPayload
  for _, databaseName := range allDatabaseNames {
    // Omit index usage statistics of the 'system' database.
    if databaseName == "system" {
      continue
    }
    stmt := fmt.Sprintf(`
      SELECT
        '%s' as database_name,
        ti.descriptor_name as table_name,
        ti.descriptor_id as table_id,
        ti.index_name,
        ti.index_id,
        ti.index_type,
        ti.is_unique,
        ti.is_inverted,
        total_reads,
        last_read
      FROM %s.crdb_internal.index_usage_statistics AS us
      JOIN %s.crdb_internal.table_indexes ti
        ON us.index_id = ti.index_id
        AND us.table_id = ti.descriptor_id
      ORDER BY total_reads ASC;
    `, databaseName, databaseName, databaseName)

    it, err := ie.QueryIteratorEx(
      ctx,
      "capture-index-usage-stats",
      nil,
      sessiondata.InternalExecutorOverride{User: security.NodeUserName()},
      stmt,
    )
    if err != nil {
      return err
    }

    for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) {
      var row tree.Datums
      if row = it.Cur(); row == nil {
        return errors.New("unexpected null row while capturing index usage stats")
      }

      if row.Len() != expectedNumDatums {
        return errors.Newf("expected %d columns, received %d while capturing index usage stats", expectedNumDatums, row.Len())
      }

      databaseName := tree.MustBeDString(row[0])
      tableName := tree.MustBeDString(row[1])
      tableID := tree.MustBeDInt(row[2])
      indexName := tree.MustBeDString(row[3])
      indexID := tree.MustBeDInt(row[4])
      indexType := tree.MustBeDString(row[5])
      isUnique := tree.MustBeDBool(row[6])
      isInverted := tree.MustBeDBool(row[7])
      totalReads := uint64(tree.MustBeDInt(row[8]))
      lastRead := time.Time{}
      if row[9] != tree.DNull {
        lastRead = tree.MustBeDTimestampTZ(row[9]).Time
      }

      capturedIndexStats := &eventpb.CapturedIndexUsageStats{
        TableID:        uint32(roachpb.TableID(tableID)),
        IndexID:        uint32(roachpb.IndexID(indexID)),
        TotalReadCount: totalReads,
        LastRead:       lastRead.String(),
        DatabaseName:   string(databaseName),
        TableName:      string(tableName),
        IndexName:      string(indexName),
        IndexType:      string(indexType),
        IsUnique:       bool(isUnique),
        IsInverted:     bool(isInverted),
      }

      allCapturedIndexUsageStats = append(allCapturedIndexUsageStats, capturedIndexStats)
    }
    err = it.Close()
    if err != nil {
      return err
    }
  }
  s.logIndexUsageStatsWithDelay(ctx, allCapturedIndexUsageStats, stopper)
  return nil
}

// logIndexUsageStatsWithDelay logs a slice of eventpb.EventPayload at
// one-second intervals (one log per second) to avoid exceeding the
// 10 log-line per second limit per node on the telemetry logging pipeline.
func (s *CaptureIndexUsageStatsEmitter) logIndexUsageStatsWithDelay(
  ctx context.Context, events []eventpb.EventPayload, stopper *stop.Stopper,
) {
  _ = stopper.RunAsyncTask(ctx, "logging-index-usage-stats-with-delay", func(ctx context.Context) {
    s.Lock()
    s.isRunning = true
    s.Unlock()
    defer func() {
      s.Lock()
      s.isRunning = false
      s.Unlock()
    }()

    ticker := time.NewTicker(1000 * time.Millisecond)

    for len(events) > 0 {
      select {
      case <-stopper.ShouldQuiesce():
        ticker.Stop()
        return
      case <-ticker.C:
        event := events[0]
        log.StructuredEvent(ctx, event)
        events = events[1:]
      }
    }
    ticker.Stop()
  })
}

func getAllDatabaseNames(ctx context.Context, ie sqlutil.InternalExecutor) ([]string, error) {
  var allDatabaseNames []string
  var ok bool
  var expectedNumDatums = 1

  it, err := ie.QueryIteratorEx(
    ctx,
    "get-all-db-names",
    nil,
    sessiondata.InternalExecutorOverride{User: security.NodeUserName()},
    `SELECT database_name FROM [SHOW DATABASES]`,
  )
  if err != nil {
    return []string{}, err
  }

  // We have to make sure to close the iterator since we might return from the
  // for loop early (before Next() returns false).
  defer func() { err = errors.CombineErrors(err, it.Close()) }()
  for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) {
    var row tree.Datums
    if row = it.Cur(); row == nil {
      return []string{}, errors.New("unexpected null row while capturing index usage stats")
    }
    if row.Len() != expectedNumDatums {
      return []string{}, errors.Newf("expected %d columns, received %d while capturing index usage stats", expectedNumDatums, row.Len())
    }

    databaseName := string(tree.MustBeDString(row[0]))
    allDatabaseNames = append(allDatabaseNames, databaseName)
  }
  return allDatabaseNames, nil
}

// Interval implements the ScheduledLogEmitter interface.
func (s *CaptureIndexUsageStatsEmitter) Interval() time.Duration {
  return captureIndexUsageStatsScheduleInterval
}

// IsEnabled implements the ScheduledLogEmitter interface.
func (s *CaptureIndexUsageStatsEmitter) IsEnabled(cs *cluster.Settings) bool {
  return telemetryCaptureIndexUsageStatsEnabled.Get(&cs.SV)
}

// IsRunning implements the ScheduledLogEmitter interface.
func (s *CaptureIndexUsageStatsEmitter) IsRunning() bool {
  // Take the mutex to avoid racing with the async logging task, which
  // updates isRunning under the same lock.
  s.Lock()
  defer s.Unlock()
  return s.isRunning
}

func init() {
  RegisterLogEmitter(CaptureIndexUsageStatsEmitterType, &CaptureIndexUsageStatsEmitter{})
}
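For orientation, the scheduled logger itself (scheduled_logger.go, part of this commit but not shown above) is expected to poll registered emitters on their Interval and call Emit when the gating setting is on. The loop below is an illustrative sketch only, building on the interface sketched near the top of this page and the imports already used in captured_index_usage_stats.go; startScheduledLogging and its exact wiring are assumptions, not the committed implementation.

// startScheduledLogging is a hypothetical sketch of the polling loop that
// scheduled_logger.go would run for a registered emitter.
func startScheduledLogging(
  ctx context.Context,
  stopper *stop.Stopper,
  cs *cluster.Settings,
  ie sqlutil.InternalExecutor,
  emitter ScheduledLogEmitter,
) {
  _ = stopper.RunAsyncTask(ctx, "scheduled-logging", func(ctx context.Context) {
    ticker := time.NewTicker(emitter.Interval())
    defer ticker.Stop()
    for {
      select {
      case <-stopper.ShouldQuiesce():
        return
      case <-ticker.C:
        // Skip this tick if the cluster setting is off or the previous
        // emission is still draining its log lines.
        if !emitter.IsEnabled(cs) || emitter.IsRunning() {
          continue
        }
        if err := emitter.Emit(ctx, ie, stopper); err != nil {
          log.Warningf(ctx, "error emitting scheduled logs: %v", err)
        }
      }
    }
  })
}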