Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

statistics: add sampling logger #51741

Merged
merged 4 commits into from
Mar 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,9 @@ func (j *DynamicPartitionedTableAnalysisJob) HasNewlyAddedIndex() bool {

// IsValidToAnalyze checks whether the table or partition is valid to analyze.
// We need to check each partition to determine whether the table is valid to analyze.
func (j *DynamicPartitionedTableAnalysisJob) IsValidToAnalyze(sctx sessionctx.Context) (bool, string) {
func (j *DynamicPartitionedTableAnalysisJob) IsValidToAnalyze(
sctx sessionctx.Context,
) (bool, string) {
if valid, failReason := isValidWeight(j.Weight); !valid {
return false, failReason
}
Expand Down
16 changes: 9 additions & 7 deletions pkg/statistics/handle/autoanalyze/priorityqueue/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import (
"time"

"github.com/pingcap/tidb/pkg/sessionctx"
statslogutil "github.com/pingcap/tidb/pkg/statistics/handle/logutil"
"github.com/pingcap/tidb/pkg/statistics/handle/logutil"
statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
"github.com/pingcap/tidb/pkg/util/intest"
"go.uber.org/zap"
Expand Down Expand Up @@ -50,7 +50,9 @@ type AnalysisJob interface {
// It checks the last failed analysis duration and the average analysis duration.
// If the last failed analysis duration is less than 2 times the average analysis duration,
// we skip this table to avoid too much failed analysis.
IsValidToAnalyze(sctx sessionctx.Context) (bool, string)
IsValidToAnalyze(
sctx sessionctx.Context,
) (bool, string)

// Analyze executes the analyze statement within a transaction.
Analyze(
Expand Down Expand Up @@ -98,7 +100,7 @@ func isValidToAnalyze(
lastFailedAnalysisDuration, err :=
GetLastFailedAnalysisDuration(sctx, schema, table, partitionNames...)
if err != nil {
statslogutil.StatsLogger().Warn(
logutil.SingletonStatsSamplerLogger().Warn(
"Fail to get last failed analysis duration",
zap.String("schema", schema),
zap.String("table", table),
Expand All @@ -111,7 +113,7 @@ func isValidToAnalyze(
averageAnalysisDuration, err :=
GetAverageAnalysisDuration(sctx, schema, table, partitionNames...)
if err != nil {
statslogutil.StatsLogger().Warn(
logutil.SingletonStatsSamplerLogger().Warn(
"Fail to get average analysis duration",
zap.String("schema", schema),
zap.String("table", table),
Expand All @@ -124,7 +126,7 @@ func isValidToAnalyze(
// Last analysis just failed, we should not analyze it again.
if lastFailedAnalysisDuration == justFailed {
// The last analysis failed, we should not analyze it again.
statslogutil.StatsLogger().Info(
logutil.SingletonStatsSamplerLogger().Info(
"Skip analysis because the last analysis just failed",
zap.String("schema", schema),
zap.String("table", table),
Expand All @@ -137,7 +139,7 @@ func isValidToAnalyze(
// Skip this table to avoid too much failed analysis.
onlyFailedAnalysis := lastFailedAnalysisDuration != NoRecord && averageAnalysisDuration == NoRecord
if onlyFailedAnalysis && lastFailedAnalysisDuration < defaultFailedAnalysisWaitTime {
statslogutil.StatsLogger().Info(
logutil.SingletonStatsSamplerLogger().Info(
fmt.Sprintf("Skip analysis because the last failed analysis duration is less than %v", defaultFailedAnalysisWaitTime),
zap.String("schema", schema),
zap.String("table", table),
Expand All @@ -151,7 +153,7 @@ func isValidToAnalyze(
meetSkipCondition := lastFailedAnalysisDuration != NoRecord &&
lastFailedAnalysisDuration < 2*averageAnalysisDuration
if meetSkipCondition {
statslogutil.StatsLogger().Info(
logutil.SingletonStatsSamplerLogger().Info(
"Skip analysis because the last failed analysis duration is less than 2 times the average analysis duration",
zap.String("schema", schema),
zap.String("table", table),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,9 @@ func (j *NonPartitionedTableAnalysisJob) HasNewlyAddedIndex() bool {

// IsValidToAnalyze checks whether the table is valid to analyze.
// We will check the last failed job and average analyze duration to determine whether the table is valid to analyze.
func (j *NonPartitionedTableAnalysisJob) IsValidToAnalyze(sctx sessionctx.Context) (bool, string) {
func (j *NonPartitionedTableAnalysisJob) IsValidToAnalyze(
sctx sessionctx.Context,
) (bool, string) {
if valid, failReason := isValidWeight(j.Weight); !valid {
return false, failReason
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,9 @@ func (j *StaticPartitionedTableAnalysisJob) HasNewlyAddedIndex() bool {

// IsValidToAnalyze checks whether the partition is valid to analyze.
// Only the specified static partition is checked.
func (j *StaticPartitionedTableAnalysisJob) IsValidToAnalyze(sctx sessionctx.Context) (bool, string) {
func (j *StaticPartitionedTableAnalysisJob) IsValidToAnalyze(
sctx sessionctx.Context,
) (bool, string) {
if valid, failReason := isValidWeight(j.Weight); !valid {
return false, failReason
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/statistics/handle/autoanalyze/refresher/refresher.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func (r *Refresher) PickOneTableAndAnalyzeByPriority() bool {
if valid, failReason := job.IsValidToAnalyze(
sctx,
); !valid {
statslogutil.StatsLogger().Info(
statslogutil.SingletonStatsSamplerLogger().Info(
"Table is not ready to analyze",
zap.String("failReason", failReason),
zap.Stringer("job", job),
Expand All @@ -116,7 +116,7 @@ func (r *Refresher) PickOneTableAndAnalyzeByPriority() bool {
// Only analyze one table each time.
return true
}
statslogutil.StatsLogger().Debug(
statslogutil.SingletonStatsSamplerLogger().Info(
"No table to analyze",
)
return false
Expand Down
1 change: 1 addition & 0 deletions pkg/statistics/handle/logutil/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,6 @@ go_library(
deps = [
"//pkg/util/logutil",
"@org_uber_go_zap//:zap",
"@org_uber_go_zap//zapcore",
],
)
31 changes: 31 additions & 0 deletions pkg/statistics/handle/logutil/logutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,42 @@
package logutil

import (
"sync"
"time"

"github.com/pingcap/tidb/pkg/util/logutil"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)

// StatsLogger returns the background logger tagged with category "stats",
// used for statistics-related messages only.
// Do not use it to log messages unrelated to statistics.
func StatsLogger() *zap.Logger {
	statsField := zap.String("category", "stats")
	return logutil.BgLogger().With(statsField)
}

var (
	initSamplerLoggerOnce sync.Once
	samplerLogger         *zap.Logger
)

// SingletonStatsSamplerLogger returns a process-wide sampled stats logger
// (category "stats") used to rate-limit repetitive statistics messages.
// The sampler keeps at most the first 2 entries with the same level and
// message per minute and drops the rest within that window.
// NOTE: Do not create a new logger per log call — sampling state (the
// per-level/message counters) lives in this singleton's core, so a fresh
// logger would never suppress anything.
// Do not use it to log messages unrelated to statistics.
func SingletonStatsSamplerLogger() *zap.Logger {
	// sync.Once guarantees the initializer runs exactly once, so no
	// additional nil check on samplerLogger is needed.
	initSamplerLoggerOnce.Do(func() {
		sampler := zap.WrapCore(func(core zapcore.Core) zapcore.Core {
			// first 2 entries per (level, message) each minute; thereafter 0 (drop all).
			return zapcore.NewSamplerWithOptions(core, time.Minute, 2, 0)
		})
		samplerLogger = StatsLogger().WithOptions(sampler)
	})
	return samplerLogger
}