sql, import: add metrics for max_row_size guardrails #69457

Merged · 3 commits · Aug 28, 2021
4 changes: 2 additions & 2 deletions docs/generated/settings/settings-for-tenants.txt
@@ -119,6 +119,8 @@ sql.defaults.vectorize enumeration on default vectorize mode [on = 0, on = 2, ex
sql.defaults.zigzag_join.enabled boolean true default value for enable_zigzag_join session setting; allows use of zig-zag join by default
sql.distsql.max_running_flows integer 500 maximum number of concurrent flows that can be run on a node
sql.distsql.temp_storage.workmem byte size 64 MiB maximum amount of memory in bytes a processor can use before falling back to temp storage
sql.guardrails.max_row_size_err byte size 512 MiB maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disable
sql.guardrails.max_row_size_log byte size 64 MiB maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disable
sql.log.slow_query.experimental_full_table_scans.enabled boolean false when set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.internal_queries.enabled boolean false when set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.latency_threshold duration 0s when set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each node
@@ -134,8 +136,6 @@ sql.metrics.statement_details.plan_collection.period duration 5m0s the time unti
sql.metrics.statement_details.threshold duration 0s minimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.
sql.metrics.transaction_details.enabled boolean true collect per-application transaction statistics
sql.multiregion.drop_primary_region.enabled boolean true allows dropping the PRIMARY REGION of a database if it is the last region
sql.mutations.max_row_size.err byte size 512 MiB maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; setting to 0 disables large row errors
sql.mutations.max_row_size.log byte size 64 MiB maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); setting to 0 disables large row logging
sql.notices.enabled boolean true enable notices in the server/client protocol being sent
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled boolean false if enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probability
sql.spatial.experimental_box2d_comparison_operators.enabled boolean false enables the use of certain experimental box2d comparison operators
4 changes: 2 additions & 2 deletions docs/generated/settings/settings.html
@@ -123,6 +123,8 @@
<tr><td><code>sql.defaults.zigzag_join.enabled</code></td><td>boolean</td><td><code>true</code></td><td>default value for enable_zigzag_join session setting; allows use of zig-zag join by default</td></tr>
<tr><td><code>sql.distsql.max_running_flows</code></td><td>integer</td><td><code>500</code></td><td>maximum number of concurrent flows that can be run on a node</td></tr>
<tr><td><code>sql.distsql.temp_storage.workmem</code></td><td>byte size</td><td><code>64 MiB</code></td><td>maximum amount of memory in bytes a processor can use before falling back to temp storage</td></tr>
<tr><td><code>sql.guardrails.max_row_size_err</code></td><td>byte size</td><td><code>512 MiB</code></td><td>maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disable</td></tr>
<tr><td><code>sql.guardrails.max_row_size_log</code></td><td>byte size</td><td><code>64 MiB</code></td><td>maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disable</td></tr>
<tr><td><code>sql.log.slow_query.experimental_full_table_scans.enabled</code></td><td>boolean</td><td><code>false</code></td><td>when set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.</td></tr>
<tr><td><code>sql.log.slow_query.internal_queries.enabled</code></td><td>boolean</td><td><code>false</code></td><td>when set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.</td></tr>
<tr><td><code>sql.log.slow_query.latency_threshold</code></td><td>duration</td><td><code>0s</code></td><td>when set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each node</td></tr>
@@ -138,8 +140,6 @@
<tr><td><code>sql.metrics.statement_details.threshold</code></td><td>duration</td><td><code>0s</code></td><td>minimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.</td></tr>
<tr><td><code>sql.metrics.transaction_details.enabled</code></td><td>boolean</td><td><code>true</code></td><td>collect per-application transaction statistics</td></tr>
<tr><td><code>sql.multiregion.drop_primary_region.enabled</code></td><td>boolean</td><td><code>true</code></td><td>allows dropping the PRIMARY REGION of a database if it is the last region</td></tr>
<tr><td><code>sql.mutations.max_row_size.err</code></td><td>byte size</td><td><code>512 MiB</code></td><td>maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; setting to 0 disables large row errors</td></tr>
<tr><td><code>sql.mutations.max_row_size.log</code></td><td>byte size</td><td><code>64 MiB</code></td><td>maximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); setting to 0 disables large row logging</td></tr>
<tr><td><code>sql.notices.enabled</code></td><td>boolean</td><td><code>true</code></td><td>enable notices in the server/client protocol being sent</td></tr>
<tr><td><code>sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled</code></td><td>boolean</td><td><code>false</code></td><td>if enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probability</td></tr>
<tr><td><code>sql.spatial.experimental_box2d_comparison_operators.enabled</code></td><td>boolean</td><td><code>false</code></td><td>enables the use of certain experimental box2d comparison operators</td></tr>
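Note on the rename: the two guardrail settings move from sql.mutations.max_row_size.{err,log} to sql.guardrails.max_row_size_{err,log}, so any tooling or tests that set them must be updated. Below is a minimal Go test sketch (not part of this PR; the table name and row sizes are illustrative) that exercises the renamed settings in the style of pkg/sql/event_log_test.go:

package sql_test

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)

// TestMaxRowSizeGuardrailsSketch is an illustrative sketch, not a test added
// by this PR.
func TestMaxRowSizeGuardrailsSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	db := sqlutils.MakeSQLRunner(sqlDB)

	// Use the renamed settings; the old sql.mutations.max_row_size.* names no
	// longer exist after this change.
	db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_log = '1KiB'`)
	db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_err = '2KiB'`)

	db.Exec(t, `CREATE TABLE big (i INT PRIMARY KEY, s STRING)`)
	// Between the log and err thresholds: the write succeeds and an event is
	// emitted to the SQL_PERF channel.
	db.Exec(t, `INSERT INTO big VALUES (1, repeat('x', 1500))`)
	// Above the err threshold: the write is rejected.
	db.ExpectErr(t, `row larger than max row size`,
		`INSERT INTO big VALUES (2, repeat('x', 3000))`)
}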
4 changes: 2 additions & 2 deletions pkg/ccl/backupccl/testdata/backup-restore/max-row-size
@@ -14,7 +14,7 @@ SELECT i, pg_column_size(s) FROM maxrow ORDER BY i;
1 20004

exec-sql
SET CLUSTER SETTING sql.mutations.max_row_size.err = '16KiB';
SET CLUSTER SETTING sql.guardrails.max_row_size_err = '16KiB';
----

query-sql
@@ -39,7 +39,7 @@ INSERT INTO d2.maxrow VALUES (2, repeat('y', 20000));
pq: row larger than max row size: table 57 family 0 primary key /Table/57/1/2/0 size 20013

exec-sql
SET CLUSTER SETTING sql.mutations.max_row_size.err = default;
SET CLUSTER SETTING sql.guardrails.max_row_size_err = DEFAULT;
INSERT INTO d2.maxrow VALUES (2, repeat('y', 20000));
----

3 changes: 2 additions & 1 deletion pkg/ccl/importccl/read_import_avro_test.go
@@ -254,7 +254,8 @@ func (th *testHelper) newRecordStream(

conv, err := row.NewDatumRowConverter(
context.Background(), th.schemaTable, nil, th.evalCtx.Copy(), nil,
nil /* seqChunkProvider */)
nil /* seqChunkProvider */, nil, /* metrics */
)
require.NoError(t, err)
return &testRecordStream{
producer: producer,
2 changes: 1 addition & 1 deletion pkg/ccl/importccl/read_import_base.go
@@ -447,7 +447,7 @@ func makeDatumConverter(
) (*row.DatumRowConverter, error) {
conv, err := row.NewDatumRowConverter(
ctx, importCtx.tableDesc, importCtx.targetCols, importCtx.evalCtx, importCtx.kvCh,
importCtx.seqChunkProvider)
importCtx.seqChunkProvider, nil /* metrics */)
if err == nil {
conv.KvBatch.Source = fileCtx.source
}
2 changes: 1 addition & 1 deletion pkg/ccl/importccl/read_import_mysql.go
@@ -76,7 +76,7 @@ func newMysqldumpReader(
continue
}
conv, err := row.NewDatumRowConverter(ctx, tabledesc.NewBuilder(table.Desc).BuildImmutableTable(),
nil /* targetColNames */, evalCtx, kvCh, nil /* seqChunkProvider */)
nil /* targetColNames */, evalCtx, kvCh, nil /* seqChunkProvider */, nil /* metrics */)
if err != nil {
return nil, err
}
2 changes: 1 addition & 1 deletion pkg/ccl/importccl/read_import_pgdump.go
@@ -959,7 +959,7 @@ func newPgDumpReader(
colSubMap[col.GetName()] = i
}
conv, err := row.NewDatumRowConverter(ctx, tableDesc, targetCols, evalCtx, kvCh,
nil /* seqChunkProvider */)
nil /* seqChunkProvider */, nil /* metrics */)
if err != nil {
return nil, err
}
2 changes: 1 addition & 1 deletion pkg/ccl/importccl/read_import_workload.go
@@ -218,7 +218,7 @@ func NewWorkloadKVConverter(
// This worker needs its own EvalContext and DatumAlloc.
func (w *WorkloadKVConverter) Worker(ctx context.Context, evalCtx *tree.EvalContext) error {
conv, err := row.NewDatumRowConverter(ctx, w.tableDesc, nil /* targetColNames */, evalCtx,
w.kvCh, nil /* seqChunkProvider */)
w.kvCh, nil /* seqChunkProvider */, nil /* metrics */)
if err != nil {
return err
}
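All of the importccl call sites above pass nil for the new trailing metrics argument of row.NewDatumRowConverter, which presumably leaves the guardrail counters untouched on those paths. A caller that does want the counters would thread a *row.Metrics through, for example via the FlowCtx accessor used by the distributed column backfiller later in this diff. A hedged sketch follows; the helper name and exact parameter types are assumptions, not taken from this PR:

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/row"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// makeConverterWithMetrics is a hypothetical helper showing how a caller with
// a FlowCtx in scope could pass row metrics instead of nil.
func makeConverterWithMetrics(
	ctx context.Context,
	flowCtx *execinfra.FlowCtx,
	tableDesc catalog.TableDescriptor,
	evalCtx *tree.EvalContext,
	kvCh chan row.KVBatch,
) (*row.DatumRowConverter, error) {
	return row.NewDatumRowConverter(
		ctx, tableDesc, nil /* targetColNames */, evalCtx, kvCh,
		nil /* seqChunkProvider */, flowCtx.GetRowMetrics(),
	)
}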
16 changes: 12 additions & 4 deletions pkg/server/server_sql.go
@@ -392,9 +392,6 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
}
cfg.registry.AddMetricStruct(jobRegistry.MetricsStruct())

distSQLMetrics := execinfra.MakeDistSQLMetrics(cfg.HistogramWindowInterval())
cfg.registry.AddMetricStruct(distSQLMetrics)

// Set up Lease Manager
var lmKnobs lease.ManagerTestingKnobs
if leaseManagerTestingKnobs := cfg.TestingKnobs.SQLLeaseManager; leaseManagerTestingKnobs != nil {
@@ -469,6 +466,13 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
}
}))

distSQLMetrics := execinfra.MakeDistSQLMetrics(cfg.HistogramWindowInterval())
cfg.registry.AddMetricStruct(distSQLMetrics)
rowMetrics := sql.NewRowMetrics(false /* internal */)
cfg.registry.AddMetricStruct(rowMetrics)
internalRowMetrics := sql.NewRowMetrics(true /* internal */)
cfg.registry.AddMetricStruct(internalRowMetrics)

virtualSchemas, err := sql.NewVirtualSchemaHolder(ctx, cfg.Settings)
if err != nil {
return nil, errors.Wrap(err, "creating virtual schema holder")
@@ -538,7 +542,9 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
return bulk.MakeBulkAdder(ctx, db, cfg.distSender.RangeDescriptorCache(), cfg.Settings, ts, opts, bulkMon)
},

Metrics: &distSQLMetrics,
Metrics: &distSQLMetrics,
RowMetrics: &rowMetrics,
InternalRowMetrics: &internalRowMetrics,

SQLLivenessReader: cfg.sqlLivenessProvider,
JobRegistry: jobRegistry,
@@ -668,6 +674,8 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
),

QueryCache: querycache.New(cfg.QueryCacheSize),
RowMetrics: &rowMetrics,
InternalRowMetrics: &internalRowMetrics,
ProtectedTimestampProvider: cfg.protectedtsProvider,
ExternalIODirConfig: cfg.ExternalIODirConfig,
GCJobNotifier: gcJobNotifier,
26 changes: 19 additions & 7 deletions pkg/sql/backfill.go
@@ -802,6 +802,7 @@ func TruncateInterleavedIndexes(
if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
rd := row.MakeDeleter(
codec, table, nil /* requestedCols */, &execCfg.Settings.SV, true, /* internal */
execCfg.GetRowMetrics(true /* internal */),
)
td := tableDeleter{rd: rd, alloc: alloc}
if err := td.init(ctx, txn, nil /* *tree.EvalContext */); err != nil {
@@ -883,7 +884,7 @@ func (sc *SchemaChanger) truncateIndexes(
}
rd := row.MakeDeleter(
sc.execCfg.Codec, tableDesc, nil /* requestedCols */, &sc.settings.SV,
true, /* internal */
true /* internal */, sc.execCfg.GetRowMetrics(true /* internal */),
)
td := tableDeleter{rd: rd, alloc: alloc}
if err := td.init(ctx, txn, nil /* *tree.EvalContext */); err != nil {
@@ -2044,7 +2045,10 @@ func runSchemaChangesInTxn(
return AlterColTypeInTxnNotSupportedErr
} else if col := m.AsColumn(); col != nil {
if !doneColumnBackfill && catalog.ColumnNeedsBackfill(col) {
if err := columnBackfillInTxn(ctx, planner.Txn(), planner.EvalContext(), planner.SemaCtx(), immutDesc, traceKV); err != nil {
if err := columnBackfillInTxn(
ctx, planner.Txn(), planner.ExecCfg(), planner.EvalContext(), planner.SemaCtx(),
immutDesc, traceKV,
); err != nil {
return err
}
doneColumnBackfill = true
@@ -2065,7 +2069,8 @@
if col := m.AsColumn(); col != nil {
if !doneColumnBackfill && catalog.ColumnNeedsBackfill(col) {
if err := columnBackfillInTxn(
ctx, planner.Txn(), planner.EvalContext(), planner.SemaCtx(), immutDesc, traceKV,
ctx, planner.Txn(), planner.ExecCfg(), planner.EvalContext(), planner.SemaCtx(),
immutDesc, traceKV,
); err != nil {
return err
}
@@ -2399,6 +2404,7 @@ func validateUniqueWithoutIndexConstraintInTxn(
func columnBackfillInTxn(
ctx context.Context,
txn *kv.Txn,
execCfg *ExecutorConfig,
evalCtx *tree.EvalContext,
semaCtx *tree.SemaContext,
tableDesc catalog.TableDescriptor,
@@ -2414,8 +2420,11 @@ func columnBackfillInTxn(
columnBackfillerMon = execinfra.NewMonitor(ctx, evalCtx.Mon, "local-column-backfill-mon")
}

rowMetrics := execCfg.GetRowMetrics(evalCtx.SessionData().Internal)
var backfiller backfill.ColumnBackfiller
if err := backfiller.InitForLocalUse(ctx, evalCtx, semaCtx, tableDesc, columnBackfillerMon); err != nil {
if err := backfiller.InitForLocalUse(
ctx, evalCtx, semaCtx, tableDesc, columnBackfillerMon, rowMetrics,
); err != nil {
return err
}
defer backfiller.Close(ctx)
@@ -2453,7 +2462,9 @@ func indexBackfillInTxn(
}

var backfiller backfill.IndexBackfiller
if err := backfiller.InitForLocalUse(ctx, evalCtx, semaCtx, tableDesc, indexBackfillerMon); err != nil {
if err := backfiller.InitForLocalUse(
ctx, evalCtx, semaCtx, tableDesc, indexBackfillerMon,
); err != nil {
return err
}
defer backfiller.Close(ctx)
@@ -2485,9 +2496,10 @@ func indexTruncateInTxn(
alloc := &rowenc.DatumAlloc{}
var sp roachpb.Span
for done := false; !done; done = sp.Key == nil {
internal := evalCtx.SessionData().Internal
rd := row.MakeDeleter(
execCfg.Codec, tableDesc, nil /* requestedCols */, &execCfg.Settings.SV,
evalCtx.SessionData().Internal,
execCfg.Codec, tableDesc, nil /* requestedCols */, &execCfg.Settings.SV, internal,
execCfg.GetRowMetrics(internal),
)
td := tableDeleter{rd: rd, alloc: alloc}
if err := td.init(ctx, txn, evalCtx); err != nil {
11 changes: 9 additions & 2 deletions pkg/sql/backfill/backfill.go
@@ -67,6 +67,8 @@ type ColumnBackfiller struct {

// mon is a memory monitor linked with the ColumnBackfiller on creation.
mon *mon.BytesMonitor

rowMetrics *row.Metrics
}

// initCols is a helper to populate some column metadata on a ColumnBackfiller.
@@ -91,6 +93,7 @@ func (cb *ColumnBackfiller) init(
computedExprs []tree.TypedExpr,
desc catalog.TableDescriptor,
mon *mon.BytesMonitor,
rowMetrics *row.Metrics,
) error {
cb.evalCtx = evalCtx
cb.updateCols = append(cb.added, cb.dropped...)
@@ -130,6 +133,7 @@
return errors.AssertionFailedf("no memory monitor linked to ColumnBackfiller during init")
}
cb.mon = mon
cb.rowMetrics = rowMetrics

return cb.fetcher.Init(
evalCtx.Context,
@@ -154,6 +158,7 @@ func (cb *ColumnBackfiller) InitForLocalUse(
semaCtx *tree.SemaContext,
desc catalog.TableDescriptor,
mon *mon.BytesMonitor,
rowMetrics *row.Metrics,
) error {
cb.initCols(desc)
defaultExprs, err := schemaexpr.MakeDefaultExprs(
@@ -174,7 +179,7 @@
if err != nil {
return err
}
return cb.init(evalCtx, defaultExprs, computedExprs, desc, mon)
return cb.init(evalCtx, defaultExprs, computedExprs, desc, mon, rowMetrics)
}

// InitForDistributedUse initializes a ColumnBackfiller for use as part of a
@@ -230,7 +235,8 @@ func (cb *ColumnBackfiller) InitForDistributedUse(
// entire backfill process.
flowCtx.TypeResolverFactory.Descriptors.ReleaseAll(ctx)

return cb.init(evalCtx, defaultExprs, computedExprs, desc, mon)
rowMetrics := flowCtx.GetRowMetrics()
return cb.init(evalCtx, defaultExprs, computedExprs, desc, mon, rowMetrics)
}

// Close frees the resources used by the ColumnBackfiller.
@@ -269,6 +275,7 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk(
&cb.alloc,
&cb.evalCtx.Settings.SV,
cb.evalCtx.SessionData().Internal,
cb.rowMetrics,
)
if err != nil {
return roachpb.Key{}, err
4 changes: 3 additions & 1 deletion pkg/sql/create_table.go
@@ -456,6 +456,7 @@ func (n *createTableNode) startExec(params runParams) error {

// Instantiate a row inserter and table writer. It has a 1-1
// mapping to the definitions in the descriptor.
internal := params.p.SessionData().Internal
ri, err := row.MakeInserter(
params.ctx,
params.p.txn,
@@ -464,7 +465,8 @@
desc.PublicColumns(),
params.p.alloc,
&params.ExecCfg().Settings.SV,
params.p.SessionData().Internal,
internal,
params.ExecCfg().GetRowMetrics(internal),
)
if err != nil {
return err
7 changes: 5 additions & 2 deletions pkg/sql/event_log_test.go
@@ -693,8 +693,11 @@ func TestPerfLogging(t *testing.T) {

// Enable slow query logging and large row logging.
db.Exec(t, `SET CLUSTER SETTING sql.log.slow_query.latency_threshold = '128ms'`)
db.Exec(t, `SET CLUSTER SETTING sql.mutations.max_row_size.log = '1KiB'`)
db.Exec(t, `SET CLUSTER SETTING sql.mutations.max_row_size.err = '2KiB'`)
db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_log = '1KiB'`)
db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_err = '2KiB'`)
defer db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_err = DEFAULT`)
defer db.Exec(t, `SET CLUSTER SETTING sql.guardrails.max_row_size_log = DEFAULT`)
defer db.Exec(t, `SET CLUSTER SETTING sql.log.slow_query.latency_threshold = DEFAULT`)

// Test schema.
db.Exec(t, `CREATE TABLE t (i INT PRIMARY KEY, b BOOL, s STRING)`)
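A natural follow-on for this test would be to assert that the new counters actually move. The sketch below assumes the exported metric names follow a sql.guardrails.max_row_size_{log,err}.count pattern and that the TestPerfLogging server handle s exposes MustGetSQLCounter; neither is shown in this excerpt of the diff.

// Illustrative only, inside TestPerfLogging after the settings above are set;
// the metric name and the availability of s/db in scope are assumptions.
logCountBefore := s.MustGetSQLCounter("sql.guardrails.max_row_size_log.count")
db.Exec(t, `INSERT INTO t VALUES (100, false, repeat('x', 1500))`)
logCountAfter := s.MustGetSQLCounter("sql.guardrails.max_row_size_log.count")
if logCountAfter != logCountBefore+1 {
	t.Errorf("expected max_row_size_log count to increase by 1: %d -> %d",
		logCountBefore, logCountAfter)
}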
21 changes: 21 additions & 0 deletions pkg/sql/exec_util.go
@@ -71,6 +71,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
@@ -1051,6 +1052,8 @@ type ExecutorConfig struct {

SchemaChangerMetrics *SchemaChangerMetrics
FeatureFlagMetrics *featureflag.DenialMetrics
RowMetrics *row.Metrics
InternalRowMetrics *row.Metrics

TestingKnobs ExecutorTestingKnobs
MigrationTestingKnobs *migration.TestingKnobs
@@ -3002,3 +3005,21 @@ func DescsTxn(
) error {
return execCfg.CollectionFactory.Txn(ctx, execCfg.InternalExecutor, execCfg.DB, f)
}

// NewRowMetrics creates a row.Metrics struct for either internal or user
// queries.
func NewRowMetrics(internal bool) row.Metrics {
return row.Metrics{
MaxRowSizeLogCount: metric.NewCounter(getMetricMeta(row.MetaMaxRowSizeLog, internal)),
MaxRowSizeErrCount: metric.NewCounter(getMetricMeta(row.MetaMaxRowSizeErr, internal)),
}
}

// GetRowMetrics returns the proper RowMetrics for either internal or user
// queries.
func (cfg *ExecutorConfig) GetRowMetrics(internal bool) *row.Metrics {
if internal {
return cfg.InternalRowMetrics
}
return cfg.RowMetrics
}
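NewRowMetrics and GetRowMetrics above depend on the row.Metrics struct and the MetaMaxRowSizeLog/MetaMaxRowSizeErr metadata defined in pkg/sql/row, and on getMetricMeta presumably rewriting the metadata (for example, marking the name as internal) when internal is true; those files are not included in this excerpt. A plausible sketch of the pkg/sql/row side, for illustration only and with the metric names assumed:

package row

import "github.com/cockroachdb/cockroach/pkg/util/metric"

// Metrics holds the counters incremented when a mutation exceeds the
// max_row_size guardrails.
type Metrics struct {
	MaxRowSizeLogCount *metric.Counter
	MaxRowSizeErrCount *metric.Counter
}

// MetricStruct marks Metrics as registerable via registry.AddMetricStruct,
// as done in server_sql.go above.
func (Metrics) MetricStruct() {}

var (
	// MetaMaxRowSizeLog describes the counter bumped when a row crosses
	// sql.guardrails.max_row_size_log.
	MetaMaxRowSizeLog = metric.Metadata{
		Name:        "sql.guardrails.max_row_size_log.count", // name assumed
		Help:        "Number of rows observed violating sql.guardrails.max_row_size_log",
		Measurement: "Rows",
		Unit:        metric.Unit_COUNT,
	}
	// MetaMaxRowSizeErr describes the counter bumped when a row crosses
	// sql.guardrails.max_row_size_err.
	MetaMaxRowSizeErr = metric.Metadata{
		Name:        "sql.guardrails.max_row_size_err.count", // name assumed
		Help:        "Number of rows observed violating sql.guardrails.max_row_size_err",
		Measurement: "Rows",
		Unit:        metric.Unit_COUNT,
	}
)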