From ca378d17ca0e3c9854411797179243b661ce1e97 Mon Sep 17 00:00:00 2001
From: YangKeao
Date: Fri, 22 Mar 2024 17:47:15 +0800
Subject: [PATCH 1/2] This is an automated cherry-pick of #51996

Signed-off-by: ti-chi-bot
---
 pkg/executor/simple.go                        | 11 +++
 pkg/metrics/grafana/tidb_summary.json         | 22 ++++-
 pkg/metrics/grafana/tidb_summary.jsonnet      | 14 ++-
 pkg/server/conn.go                            | 39 ++++----
 pkg/server/conn_test.go                       |  4 +
 .../internal/testserverclient/BUILD.bazel     |  2 +
 .../testserverclient/server_client.go         | 66 +++++++++++++
 pkg/server/server.go                          | 38 +++----
 pkg/server/tests/commontest/BUILD.bazel       | 50 ++++++++++
 pkg/server/tests/tidb_test.go                 | 98 +++++++++++++++++++
 10 files changed, 296 insertions(+), 48 deletions(-)
 create mode 100644 pkg/server/tests/commontest/BUILD.bazel

diff --git a/pkg/executor/simple.go b/pkg/executor/simple.go
index 19e52a9f3ae16..3355feeceba0a 100644
--- a/pkg/executor/simple.go
+++ b/pkg/executor/simple.go
@@ -41,6 +41,11 @@ import (
 	"github.com/pingcap/tidb/pkg/expression"
 	"github.com/pingcap/tidb/pkg/infoschema"
 	"github.com/pingcap/tidb/pkg/kv"
+<<<<<<< HEAD
+=======
+	"github.com/pingcap/tidb/pkg/meta"
+	"github.com/pingcap/tidb/pkg/metrics"
+>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 	"github.com/pingcap/tidb/pkg/parser/ast"
 	"github.com/pingcap/tidb/pkg/parser/auth"
 	"github.com/pingcap/tidb/pkg/parser/model"
@@ -2823,6 +2828,7 @@ func (e *SimpleExec) executeAdminFlushPlanCache(s *ast.AdminStmt) error {
 }
 
 func (e *SimpleExec) executeSetResourceGroupName(s *ast.SetResourceGroupStmt) error {
+	originalResourceGroup := e.Ctx().GetSessionVars().ResourceGroupName
 	if s.Name.L != "" {
 		if _, ok := e.is.ResourceGroupByName(s.Name); !ok {
 			return infoschema.ErrResourceGroupNotExists.GenWithStackByArgs(s.Name.O)
@@ -2831,6 +2837,11 @@ func (e *SimpleExec) executeSetResourceGroupName(s *ast.SetResourceGroupStmt) er
 	} else {
 		e.Ctx().GetSessionVars().ResourceGroupName = resourcegroup.DefaultResourceGroupName
 	}
+	newResourceGroup := e.Ctx().GetSessionVars().ResourceGroupName
+	if originalResourceGroup != newResourceGroup {
+		metrics.ConnGauge.WithLabelValues(originalResourceGroup).Dec()
+		metrics.ConnGauge.WithLabelValues(newResourceGroup).Inc()
+	}
 	return nil
 }
 
diff --git a/pkg/metrics/grafana/tidb_summary.json b/pkg/metrics/grafana/tidb_summary.json
index aa2611e772aab..102cebcec9ba8 100644
--- a/pkg/metrics/grafana/tidb_summary.json
+++ b/pkg/metrics/grafana/tidb_summary.json
@@ -163,7 +163,7 @@
           "expr": "tidb_server_connections{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "{{instance}}",
+          "legendFormat": "{{instance}} {{resource_group}}",
           "refId": "A"
         },
         {
@@ -258,6 +258,16 @@
           "intervalFactor": 2,
           "legendFormat": "{{instance}}",
           "refId": "A"
+<<<<<<< HEAD
+=======
+        },
+        {
+          "expr": "tidb_server_maxprocs{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "quota-{{instance}}",
+          "refId": "B"
+>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
         }
       ],
       "thresholds": [ ],
@@ -351,6 +361,16 @@
           "intervalFactor": 2,
           "legendFormat": "HeapInuse-{{instance}}",
           "refId": "B"
+<<<<<<< HEAD
+=======
+        },
+        {
+          "expr": "tidb_server_memory_quota_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", job=\"tidb\"}",
+          "format": "time_series",
"time_series", + "intervalFactor": 2, + "legendFormat": "quota-{{instance}}", + "refId": "C" +>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996)) } ], "thresholds": [ ], diff --git a/pkg/metrics/grafana/tidb_summary.jsonnet b/pkg/metrics/grafana/tidb_summary.jsonnet index e11d6587e0429..e9b4374b61310 100644 --- a/pkg/metrics/grafana/tidb_summary.jsonnet +++ b/pkg/metrics/grafana/tidb_summary.jsonnet @@ -111,7 +111,7 @@ local connectionP = graphPanel.new( .addTarget( prometheus.target( 'tidb_server_connections{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance"}', - legendFormat='{{instance}}', + legendFormat='{{instance}} {{resource_group}}', ) ) .addTarget( @@ -133,6 +133,12 @@ local cpuP = graphPanel.new( 'rate(process_cpu_seconds_total{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}[1m])', legendFormat='{{instance}}', ) +) +.addTarget( + prometheus.target( + 'tidb_server_maxprocs{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance"}', + legendFormat='quota-{{instance}}', + ) ); local memP = graphPanel.new( @@ -153,6 +159,12 @@ local memP = graphPanel.new( 'go_memory_classes_heap_objects_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"} + go_memory_classes_heap_unused_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}', legendFormat='HeapInuse-{{instance}}', ) +) +.addTarget( + prometheus.target( + 'tidb_server_memory_quota_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}', + legendFormat='quota-{{instance}}', + ) ); // Query Summary diff --git a/pkg/server/conn.go b/pkg/server/conn.go index e3fb01e3d03d1..c9fb261f379f6 100644 --- a/pkg/server/conn.go +++ b/pkg/server/conn.go @@ -337,28 +337,30 @@ func (cc *clientConn) handshake(ctx context.Context) error { } func (cc *clientConn) Close() error { + // Be careful, this function should be re-entrant. It might be called more than once for a single connection. + // Any logic which is not idempotent should be in closeConn() and wrapped with `cc.closeOnce.Do`, like decresing + // metrics, releasing resources, etc. + // + // TODO: avoid calling this function multiple times. It's not intuitive that a connection can be closed multiple + // times. cc.server.rwlock.Lock() delete(cc.server.clients, cc.connectionID) - resourceGroupName, count := "", 0 - if ctx := cc.getCtx(); ctx != nil { - resourceGroupName = ctx.GetSessionVars().ResourceGroupName - count = cc.server.ConnNumByResourceGroup[resourceGroupName] - if count <= 1 { - delete(cc.server.ConnNumByResourceGroup, resourceGroupName) - } else { - cc.server.ConnNumByResourceGroup[resourceGroupName]-- - } - } cc.server.rwlock.Unlock() - return closeConn(cc, resourceGroupName, count) + return closeConn(cc) } // closeConn is idempotent and thread-safe. // It will be called on the same `clientConn` more than once to avoid connection leak. 
+<<<<<<< HEAD
 func closeConn(cc *clientConn, resourceGroupName string, connections int) error {
 	var err error
 	cc.closeOnce.Do(func() {
 		metrics.ConnGauge.WithLabelValues(resourceGroupName).Set(float64(connections))
+=======
+func closeConn(cc *clientConn) error {
+	var err error
+	cc.closeOnce.Do(func() {
+>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 		if cc.connectionID > 0 {
 			cc.server.dom.ReleaseConnID(cc.connectionID)
 			cc.connectionID = 0
@@ -372,8 +374,12 @@ func closeConn(cc *clientConn, resourceGroupName string, connections int) error
 			}
 		}
 		// Close statements and session
-		// This will release advisory locks, row locks, etc.
+		// At first, it'll decrease the count of connections in the resource group and update the corresponding gauge.
+		// Then it'll close the statements and session, which releases advisory locks, row locks, etc.
 		if ctx := cc.getCtx(); ctx != nil {
+			resourceGroupName := ctx.GetSessionVars().ResourceGroupName
+			metrics.ConnGauge.WithLabelValues(resourceGroupName).Dec()
+
 			err = ctx.Close()
 		}
 	})
@@ -382,14 +388,7 @@ func closeConn(cc *clientConn, resourceGroupName string, connections int) error
 
 func (cc *clientConn) closeWithoutLock() error {
 	delete(cc.server.clients, cc.connectionID)
-	name := cc.getCtx().GetSessionVars().ResourceGroupName
-	count := cc.server.ConnNumByResourceGroup[name]
-	if count <= 1 {
-		delete(cc.server.ConnNumByResourceGroup, name)
-	} else {
-		cc.server.ConnNumByResourceGroup[name]--
-	}
-	return closeConn(cc, name, count-1)
+	return closeConn(cc)
 }
 
 // writeInitialHandshake sends server version, connection ID, server capability, collation, server status
diff --git a/pkg/server/conn_test.go b/pkg/server/conn_test.go
index c6f4de555083f..635da56400e1b 100644
--- a/pkg/server/conn_test.go
+++ b/pkg/server/conn_test.go
@@ -2036,7 +2036,11 @@ func TestCloseConn(t *testing.T) {
 	for i := 0; i < numGoroutines; i++ {
 		go func() {
 			defer wg.Done()
+<<<<<<< HEAD
 			err := closeConn(cc, "default", 1)
+=======
+			err := closeConn(cc)
+>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 			require.NoError(t, err)
 		}()
 	}
diff --git a/pkg/server/internal/testserverclient/BUILD.bazel b/pkg/server/internal/testserverclient/BUILD.bazel
index b29f420f7e1f5..771a0a56843e4 100644
--- a/pkg/server/internal/testserverclient/BUILD.bazel
+++ b/pkg/server/internal/testserverclient/BUILD.bazel
@@ -9,6 +9,7 @@ go_library(
         "//pkg/config",
         "//pkg/errno",
         "//pkg/kv",
+        "//pkg/metrics",
         "//pkg/parser/mysql",
         "//pkg/server",
         "//pkg/testkit",
@@ -18,6 +19,7 @@ go_library(
         "@com_github_pingcap_errors//:errors",
         "@com_github_pingcap_failpoint//:failpoint",
         "@com_github_pingcap_log//:log",
+        "@com_github_prometheus_client_model//go",
         "@com_github_stretchr_testify//require",
         "@org_uber_go_zap//:zap",
     ],
diff --git a/pkg/server/internal/testserverclient/server_client.go b/pkg/server/internal/testserverclient/server_client.go
index b09291db6d6dd..434c8f9568707 100644
--- a/pkg/server/internal/testserverclient/server_client.go
+++ b/pkg/server/internal/testserverclient/server_client.go
@@ -38,11 +38,13 @@ import (
 	"github.com/pingcap/tidb/pkg/config"
 	"github.com/pingcap/tidb/pkg/errno"
 	"github.com/pingcap/tidb/pkg/kv"
+	"github.com/pingcap/tidb/pkg/metrics"
 	tmysql "github.com/pingcap/tidb/pkg/parser/mysql"
 	"github.com/pingcap/tidb/pkg/server"
 	"github.com/pingcap/tidb/pkg/testkit"
 	"github.com/pingcap/tidb/pkg/testkit/testenv"
 	"github.com/pingcap/tidb/pkg/util/versioninfo"
+	dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" "go.uber.org/zap" ) @@ -2532,4 +2534,68 @@ func (cli *TestServerClient) RunTestStmtCountLimit(t *testing.T) { }) } +func (cli *TestServerClient) RunTestConnectionCount(t *testing.T) { + readConnCount := func(resourceGroupName string) float64 { + metric, err := metrics.ConnGauge.GetMetricWith(map[string]string{ + metrics.LblResourceGroup: resourceGroupName, + }) + require.NoError(t, err) + output := &dto.Metric{} + metric.Write(output) + + return output.GetGauge().GetValue() + } + + resourceGroupConnCountReached := func(t *testing.T, resourceGroupName string, expected float64) { + require.Eventually(t, func() bool { + return readConnCount(resourceGroupName) == expected + }, 5*time.Second, 100*time.Millisecond) + } + + cli.RunTests(t, nil, func(dbt *testkit.DBTestKit) { + ctx := context.Background() + dbt.GetDB().SetMaxIdleConns(0) + + // start 100 connections + conns := make([]*sql.Conn, 100) + for i := 0; i < 100; i++ { + conn, err := dbt.GetDB().Conn(ctx) + require.NoError(t, err) + conns[i] = conn + } + resourceGroupConnCountReached(t, "default", 100.0) + + // close 50 connections + for i := 0; i < 50; i++ { + err := conns[i].Close() + require.NoError(t, err) + } + resourceGroupConnCountReached(t, "default", 50.0) + + // close 25 connections + for i := 50; i < 75; i++ { + err := conns[i].Close() + require.NoError(t, err) + } + resourceGroupConnCountReached(t, "default", 25.0) + + // change the following 25 connections from `default` resource group to `test` + dbt.MustExec("create resource group test RU_PER_SEC = 1000;") + for i := 75; i < 100; i++ { + _, err := conns[i].ExecContext(ctx, "set resource group test") + require.NoError(t, err) + } + resourceGroupConnCountReached(t, "default", 0.0) + resourceGroupConnCountReached(t, "test", 25.0) + + // close 25 connections + for i := 75; i < 100; i++ { + err := conns[i].Close() + require.NoError(t, err) + } + resourceGroupConnCountReached(t, "default", 0.0) + resourceGroupConnCountReached(t, "test", 0.0) + }) +} + //revive:enable:exported diff --git a/pkg/server/server.go b/pkg/server/server.go index 55df85982c6bd..df1ce623f3638 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -119,9 +119,8 @@ type Server struct { socket net.Listener concurrentLimiter *TokenLimiter - rwlock sync.RWMutex - clients map[uint64]*clientConn - ConnNumByResourceGroup map[string]int + rwlock sync.RWMutex + clients map[uint64]*clientConn capability uint32 dom *domain.Domain @@ -239,15 +238,14 @@ func (s *Server) newConn(conn net.Conn) *clientConn { // NewServer creates a new Server. 
 func NewServer(cfg *config.Config, driver IDriver) (*Server, error) {
 	s := &Server{
-		cfg:                    cfg,
-		driver:                 driver,
-		concurrentLimiter:      NewTokenLimiter(cfg.TokenLimit),
-		clients:                make(map[uint64]*clientConn),
-		ConnNumByResourceGroup: make(map[string]int),
-		internalSessions:       make(map[any]struct{}, 100),
-		health:                 uatomic.NewBool(false),
-		inShutdownMode:         uatomic.NewBool(false),
-		printMDLLogTime:        time.Now(),
+		cfg:               cfg,
+		driver:            driver,
+		concurrentLimiter: NewTokenLimiter(cfg.TokenLimit),
+		clients:           make(map[uint64]*clientConn),
+		internalSessions:  make(map[any]struct{}, 100),
+		health:            uatomic.NewBool(false),
+		inShutdownMode:    uatomic.NewBool(false),
+		printMDLLogTime:   time.Now(),
 	}
 	s.capability = defaultCapability
 	setTxnScope()
@@ -633,27 +631,15 @@ func (s *Server) Close() {
 func (s *Server) registerConn(conn *clientConn) bool {
 	s.rwlock.Lock()
 	defer s.rwlock.Unlock()
-	connections := make(map[string]int, 0)
-	for _, conn := range s.clients {
-		resourceGroup := conn.getCtx().GetSessionVars().ResourceGroupName
-		connections[resourceGroup]++
-	}
 
 	logger := logutil.BgLogger()
 	if s.inShutdownMode.Load() {
 		logger.Info("close connection directly when shutting down")
-		for resourceGroupName, count := range s.ConnNumByResourceGroup {
-			metrics.ConnGauge.WithLabelValues(resourceGroupName).Set(float64(count))
-		}
-		terror.Log(closeConn(conn, "", 0))
+		terror.Log(closeConn(conn))
 		return false
 	}
 	s.clients[conn.connectionID] = conn
-	s.ConnNumByResourceGroup[conn.getCtx().GetSessionVars().ResourceGroupName]++
-
-	for name, count := range s.ConnNumByResourceGroup {
-		metrics.ConnGauge.WithLabelValues(name).Set(float64(count))
-	}
+	metrics.ConnGauge.WithLabelValues(conn.getCtx().GetSessionVars().ResourceGroupName).Inc()
 
 	return true
 }
diff --git a/pkg/server/tests/commontest/BUILD.bazel b/pkg/server/tests/commontest/BUILD.bazel
new file mode 100644
index 0000000000000..d2b4d8ed82490
--- /dev/null
+++ b/pkg/server/tests/commontest/BUILD.bazel
@@ -0,0 +1,50 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+
+go_test(
+    name = "commontest_test",
+    timeout = "short",
+    srcs = [
+        "main_test.go",
+        "tidb_test.go",
+    ],
+    flaky = True,
+    shard_count = 49,
+    deps = [
+        "//pkg/config",
+        "//pkg/ddl/util",
+        "//pkg/extension",
+        "//pkg/metrics",
+        "//pkg/parser",
+        "//pkg/parser/ast",
+        "//pkg/parser/auth",
+        "//pkg/parser/mysql",
+        "//pkg/server",
+        "//pkg/server/internal/column",
+        "//pkg/server/internal/resultset",
+        "//pkg/server/internal/testserverclient",
+        "//pkg/server/internal/testutil",
+        "//pkg/server/internal/util",
+        "//pkg/server/tests/servertestkit",
+        "//pkg/session",
+        "//pkg/sessionctx/variable",
+        "//pkg/store/mockstore/unistore",
+        "//pkg/testkit",
+        "//pkg/testkit/testsetup",
+        "//pkg/util",
+        "//pkg/util/plancodec",
+        "//pkg/util/resourcegrouptag",
+        "//pkg/util/topsql",
+        "//pkg/util/topsql/collector",
+        "//pkg/util/topsql/collector/mock",
+        "//pkg/util/topsql/state",
+        "//pkg/util/topsql/stmtstats",
+        "@com_github_go_sql_driver_mysql//:mysql",
+        "@com_github_pingcap_errors//:errors",
+        "@com_github_pingcap_failpoint//:failpoint",
+        "@com_github_stretchr_testify//require",
+        "@com_github_tikv_client_go_v2//tikv",
+        "@com_github_tikv_client_go_v2//tikvrpc",
+        "@io_opencensus_go//stats/view",
+        "@org_uber_go_goleak//:goleak",
+    ],
+)
diff --git a/pkg/server/tests/tidb_test.go b/pkg/server/tests/tidb_test.go
index 299a14754587c..70924734d3122 100644
--- a/pkg/server/tests/tidb_test.go
+++ b/pkg/server/tests/tidb_test.go
@@ -3134,3 +3134,101 @@ func TestProxyProtocolWithIpNoFallbackable(t *testing.T) {
 	require.NotNil(t, err)
 	db.Close()
 }
+<<<<<<< HEAD:pkg/server/tests/tidb_test.go
+=======
+
+func TestConnectionWillNotLeak(t *testing.T) {
+	cfg := util2.NewTestConfig()
+	cfg.Port = 0
+	cfg.Status.ReportStatus = false
+	// Setup proxy protocol config
+	cfg.ProxyProtocol.Networks = "*"
+	cfg.ProxyProtocol.Fallbackable = false
+
+	ts := servertestkit.CreateTidbTestSuite(t)
+
+	cli := testserverclient.NewTestServerClient()
+	cli.Port = testutil.GetPortFromTCPAddr(ts.Server.ListenAddr())
+	dsn := cli.GetDSN(func(config *mysql.Config) {
+		config.User = "root"
+		config.DBName = "test"
+	})
+	db, err := sql.Open("mysql", dsn)
+	require.Nil(t, err)
+	db.SetMaxOpenConns(100)
+	db.SetMaxIdleConns(0)
+
+	// create 100 connections
+	conns := make([]*sql.Conn, 0, 100)
+	for len(conns) < 100 {
+		conn, err := db.Conn(context.Background())
+		require.NoError(t, err)
+		conns = append(conns, conn)
+	}
+	require.Eventually(t, func() bool {
+		runtime.GC()
+		return server2.ConnectionInMemCounterForTest.Load() == int64(100)
+	}, time.Minute, time.Millisecond*100)
+
+	// run a simple query on each connection and close it
+	// this cannot ensure the connection will not leak for any kinds of requests
+	var wg sync.WaitGroup
+	for _, conn := range conns {
+		wg.Add(1)
+		conn := conn
+		go func() {
+			rows, err := conn.QueryContext(context.Background(), "SELECT 2023")
+			require.NoError(t, err)
+			var result int
+			require.True(t, rows.Next())
+			require.NoError(t, rows.Scan(&result))
+			require.Equal(t, result, 2023)
+			require.NoError(t, rows.Close())
+			// `db.Close` will not close already grabbed connection, so it's still needed to close the connection here.
+			require.NoError(t, conn.Close())
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+
+	require.NoError(t, db.Close())
+	require.Eventually(t, func() bool {
+		runtime.GC()
+		count := server2.ConnectionInMemCounterForTest.Load()
+		return count == 0
+	}, time.Minute, time.Millisecond*100)
+}
+
+func TestPrepareCount(t *testing.T) {
+	ts := servertestkit.CreateTidbTestSuite(t)
+
+	qctx, err := ts.Tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
+	require.NoError(t, err)
+	prepareCnt := atomic.LoadInt64(&variable.PreparedStmtCount)
+	ctx := context.Background()
+	_, err = Execute(ctx, qctx, "use test;")
+	require.NoError(t, err)
+	_, err = Execute(ctx, qctx, "drop table if exists t1")
+	require.NoError(t, err)
+	_, err = Execute(ctx, qctx, "create table t1 (id int)")
+	require.NoError(t, err)
+	stmt, _, _, err := qctx.Prepare("insert into t1 values (?)")
+	require.NoError(t, err)
+	require.Equal(t, prepareCnt+1, atomic.LoadInt64(&variable.PreparedStmtCount))
+	require.NoError(t, err)
+	err = qctx.GetStatement(stmt.ID()).Close()
+	require.NoError(t, err)
+	require.Equal(t, prepareCnt, atomic.LoadInt64(&variable.PreparedStmtCount))
+	require.NoError(t, qctx.Close())
+}
+
+func TestSQLModeIsLoadedBeforeQuery(t *testing.T) {
+	ts := servertestkit.CreateTidbTestSuite(t)
+	ts.RunTestSQLModeIsLoadedBeforeQuery(t)
+}
+
+func TestConnectionCount(t *testing.T) {
+	ts := servertestkit.CreateTidbTestSuite(t)
+	ts.RunTestConnectionCount(t)
+}
+>>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996)):pkg/server/tests/commontest/tidb_test.go

From 6590ea3d37bcf089ade5cdd503caafec47b999f0 Mon Sep 17 00:00:00 2001
From: Yang Keao
Date: Tue, 21 May 2024 16:42:34 +0800
Subject: [PATCH 2/2] fix conflicts

Signed-off-by: Yang Keao
---
 pkg/executor/simple.go                   |  4 -
 pkg/metrics/grafana/tidb_summary.json    | 20 ----
 pkg/metrics/grafana/tidb_summary.jsonnet | 12 ---
 pkg/server/conn.go                       |  7 --
 pkg/server/conn_test.go                  |  4 -
 .../testserverclient/server_client.go    |  1 +
 pkg/server/tests/commontest/BUILD.bazel  | 50 ----------
 pkg/server/tests/tidb_test.go            | 93 -------------------
 8 files changed, 1 insertion(+), 190 deletions(-)
 delete mode 100644 pkg/server/tests/commontest/BUILD.bazel

diff --git a/pkg/executor/simple.go b/pkg/executor/simple.go
index 3355feeceba0a..e228515af9046 100644
--- a/pkg/executor/simple.go
+++ b/pkg/executor/simple.go
@@ -41,11 +41,7 @@ import (
 	"github.com/pingcap/tidb/pkg/expression"
 	"github.com/pingcap/tidb/pkg/infoschema"
 	"github.com/pingcap/tidb/pkg/kv"
-<<<<<<< HEAD
-=======
-	"github.com/pingcap/tidb/pkg/meta"
 	"github.com/pingcap/tidb/pkg/metrics"
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 	"github.com/pingcap/tidb/pkg/parser/ast"
 	"github.com/pingcap/tidb/pkg/parser/auth"
 	"github.com/pingcap/tidb/pkg/parser/model"
diff --git a/pkg/metrics/grafana/tidb_summary.json b/pkg/metrics/grafana/tidb_summary.json
index 102cebcec9ba8..085eec4916af8 100644
--- a/pkg/metrics/grafana/tidb_summary.json
+++ b/pkg/metrics/grafana/tidb_summary.json
@@ -258,16 +258,6 @@
           "intervalFactor": 2,
           "legendFormat": "{{instance}}",
           "refId": "A"
-<<<<<<< HEAD
-=======
-        },
-        {
-          "expr": "tidb_server_maxprocs{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "quota-{{instance}}",
-          "refId": "B"
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
         }
       ],
       "thresholds": [ ],
@@ -361,16 +351,6 @@
           "intervalFactor": 2,
           "legendFormat": "HeapInuse-{{instance}}",
           "refId": "B"
-<<<<<<< HEAD
-=======
-        },
-        {
-          "expr": "tidb_server_memory_quota_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", job=\"tidb\"}",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "quota-{{instance}}",
-          "refId": "C"
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
         }
      ],
       "thresholds": [ ],
diff --git a/pkg/metrics/grafana/tidb_summary.jsonnet b/pkg/metrics/grafana/tidb_summary.jsonnet
index e9b4374b61310..2d3e6c1d5fc38 100644
--- a/pkg/metrics/grafana/tidb_summary.jsonnet
+++ b/pkg/metrics/grafana/tidb_summary.jsonnet
@@ -133,12 +133,6 @@ local cpuP = graphPanel.new(
     'rate(process_cpu_seconds_total{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}[1m])',
     legendFormat='{{instance}}',
   )
-)
-.addTarget(
-  prometheus.target(
-    'tidb_server_maxprocs{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance"}',
-    legendFormat='quota-{{instance}}',
-  )
 );
 
 local memP = graphPanel.new(
@@ -159,12 +153,6 @@ local memP = graphPanel.new(
     'go_memory_classes_heap_objects_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"} + go_memory_classes_heap_unused_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}',
     legendFormat='HeapInuse-{{instance}}',
   )
-)
-.addTarget(
-  prometheus.target(
-    'tidb_server_memory_quota_bytes{k8s_cluster="$k8s_cluster", tidb_cluster="$tidb_cluster", instance=~"$instance", job="tidb"}',
-    legendFormat='quota-{{instance}}',
-  )
 );
 
 // Query Summary
diff --git a/pkg/server/conn.go b/pkg/server/conn.go
index c9fb261f379f6..3f0f3c0fc7b2e 100644
--- a/pkg/server/conn.go
+++ b/pkg/server/conn.go
@@ -351,16 +351,9 @@ func (cc *clientConn) Close() error {
 
 // closeConn is idempotent and thread-safe.
 // It will be called on the same `clientConn` more than once to avoid connection leak.
-<<<<<<< HEAD
-func closeConn(cc *clientConn, resourceGroupName string, connections int) error {
-	var err error
-	cc.closeOnce.Do(func() {
-		metrics.ConnGauge.WithLabelValues(resourceGroupName).Set(float64(connections))
-=======
 func closeConn(cc *clientConn) error {
 	var err error
 	cc.closeOnce.Do(func() {
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 		if cc.connectionID > 0 {
 			cc.server.dom.ReleaseConnID(cc.connectionID)
 			cc.connectionID = 0
diff --git a/pkg/server/conn_test.go b/pkg/server/conn_test.go
index 635da56400e1b..8aa40dfcf52b6 100644
--- a/pkg/server/conn_test.go
+++ b/pkg/server/conn_test.go
@@ -2036,11 +2036,7 @@ func TestCloseConn(t *testing.T) {
 	for i := 0; i < numGoroutines; i++ {
 		go func() {
 			defer wg.Done()
-<<<<<<< HEAD
-			err := closeConn(cc, "default", 1)
-=======
 			err := closeConn(cc)
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996))
 			require.NoError(t, err)
 		}()
 	}
diff --git a/pkg/server/internal/testserverclient/server_client.go b/pkg/server/internal/testserverclient/server_client.go
index 434c8f9568707..8dd9a985edc42 100644
--- a/pkg/server/internal/testserverclient/server_client.go
+++ b/pkg/server/internal/testserverclient/server_client.go
@@ -16,6 +16,7 @@ package testserverclient
 
 import (
 	"bytes"
+	"context"
 	"database/sql"
 	"encoding/json"
 	"fmt"
diff --git a/pkg/server/tests/commontest/BUILD.bazel b/pkg/server/tests/commontest/BUILD.bazel
deleted file mode 100644
index d2b4d8ed82490..0000000000000
--- a/pkg/server/tests/commontest/BUILD.bazel
+++ /dev/null
@@ -1,50 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_test")
-
-go_test(
-    name = "commontest_test",
-    timeout = "short",
-    srcs = [
-        "main_test.go",
-        "tidb_test.go",
-    ],
-    flaky = True,
-    shard_count = 49,
-    deps = [
-        "//pkg/config",
-        "//pkg/ddl/util",
-        "//pkg/extension",
-        "//pkg/metrics",
-        "//pkg/parser",
-        "//pkg/parser/ast",
-        "//pkg/parser/auth",
-        "//pkg/parser/mysql",
-        "//pkg/server",
-        "//pkg/server/internal/column",
-        "//pkg/server/internal/resultset",
-        "//pkg/server/internal/testserverclient",
-        "//pkg/server/internal/testutil",
-        "//pkg/server/internal/util",
-        "//pkg/server/tests/servertestkit",
-        "//pkg/session",
-        "//pkg/sessionctx/variable",
-        "//pkg/store/mockstore/unistore",
-        "//pkg/testkit",
-        "//pkg/testkit/testsetup",
-        "//pkg/util",
-        "//pkg/util/plancodec",
-        "//pkg/util/resourcegrouptag",
-        "//pkg/util/topsql",
-        "//pkg/util/topsql/collector",
-        "//pkg/util/topsql/collector/mock",
-        "//pkg/util/topsql/state",
-        "//pkg/util/topsql/stmtstats",
-        "@com_github_go_sql_driver_mysql//:mysql",
-        "@com_github_pingcap_errors//:errors",
-        "@com_github_pingcap_failpoint//:failpoint",
-        "@com_github_stretchr_testify//require",
-        "@com_github_tikv_client_go_v2//tikv",
-        "@com_github_tikv_client_go_v2//tikvrpc",
-        "@io_opencensus_go//stats/view",
-        "@org_uber_go_goleak//:goleak",
-    ],
-)
diff --git a/pkg/server/tests/tidb_test.go b/pkg/server/tests/tidb_test.go
index 70924734d3122..5f672cac0cab0 100644
--- a/pkg/server/tests/tidb_test.go
+++ b/pkg/server/tests/tidb_test.go
@@ -3134,101 +3134,8 @@ func TestProxyProtocolWithIpNoFallbackable(t *testing.T) {
 	require.NotNil(t, err)
 	db.Close()
 }
-<<<<<<< HEAD:pkg/server/tests/tidb_test.go
-=======
-
-func TestConnectionWillNotLeak(t *testing.T) {
-	cfg := util2.NewTestConfig()
-	cfg.Port = 0
-	cfg.Status.ReportStatus = false
-	// Setup proxy protocol config
-	cfg.ProxyProtocol.Networks = "*"
-	cfg.ProxyProtocol.Fallbackable = false
-
-	ts := servertestkit.CreateTidbTestSuite(t)
-
-	cli := testserverclient.NewTestServerClient()
-	cli.Port = testutil.GetPortFromTCPAddr(ts.Server.ListenAddr())
-	dsn := cli.GetDSN(func(config *mysql.Config) {
-		config.User = "root"
-		config.DBName = "test"
-	})
-	db, err := sql.Open("mysql", dsn)
-	require.Nil(t, err)
-	db.SetMaxOpenConns(100)
-	db.SetMaxIdleConns(0)
-
-	// create 100 connections
-	conns := make([]*sql.Conn, 0, 100)
-	for len(conns) < 100 {
-		conn, err := db.Conn(context.Background())
-		require.NoError(t, err)
-		conns = append(conns, conn)
-	}
-	require.Eventually(t, func() bool {
-		runtime.GC()
-		return server2.ConnectionInMemCounterForTest.Load() == int64(100)
-	}, time.Minute, time.Millisecond*100)
-
-	// run a simple query on each connection and close it
-	// this cannot ensure the connection will not leak for any kinds of requests
-	var wg sync.WaitGroup
-	for _, conn := range conns {
-		wg.Add(1)
-		conn := conn
-		go func() {
-			rows, err := conn.QueryContext(context.Background(), "SELECT 2023")
-			require.NoError(t, err)
-			var result int
-			require.True(t, rows.Next())
-			require.NoError(t, rows.Scan(&result))
-			require.Equal(t, result, 2023)
-			require.NoError(t, rows.Close())
-			// `db.Close` will not close already grabbed connection, so it's still needed to close the connection here.
-			require.NoError(t, conn.Close())
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-
-	require.NoError(t, db.Close())
-	require.Eventually(t, func() bool {
-		runtime.GC()
-		count := server2.ConnectionInMemCounterForTest.Load()
-		return count == 0
-	}, time.Minute, time.Millisecond*100)
-}
-
-func TestPrepareCount(t *testing.T) {
-	ts := servertestkit.CreateTidbTestSuite(t)
-
-	qctx, err := ts.Tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
-	require.NoError(t, err)
-	prepareCnt := atomic.LoadInt64(&variable.PreparedStmtCount)
-	ctx := context.Background()
-	_, err = Execute(ctx, qctx, "use test;")
-	require.NoError(t, err)
-	_, err = Execute(ctx, qctx, "drop table if exists t1")
-	require.NoError(t, err)
-	_, err = Execute(ctx, qctx, "create table t1 (id int)")
-	require.NoError(t, err)
-	stmt, _, _, err := qctx.Prepare("insert into t1 values (?)")
-	require.NoError(t, err)
-	require.Equal(t, prepareCnt+1, atomic.LoadInt64(&variable.PreparedStmtCount))
-	require.NoError(t, err)
-	err = qctx.GetStatement(stmt.ID()).Close()
-	require.NoError(t, err)
-	require.Equal(t, prepareCnt, atomic.LoadInt64(&variable.PreparedStmtCount))
-	require.NoError(t, qctx.Close())
-}
-
-func TestSQLModeIsLoadedBeforeQuery(t *testing.T) {
-	ts := servertestkit.CreateTidbTestSuite(t)
-	ts.RunTestSQLModeIsLoadedBeforeQuery(t)
-}
 
 func TestConnectionCount(t *testing.T) {
 	ts := servertestkit.CreateTidbTestSuite(t)
 	ts.RunTestConnectionCount(t)
 }
->>>>>>> 07ef0094860 (server, metrics: remove the connection count on server, only use the metrics (#51996)):pkg/server/tests/commontest/tidb_test.go