From d6b7847c7ac50c65761b289ba0f91dce9e0a01c9 Mon Sep 17 00:00:00 2001 From: husharp Date: Sat, 6 May 2023 10:25:54 +0800 Subject: [PATCH 1/3] update Signed-off-by: husharp --- executor/calibrate_resource.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/executor/calibrate_resource.go b/executor/calibrate_resource.go index 1a6b94aa3cda8..92bc689195ef8 100644 --- a/executor/calibrate_resource.go +++ b/executor/calibrate_resource.go @@ -45,26 +45,26 @@ var ( workloadBaseRUCostMap = map[ast.CalibrateResourceType]*baseResourceCost{ ast.TPCC: { tidbCPU: 0.6, - kvCPU: 0.15, - readBytes: units.MiB / 2, - writeBytes: units.MiB, - readReqCount: 300, - writeReqCount: 1750, + kvCPU: 500, + readBytes: units.MiB * 4, + writeBytes: units.MiB * 1.25, + readReqCount: 350, + writeReqCount: 1465, }, ast.OLTPREADWRITE: { - tidbCPU: 1.25, - kvCPU: 0.35, - readBytes: units.MiB * 4.25, + tidbCPU: 1.1, + kvCPU: 400, + readBytes: units.MiB * 8.5, writeBytes: units.MiB / 3, - readReqCount: 1600, - writeReqCount: 1400, + readReqCount: 1365, + writeReqCount: 1430, }, ast.OLTPREADONLY: { - tidbCPU: 2, - kvCPU: 0.52, - readBytes: units.MiB * 28, + tidbCPU: 1.3, + kvCPU: 500, + readBytes: units.MiB * 20.5, writeBytes: 0, - readReqCount: 4500, + readReqCount: 3350, writeReqCount: 0, }, ast.OLTPWRITEONLY: { @@ -243,7 +243,7 @@ func (e *calibrateResourceExec) dynamicCalibrate(ctx context.Context, req *chunk } tikvQuota, tidbQuota := tikvCPUs.getValue()/totalKVCPUQuota, tidbCPUs.getValue()/totalTiDBCPU // If one of the two cpu usage is greater than the `valuableUsageThreshold`, we can accept it. - // And if both are greater than the `lowUsageThreshold`, we can also accpet it. + // And if both are greater than the `lowUsageThreshold`, we can also accept it. if tikvQuota > valuableUsageThreshold || tidbQuota > valuableUsageThreshold { quotas = append(quotas, rus.getValue()/mathutil.Max(tikvQuota, tidbQuota)) } else if tikvQuota < lowUsageThreshold || tidbQuota < lowUsageThreshold { @@ -262,7 +262,7 @@ func (e *calibrateResourceExec) dynamicCalibrate(ctx context.Context, req *chunk sort.Slice(quotas, func(i, j int) bool { return quotas[i] > quotas[j] }) - lowerBound := int(math.Round(float64(len(quotas)) * float64(discardRate))) + lowerBound := int(math.Round(float64(len(quotas)) * discardRate)) upperBound := len(quotas) - lowerBound sum := 0. for i := lowerBound; i < upperBound; i++ { From 7dd81dfb3960ac90a7e46ee58010657315b985b5 Mon Sep 17 00:00:00 2001 From: husharp Date: Sat, 6 May 2023 10:59:31 +0800 Subject: [PATCH 2/3] update Signed-off-by: husharp --- executor/calibrate_resource.go | 60 +++++++++++++++++----------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/executor/calibrate_resource.go b/executor/calibrate_resource.go index 92bc689195ef8..5b19768eb1033 100644 --- a/executor/calibrate_resource.go +++ b/executor/calibrate_resource.go @@ -44,36 +44,36 @@ var ( // the workload. 
 	workloadBaseRUCostMap = map[ast.CalibrateResourceType]*baseResourceCost{
 		ast.TPCC: {
-			tidbCPU:       0.6,
-			kvCPU:         500,
-			readBytes:     units.MiB * 4,
-			writeBytes:    units.MiB * 1.25,
-			readReqCount:  350,
-			writeReqCount: 1465,
+			tidbToKVCPURatio: 0.6,
+			kvCPU:            0.5,
+			readBytes:        units.MiB * 4,
+			writeBytes:       units.MiB * 1.25,
+			readReqCount:     350,
+			writeReqCount:    1465,
 		},
 		ast.OLTPREADWRITE: {
-			tidbCPU:       1.1,
-			kvCPU:         400,
-			readBytes:     units.MiB * 8.5,
-			writeBytes:    units.MiB / 3,
-			readReqCount:  1365,
-			writeReqCount: 1430,
+			tidbToKVCPURatio: 1.1,
+			kvCPU:            0.4,
+			readBytes:        units.MiB * 8.5,
+			writeBytes:       units.MiB / 3,
+			readReqCount:     1365,
+			writeReqCount:    1430,
 		},
 		ast.OLTPREADONLY: {
-			tidbCPU:       1.3,
-			kvCPU:         500,
-			readBytes:     units.MiB * 20.5,
-			writeBytes:    0,
-			readReqCount:  3350,
-			writeReqCount: 0,
+			tidbToKVCPURatio: 1.3,
+			kvCPU:            0.5,
+			readBytes:        units.MiB * 20.5,
+			writeBytes:       0,
+			readReqCount:     3350,
+			writeReqCount:    0,
 		},
 		ast.OLTPWRITEONLY: {
-			tidbCPU:       1,
-			kvCPU:         0,
-			readBytes:     0,
-			writeBytes:    units.MiB,
-			readReqCount:  0,
-			writeReqCount: 3550,
+			tidbToKVCPURatio: 1,
+			kvCPU:            0,
+			readBytes:        0,
+			writeBytes:       units.MiB,
+			readReqCount:     0,
+			writeReqCount:    3550,
 		},
 	}
 
@@ -93,10 +93,10 @@ func GetResourceGroupController() *rmclient.ResourceGroupsController {
 
 // the resource cost rate of a specified workload per 1 tikv cpu.
 type baseResourceCost struct {
-	// the average tikv cpu time, this is used to calculate whether tikv cpu
+	// represents the average ratio of TiDB CPU time to TiKV CPU time; this is used to calculate whether tikv cpu
 	// or tidb cpu is the performance bottle neck.
-	tidbCPU float64
-	// the kv CPU time for calculate RU, it's smaller than the actual cpu usage.
+	tidbToKVCPURatio float64
+	// the kv CPU time for calculating RU; it's smaller than the actual cpu usage. The unit is seconds.
 	kvCPU float64
 	// the read bytes rate per 1 tikv cpu.
 	readBytes uint64
@@ -303,12 +303,12 @@ func (e *calibrateResourceExec) staticCalibrate(ctx context.Context, req *chunk.
return errors.Errorf("unknown workload '%T'", e.workloadType) } - if totalTiDBCPU/baseCost.tidbCPU < totalKVCPUQuota { - totalKVCPUQuota = totalTiDBCPU / baseCost.tidbCPU + if totalTiDBCPU/baseCost.tidbToKVCPURatio < totalKVCPUQuota { + totalKVCPUQuota = totalTiDBCPU / baseCost.tidbToKVCPURatio } ruCfg := resourceGroupCtl.GetConfig() ruPerKVCPU := float64(ruCfg.ReadBaseCost)*float64(baseCost.readReqCount) + - float64(ruCfg.CPUMsCost)*baseCost.kvCPU + + float64(ruCfg.CPUMsCost)*baseCost.kvCPU*1000 + float64(ruCfg.ReadBytesCost)*float64(baseCost.readBytes) + float64(ruCfg.WriteBaseCost)*float64(baseCost.writeReqCount) + float64(ruCfg.WriteBytesCost)*float64(baseCost.writeBytes) From eef0191665854bb3c9544d9ffddeb9d80a1f4210 Mon Sep 17 00:00:00 2001 From: husharp Date: Mon, 8 May 2023 18:01:07 +0800 Subject: [PATCH 3/3] update test Signed-off-by: husharp --- executor/calibrate_resource_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/executor/calibrate_resource_test.go b/executor/calibrate_resource_test.go index 68a4557d751b7..a66a35f9a9c01 100644 --- a/executor/calibrate_resource_test.go +++ b/executor/calibrate_resource_test.go @@ -113,17 +113,17 @@ func TestCalibrateResource(t *testing.T) { ctx = failpoint.WithHook(ctx, func(_ context.Context, fpname string) bool { return fpName == fpname }) - tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE").Check(testkit.Rows("68569")) - tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD TPCC").Check(testkit.Rows("68569")) - tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD OLTP_READ_WRITE").Check(testkit.Rows("53026")) - tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD OLTP_READ_ONLY").Check(testkit.Rows("31463")) + tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE").Check(testkit.Rows("73516")) + tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD TPCC").Check(testkit.Rows("73516")) + tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD OLTP_READ_WRITE").Check(testkit.Rows("57165")) + tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD OLTP_READ_ONLY").Check(testkit.Rows("31971")) tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE WORKLOAD OLTP_WRITE_ONLY").Check(testkit.Rows("109776")) // change total tidb cpu to less than tikv_cpu_quota mockData["tidb_server_maxprocs"] = [][]types.Datum{ types.MakeDatums(datetime("2020-02-12 10:35:00"), "tidb-0", 8.0), } - tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE").Check(testkit.Rows("38094")) + tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE").Check(testkit.Rows("40842")) // construct data for dynamic calibrate ru1 := [][]types.Datum{
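
For reference, the arithmetic that the updated static-calibrate test expectations exercise can be sketched outside of TiDB. The Go program below is a minimal, self-contained approximation of the post-patch staticCalibrate path: the baseResourceCost fields and the TPCC row are taken from the patch, while ruConfig, estimateCapacity, the RU unit costs, and the CPU quotas are hypothetical placeholders (the real values come from the resource group controller's config and from cluster metrics), so the printed number is not expected to match the figures asserted in calibrate_resource_test.go.

package main

import "fmt"

// baseResourceCost mirrors the struct changed by the patch; per the updated comments,
// kvCPU is expressed in seconds of TiKV CPU time and tidbToKVCPURatio is the average
// ratio of TiDB CPU time to TiKV CPU time for the workload.
type baseResourceCost struct {
	tidbToKVCPURatio float64
	kvCPU            float64
	readBytes        uint64
	writeBytes       uint64
	readReqCount     uint64
	writeReqCount    uint64
}

// ruConfig stands in for the resource group controller's RU cost settings; the field
// names follow the patch, but the values used in main are placeholders, not defaults.
type ruConfig struct {
	ReadBaseCost   float64 // RU per read request
	CPUMsCost      float64 // RU per millisecond of TiKV CPU
	ReadBytesCost  float64 // RU per byte read
	WriteBaseCost  float64 // RU per write request
	WriteBytesCost float64 // RU per byte written
}

// estimateCapacity follows the shape of staticCalibrate after the patch: clamp the
// usable TiKV CPU by the TiDB-to-TiKV CPU ratio, compute RU per TiKV vCPU, and scale.
// The *1000 converts kvCPU (seconds) into the milliseconds that CPUMsCost is priced in.
func estimateCapacity(cfg ruConfig, cost baseResourceCost, totalKVCPUQuota, totalTiDBCPU float64) float64 {
	if totalTiDBCPU/cost.tidbToKVCPURatio < totalKVCPUQuota {
		totalKVCPUQuota = totalTiDBCPU / cost.tidbToKVCPURatio
	}
	ruPerKVCPU := cfg.ReadBaseCost*float64(cost.readReqCount) +
		cfg.CPUMsCost*cost.kvCPU*1000 +
		cfg.ReadBytesCost*float64(cost.readBytes) +
		cfg.WriteBaseCost*float64(cost.writeReqCount) +
		cfg.WriteBytesCost*float64(cost.writeBytes)
	return totalKVCPUQuota * ruPerKVCPU
}

func main() {
	const mib = 1 << 20
	// TPCC row copied from the patched workloadBaseRUCostMap.
	tpcc := baseResourceCost{
		tidbToKVCPURatio: 0.6,
		kvCPU:            0.5,
		readBytes:        mib * 4,
		writeBytes:       mib * 1.25,
		readReqCount:     350,
		writeReqCount:    1465,
	}
	// Placeholder RU unit costs and cluster sizes (8 TiKV vCPUs, 16 TiDB vCPUs).
	cfg := ruConfig{
		ReadBaseCost:   0.25,
		CPUMsCost:      1.0 / 3,
		ReadBytesCost:  1.0 / (64 * 1024),
		WriteBaseCost:  1,
		WriteBytesCost: 1.0 / 1024,
	}
	fmt.Printf("estimated RU capacity: %.0f RU/s\n", estimateCapacity(cfg, tpcc, 8, 16))
}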
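
The dynamic path touched by the first patch (where the redundant float64(discardRate) conversion is dropped) aggregates the per-sample RU/CPU quotas with a trimmed mean: sort descending, discard the top and bottom discardRate fraction, and average the remainder. Below is a small stand-alone sketch of just that aggregation; the quota samples and the 0.1 discard rate are placeholders chosen for illustration, not the constants used in calibrate_resource.go.

package main

import (
	"fmt"
	"math"
	"sort"
)

// trimmedMean mirrors the aggregation in dynamicCalibrate: sort the candidate
// quotas in descending order, drop the highest and lowest discardRate fraction,
// and average what remains.
func trimmedMean(quotas []float64, discardRate float64) float64 {
	sort.Slice(quotas, func(i, j int) bool { return quotas[i] > quotas[j] })
	lowerBound := int(math.Round(float64(len(quotas)) * discardRate))
	upperBound := len(quotas) - lowerBound
	sum := 0.
	for i := lowerBound; i < upperBound; i++ {
		sum += quotas[i]
	}
	return sum / float64(upperBound-lowerBound)
}

func main() {
	quotas := []float64{52000, 61000, 58000, 60500, 300000, 1200} // hypothetical samples
	fmt.Printf("calibrated RU estimate: %.0f\n", trimmedMean(quotas, 0.1))
}

Trimming an equal count from both ends of the sorted samples damps transient spikes in either direction before the average is reported.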