bucket: migrate test framework to testify (tikv#5195)
ref tikv#4813

Signed-off-by: lhy1024 <[email protected]>

Co-authored-by: Ti Chi Robot <[email protected]>
2 people authored and CabinfeverB committed Jul 14, 2022
1 parent 733e2e0 commit 85f79bf
Showing 2 changed files with 69 additions and 72 deletions.
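A quick orientation before the diff: the migration replaces pingcap/check suites and checkers with plain Go test functions that use testify's require package. The fragment below is an illustrative sketch only (it is not part of the commit, and the test name and values are made up); it shows the assertion mapping the changed files follow. Note that testify takes the expected value first, whereas check took the observed value first.

package buckets_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Illustrative mapping from the old check-style assertions to their testify
// equivalents, matching the rewrites in this commit.
func TestAssertionMappingExample(t *testing.T) {
	re := require.New(t)

	got := []byte("10")
	items := []int{1, 2}

	re.Equal([]byte("10"), got)    // was: c.Assert(got, BytesEquals, []byte("10"))
	re.Equal(uint64(1), uint64(1)) // was: c.Assert(v, Equals, uint64(1))
	re.Len(items, 2)               // was: c.Assert(items, HasLen, 2)
	re.NotNil(&items)              // was: c.Assert(p, NotNil)
	re.True(true)                  // was: c.Assert(ok, IsTrue)
}

The migration also drops the per-package TestingT(t) entry point and the Suite registration, since plain Test* functions are discovered by go test directly.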
83 changes: 40 additions & 43 deletions server/statistics/buckets/hot_bucket_cache_test.go
@@ -18,19 +18,12 @@ import (
"context"
"testing"

. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/stretchr/testify/require"
)

func Test(t *testing.T) {
TestingT(t)
}

var _ = Suite(&testHotBucketCache{})

type testHotBucketCache struct{}

func (t *testHotBucketCache) TestPutItem(c *C) {
func TestPutItem(t *testing.T) {
re := require.New(t)
cache := NewBucketsCache(context.Background())
testdata := []struct {
regionID uint64
@@ -90,17 +83,18 @@ func (t *testHotBucketCache) TestPutItem(c *C) {
}}
for _, v := range testdata {
bucket := convertToBucketTreeItem(newTestBuckets(v.regionID, v.version, v.keys, 10))
c.Assert(bucket.GetStartKey(), BytesEquals, v.keys[0])
c.Assert(bucket.GetEndKey(), BytesEquals, v.keys[len(v.keys)-1])
re.Equal(v.keys[0], bucket.GetStartKey())
re.Equal(v.keys[len(v.keys)-1], bucket.GetEndKey())
cache.putItem(bucket, cache.getBucketsByKeyRange(bucket.GetStartKey(), bucket.GetEndKey()))
c.Assert(cache.bucketsOfRegion, HasLen, v.regionCount)
c.Assert(cache.tree.Len(), Equals, v.treeLen)
c.Assert(cache.bucketsOfRegion[v.regionID], NotNil)
c.Assert(cache.getBucketsByKeyRange([]byte("10"), nil), NotNil)
re.Len(cache.bucketsOfRegion, v.regionCount)
re.Equal(v.treeLen, cache.tree.Len())
re.NotNil(cache.bucketsOfRegion[v.regionID])
re.NotNil(cache.getBucketsByKeyRange([]byte("10"), nil))
}
}

func (t *testHotBucketCache) TestConvertToBucketTreeStat(c *C) {
func TestConvertToBucketTreeStat(t *testing.T) {
re := require.New(t)
buckets := &metapb.Buckets{
RegionId: 1,
Version: 0,
@@ -116,30 +110,32 @@ func (t *testHotBucketCache) TestConvertToBucketTreeStat(c *C) {
PeriodInMs: 1000,
}
item := convertToBucketTreeItem(buckets)
c.Assert(item.startKey, BytesEquals, []byte{'1'})
c.Assert(item.endKey, BytesEquals, []byte{'5'})
c.Assert(item.regionID, Equals, uint64(1))
c.Assert(item.version, Equals, uint64(0))
c.Assert(item.stats, HasLen, 4)
re.Equal([]byte{'1'}, item.startKey)
re.Equal([]byte{'5'}, item.endKey)
re.Equal(uint64(1), item.regionID)
re.Equal(uint64(0), item.version)
re.Len(item.stats, 4)
}

func (t *testHotBucketCache) TestGetBucketsByKeyRange(c *C) {
func TestGetBucketsByKeyRange(t *testing.T) {
re := require.New(t)
cache := NewBucketsCache(context.Background())
bucket1 := newTestBuckets(1, 1, [][]byte{[]byte(""), []byte("015")}, 0)
bucket2 := newTestBuckets(2, 1, [][]byte{[]byte("015"), []byte("020")}, 0)
bucket3 := newTestBuckets(3, 1, [][]byte{[]byte("020"), []byte("")}, 0)
cache.putItem(cache.checkBucketsFlow(bucket1))
cache.putItem(cache.checkBucketsFlow(bucket2))
cache.putItem(cache.checkBucketsFlow(bucket3))
c.Assert(cache.getBucketsByKeyRange([]byte(""), []byte("100")), HasLen, 3)
c.Assert(cache.getBucketsByKeyRange([]byte("030"), []byte("100")), HasLen, 1)
c.Assert(cache.getBucketsByKeyRange([]byte("010"), []byte("030")), HasLen, 3)
c.Assert(cache.getBucketsByKeyRange([]byte("015"), []byte("020")), HasLen, 1)
c.Assert(cache.getBucketsByKeyRange([]byte("001"), []byte("")), HasLen, 3)
c.Assert(cache.bucketsOfRegion, HasLen, 3)
re.Len(cache.getBucketsByKeyRange([]byte(""), []byte("100")), 3)
re.Len(cache.getBucketsByKeyRange([]byte("030"), []byte("100")), 1)
re.Len(cache.getBucketsByKeyRange([]byte("010"), []byte("030")), 3)
re.Len(cache.getBucketsByKeyRange([]byte("015"), []byte("020")), 1)
re.Len(cache.getBucketsByKeyRange([]byte("001"), []byte("")), 3)
re.Len(cache.bucketsOfRegion, 3)
}

func (t *testHotBucketCache) TestInherit(c *C) {
func TestInherit(t *testing.T) {
re := require.New(t)
originBucketItem := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte(""), []byte("20"), []byte("50"), []byte("")}, 0))
originBucketItem.stats[0].HotDegree = 3
originBucketItem.stats[1].HotDegree = 2
@@ -173,15 +169,15 @@ func (t *testHotBucketCache) TestInherit(c *C) {
for _, v := range testdata {
buckets := convertToBucketTreeItem(v.buckets)
buckets.inherit([]*BucketTreeItem{originBucketItem})
c.Assert(buckets.stats, HasLen, len(v.expect))
re.Len(buckets.stats, len(v.expect))
for k, v := range v.expect {
c.Assert(buckets.stats[k].HotDegree, Equals, v)
re.Equal(v, buckets.stats[k].HotDegree)
}
}
}

func (t *testHotBucketCache) TestBucketTreeItemClone(c *C) {
// bucket range: [010,020][020,100]
func TestBucketTreeItemClone(t *testing.T) {
re := require.New(t)
origin := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte("010"), []byte("020"), []byte("100")}, uint64(0)))
testdata := []struct {
startKey []byte
@@ -221,30 +217,31 @@ func (t *testHotBucketCache) TestBucketTreeItemClone(c *C) {
}}
for _, v := range testdata {
copy := origin.cloneBucketItemByRange(v.startKey, v.endKey)
c.Assert(copy.startKey, BytesEquals, v.startKey)
c.Assert(copy.endKey, BytesEquals, v.endKey)
c.Assert(copy.stats, HasLen, v.count)
re.Equal(v.startKey, copy.startKey)
re.Equal(v.endKey, copy.endKey)
re.Len(copy.stats, v.count)
if v.count > 0 && v.strict {
c.Assert(copy.stats[0].StartKey, BytesEquals, v.startKey)
c.Assert(copy.stats[len(copy.stats)-1].EndKey, BytesEquals, v.endKey)
re.Equal(v.startKey, copy.stats[0].StartKey)
re.Equal(v.endKey, copy.stats[len(copy.stats)-1].EndKey)
}
}
}

func (t *testHotBucketCache) TestCalculateHotDegree(c *C) {
func TestCalculateHotDegree(t *testing.T) {
re := require.New(t)
origin := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte("010"), []byte("100")}, uint64(0)))
origin.calculateHotDegree()
c.Assert(origin.stats[0].HotDegree, Equals, -1)
re.Equal(-1, origin.stats[0].HotDegree)

// case1: the dimension of read will be hot
origin.stats[0].Loads = []uint64{minHotThresholds[0] + 1, minHotThresholds[1] + 1, 0, 0, 0, 0}
origin.calculateHotDegree()
c.Assert(origin.stats[0].HotDegree, Equals, 0)
re.Equal(0, origin.stats[0].HotDegree)

// case2: the dimension of write will be hot
origin.stats[0].Loads = []uint64{0, 0, 0, minHotThresholds[3] + 1, minHotThresholds[4] + 1, 0}
origin.calculateHotDegree()
c.Assert(origin.stats[0].HotDegree, Equals, 1)
re.Equal(1, origin.stats[0].HotDegree)
}

func newTestBuckets(regionID uint64, version uint64, keys [][]byte, flow uint64) *metapb.Buckets {
58 changes: 29 additions & 29 deletions server/statistics/buckets/hot_bucket_task_test.go
@@ -18,24 +18,21 @@ import (
"context"
"math"
"strconv"
"testing"
"time"

. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/stretchr/testify/require"
)

var _ = Suite(&testHotBucketTaskCache{})

type testHotBucketTaskCache struct {
}

func getAllBucketStats(ctx context.Context, hotCache *HotBucketCache) map[uint64][]*BucketStat {
task := NewCollectBucketStatsTask(minHotDegree)
hotCache.CheckAsync(task)
return task.WaitRet(ctx)
}

func (s *testHotBucketTaskCache) TestColdHot(c *C) {
func TestColdHot(t *testing.T) {
re := require.New(t)
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
hotCache := NewBucketsCache(ctx)
@@ -52,60 +49,63 @@ func (s *testHotBucketTaskCache) TestColdHot(c *C) {
for _, v := range testdata {
for i := 0; i < 20; i++ {
task := NewCheckPeerTask(v.buckets)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
hotBuckets := getAllBucketStats(ctx, hotCache)
time.Sleep(time.Millisecond * 10)
item := hotBuckets[v.buckets.RegionId]
c.Assert(item, NotNil)
re.NotNil(item)
if v.isHot {
c.Assert(item[0].HotDegree, Equals, i+1)
re.Equal(i+1, item[0].HotDegree)
} else {
c.Assert(item[0].HotDegree, Equals, -i-1)
re.Equal(-i-1, item[0].HotDegree)
}
}
}
}

func (s *testHotBucketTaskCache) TestCheckBucketsTask(c *C) {
func TestCheckBucketsTask(t *testing.T) {
re := require.New(t)
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
hotCache := NewBucketsCache(ctx)
// case1: add bucket successfully
buckets := newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20"), []byte("30")}, 0)
task := NewCheckPeerTask(buckets)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
time.Sleep(time.Millisecond * 10)

hotBuckets := getAllBucketStats(ctx, hotCache)
c.Assert(hotBuckets, HasLen, 1)
re.Len(hotBuckets, 1)
item := hotBuckets[uint64(1)]
c.Assert(item, NotNil)
c.Assert(item, HasLen, 2)
c.Assert(item[0].HotDegree, Equals, -1)
c.Assert(item[1].HotDegree, Equals, -1)
re.NotNil(item)

re.Len(item, 2)
re.Equal(-1, item[0].HotDegree)
re.Equal(-1, item[1].HotDegree)

// case2: add bucket successful and the hot degree should inherit from the old one.
buckets = newTestBuckets(2, 1, [][]byte{[]byte("20"), []byte("30")}, 0)
task = NewCheckPeerTask(buckets)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
hotBuckets = getAllBucketStats(ctx, hotCache)
time.Sleep(time.Millisecond * 10)
item = hotBuckets[uint64(2)]
c.Assert(item, HasLen, 1)
c.Assert(item[0].HotDegree, Equals, -2)
re.Len(item, 1)
re.Equal(-2, item[0].HotDegree)

// case3: add bucket successfully and the hot degree should inherit from the old one.
buckets = newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20")}, 0)
task = NewCheckPeerTask(buckets)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
hotBuckets = getAllBucketStats(ctx, hotCache)
time.Sleep(time.Millisecond * 10)
item = hotBuckets[uint64(1)]
c.Assert(item, HasLen, 1)
c.Assert(item[0].HotDegree, Equals, -2)
re.Len(item, 1)
re.Equal(-2, item[0].HotDegree)
}

func (s *testHotBucketTaskCache) TestCollectBucketStatsTask(c *C) {
func TestCollectBucketStatsTask(t *testing.T) {
re := require.New(t)
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
hotCache := NewBucketsCache(ctx)
@@ -117,11 +117,11 @@ func (s *testHotBucketTaskCache) TestCollectBucketStatsTask(c *C) {
}
time.Sleep(time.Millisecond * 10)
task := NewCollectBucketStatsTask(-100)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
stats := task.WaitRet(ctx)
c.Assert(stats, HasLen, 10)
re.Len(stats, 10)
task = NewCollectBucketStatsTask(1)
c.Assert(hotCache.CheckAsync(task), IsTrue)
re.True(hotCache.CheckAsync(task))
stats = task.WaitRet(ctx)
c.Assert(stats, HasLen, 0)
re.Len(stats, 0)
}
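For context on the task tests above: the cache handles check tasks on a background goroutine, so the converted tests submit a task with CheckAsync, sleep briefly, and then read the results back through a stats task. The fragment below is a minimal sketch of that flow, not part of the commit; it assumes the package-internal helpers visible in this diff (NewBucketsCache, NewCheckPeerTask, NewCollectBucketStatsTask, newTestBuckets) and the same imports as hot_bucket_task_test.go.

func TestAsyncCheckSketch(t *testing.T) {
	re := require.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cache := NewBucketsCache(ctx)
	buckets := newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20")}, 0)

	// The peer check is queued and processed asynchronously by the cache's worker.
	re.True(cache.CheckAsync(NewCheckPeerTask(buckets)))
	time.Sleep(10 * time.Millisecond)

	// A very low hot-degree threshold collects every bucket stat that was recorded.
	task := NewCollectBucketStatsTask(-100)
	re.True(cache.CheckAsync(task))
	re.NotEmpty(task.WaitRet(ctx))
}

The short sleep mirrors the existing tests: it gives the worker goroutine time to drain the task queue before the stats are read back.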
