*: add test for syncer (#8873) (#8875)
ref #8823

Signed-off-by: Ryan Leung <[email protected]>
rleungx authored Dec 6, 2024
1 parent 0035391 commit 81a0619
Showing 4 changed files with 165 additions and 56 deletions.
88 changes: 88 additions & 0 deletions pkg/mock/mockserver/mockserver.go
@@ -0,0 +1,88 @@
// Copyright 2024 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mockserver

import (
	"context"

	"github.com/pingcap/kvproto/pkg/pdpb"
	"github.com/tikv/pd/pkg/core"
	"github.com/tikv/pd/pkg/storage"
	"github.com/tikv/pd/pkg/utils/grpcutil"
)

// MockServer is used to mock Server for test use.
type MockServer struct {
	ctx            context.Context
	member, leader *pdpb.Member
	storage        storage.Storage
	bc             *core.BasicCluster
}

// NewMockServer creates a new MockServer.
func NewMockServer(ctx context.Context, member, leader *pdpb.Member, storage storage.Storage, bc *core.BasicCluster) *MockServer {
	return &MockServer{
		ctx:     ctx,
		member:  member,
		leader:  leader,
		storage: storage,
		bc:      bc,
	}
}

// LoopContext returns the context of the server.
func (s *MockServer) LoopContext() context.Context {
	return s.ctx
}

// ClusterID returns the cluster ID of the server.
func (*MockServer) ClusterID() uint64 {
	return 1
}

// GetMemberInfo returns the member info of the server.
func (s *MockServer) GetMemberInfo() *pdpb.Member {
	return s.member
}

// GetLeader returns the leader of the server.
func (s *MockServer) GetLeader() *pdpb.Member {
	return s.leader
}

// GetStorage returns the storage of the server.
func (s *MockServer) GetStorage() storage.Storage {
	return s.storage
}

// Name returns the name of the server.
func (*MockServer) Name() string {
	return "mock-server"
}

// GetRegions returns the regions of the server.
func (s *MockServer) GetRegions() []*core.RegionInfo {
	return s.bc.GetRegions()
}

// GetTLSConfig returns the TLS config of the server.
func (*MockServer) GetTLSConfig() *grpcutil.TLSConfig {
	return &grpcutil.TLSConfig{}
}

// GetBasicCluster returns the basic cluster of the server.
func (s *MockServer) GetBasicCluster() *core.BasicCluster {
	return s.bc
}
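
For orientation, MockServer provides the method set that pkg/syncer needs from its server dependency. Below is a hedged sketch of that consumer-side interface, inferred from the methods above; the interface name and its exact definition in the PD code base are assumptions, not part of this diff (imports as in mockserver.go above).

// Hypothetical sketch (not from this commit): the interface pkg/syncer is
// assumed to consume; MockServer satisfies it method for method.
type Server interface {
	LoopContext() context.Context
	ClusterID() uint64
	GetMemberInfo() *pdpb.Member
	GetLeader() *pdpb.Member
	GetStorage() storage.Storage
	Name() string
	GetRegions() []*core.RegionInfo
	GetTLSConfig() *grpcutil.TLSConfig
	GetBasicCluster() *core.BasicCluster
}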
20 changes: 18 additions & 2 deletions pkg/syncer/client.go
@@ -90,6 +90,8 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) {
	go func() {
		defer logutil.LogPanic()
		defer s.wg.Done()
		timer := time.NewTimer(retryInterval)
		defer timer.Stop()
		// used to load region from kv storage to cache storage.
		bc := s.server.GetBasicCluster()
		regionStorage := s.server.GetStorage()
@@ -140,11 +142,18 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) {
					}
				}
				log.Error("server failed to establish sync stream with leader", zap.String("server", s.server.Name()), zap.String("leader", s.server.GetLeader().GetName()), errs.ZapError(err))
				if !timer.Stop() {
					select {
					case <-timer.C: // try to drain from the channel
					default:
					}
				}
				timer.Reset(retryInterval)
				select {
				case <-ctx.Done():
					log.Info("stop synchronizing with leader due to context canceled")
					return
				case <-time.After(retryInterval):
				case <-timer.C:
				}
				continue
			}
@@ -157,11 +166,18 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) {
					if err = stream.CloseSend(); err != nil {
						log.Error("failed to terminate client stream", errs.ZapError(errs.ErrGRPCCloseSend, err))
					}
					if !timer.Stop() {
						select {
						case <-timer.C: // try to drain from the channel
						default:
						}
					}
					timer.Reset(retryInterval)
					select {
					case <-ctx.Done():
						log.Info("stop synchronizing with leader due to context canceled")
						return
					case <-time.After(retryInterval):
					case <-timer.C:
					}
					break
				}
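
The hunks above replace the per-iteration time.After wait with a single timer that is stopped, drained, and re-armed before each retry, so the goroutine reuses one timer instead of allocating a new channel on every failure while ctx cancellation still preempts the wait. A minimal, self-contained sketch of that stop/drain/reset pattern (function and variable names here are illustrative, not taken from the PD code):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryLoop re-arms one timer per iteration instead of calling time.After
// each time; ctx cancellation preempts the wait so shutdown stays prompt.
func retryLoop(ctx context.Context, retryInterval time.Duration, attempt func() error) error {
	timer := time.NewTimer(retryInterval)
	defer timer.Stop()
	for {
		if err := attempt(); err == nil {
			return nil
		}
		// Stop returns false if the timer already fired; drain the channel
		// so the following Reset starts from a clean state.
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
		timer.Reset(retryInterval)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	err := retryLoop(ctx, 100*time.Millisecond, func() error { return errors.New("not ready") })
	fmt.Println(err) // context deadline exceeded
}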
69 changes: 15 additions & 54 deletions pkg/syncer/client_test.go
@@ -21,9 +21,9 @@ import (

	"github.com/pingcap/failpoint"
	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/pingcap/kvproto/pkg/pdpb"
	"github.com/stretchr/testify/require"
	"github.com/tikv/pd/pkg/core"
	"github.com/tikv/pd/pkg/mock/mockserver"
	"github.com/tikv/pd/pkg/storage"
	"github.com/tikv/pd/pkg/utils/grpcutil"
	"google.golang.org/grpc/codes"
@@ -37,11 +37,13 @@ func TestLoadRegion(t *testing.T) {
	rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil)
	re.NoError(err)

	server := &mockServer{
		ctx:     context.Background(),
		storage: storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs),
		bc:      core.NewBasicCluster(),
	}
	server := mockserver.NewMockServer(
		context.Background(),
		nil,
		nil,
		storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs),
		core.NewBasicCluster(),
	)
	for i := 0; i < 30; i++ {
		rs.SaveRegion(&metapb.Region{Id: uint64(i) + 1})
	}
@@ -64,11 +66,13 @@ func TestErrorCode(t *testing.T) {
	tempDir := t.TempDir()
	rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil)
	re.NoError(err)
	server := &mockServer{
		ctx:     context.Background(),
		storage: storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs),
		bc:      core.NewBasicCluster(),
	}
	server := mockserver.NewMockServer(
		context.Background(),
		nil,
		nil,
		storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs),
		core.NewBasicCluster(),
	)
	ctx, cancel := context.WithCancel(context.TODO())
	rc := NewRegionSyncer(server)
	conn, err := grpcutil.GetClientConn(ctx, "http://127.0.0.1", nil)
@@ -79,46 +83,3 @@
	re.True(ok)
	re.Equal(codes.Canceled, ev.Code())
}

type mockServer struct {
	ctx            context.Context
	member, leader *pdpb.Member
	storage        storage.Storage
	bc             *core.BasicCluster
}

func (s *mockServer) LoopContext() context.Context {
	return s.ctx
}

func (s *mockServer) ClusterID() uint64 {
	return 1
}

func (s *mockServer) GetMemberInfo() *pdpb.Member {
	return s.member
}

func (s *mockServer) GetLeader() *pdpb.Member {
	return s.leader
}

func (s *mockServer) GetStorage() storage.Storage {
	return s.storage
}

func (s *mockServer) Name() string {
	return "mock-server"
}

func (s *mockServer) GetRegions() []*core.RegionInfo {
	return s.bc.GetRegions()
}

func (s *mockServer) GetTLSConfig() *grpcutil.TLSConfig {
	return &grpcutil.TLSConfig{}
}

func (s *mockServer) GetBasicCluster() *core.BasicCluster {
	return s.bc
}
44 changes: 44 additions & 0 deletions tests/server/cluster/cluster_test.go
@@ -35,13 +35,15 @@ import (
	"github.com/tikv/pd/pkg/dashboard"
	"github.com/tikv/pd/pkg/id"
	"github.com/tikv/pd/pkg/mock/mockid"
	"github.com/tikv/pd/pkg/mock/mockserver"
	sc "github.com/tikv/pd/pkg/schedule/config"
	"github.com/tikv/pd/pkg/schedule/operator"
	"github.com/tikv/pd/pkg/schedule/schedulers"
	"github.com/tikv/pd/pkg/statistics"
	"github.com/tikv/pd/pkg/storage"
	"github.com/tikv/pd/pkg/syncer"
	"github.com/tikv/pd/pkg/tso"
	"github.com/tikv/pd/pkg/utils/tempurl"
	"github.com/tikv/pd/pkg/utils/testutil"
	"github.com/tikv/pd/pkg/utils/tsoutil"
	"github.com/tikv/pd/pkg/utils/typeutil"
@@ -1812,3 +1814,45 @@ func TestExternalTimestamp(t *testing.T) {
		re.Equal(ts, resp4.GetTimestamp())
	}
}

func TestFollowerExitSyncTime(t *testing.T) {
	re := require.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tc, err := tests.NewTestCluster(ctx, 1)
	defer tc.Destroy()
	re.NoError(err)
	err = tc.RunInitialServers()
	re.NoError(err)
	tc.WaitLeader()
	leaderServer := tc.GetLeaderServer()
	re.NoError(leaderServer.BootstrapCluster())

	tempDir := t.TempDir()
	rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil)
	re.NoError(err)

	server := mockserver.NewMockServer(
		context.Background(),
		&pdpb.Member{MemberId: 1, Name: "test", ClientUrls: []string{tempurl.Alloc()}},
		nil,
		storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs),
		core.NewBasicCluster(),
	)
	s := syncer.NewRegionSyncer(server)
	s.StartSyncWithLeader(leaderServer.GetAddr())
	time.Sleep(time.Second)

	// Record the time just before triggering the exit from sync.
	startTime := time.Now()

	// Simulate a leader change scenario by calling StopSyncWithLeader
	// directly to make the syncer exit.
	s.StopSyncWithLeader()

	// Calculate how long the exit took.
	elapsedTime := time.Since(startTime)

	// Assert that the sync exit time is within the expected range.
	re.Less(elapsedTime, time.Second)
}
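
The assertion above relies on the retry wait being interruptible: StopSyncWithLeader is assumed to cancel the sync goroutine's context, so the select in StartSyncWithLeader (see pkg/syncer/client.go above) returns from the ctx.Done() branch instead of sleeping out the full retryInterval. A tiny, hypothetical illustration of that shutdown path, independent of the PD code:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	const retryInterval = 10 * time.Second
	ctx, cancel := context.WithCancel(context.Background())

	done := make(chan struct{})
	go func() {
		defer close(done)
		timer := time.NewTimer(retryInterval)
		defer timer.Stop()
		select {
		case <-ctx.Done(): // shutdown path the test exercises
		case <-timer.C: // normal retry path
		}
	}()

	start := time.Now()
	cancel() // stands in for StopSyncWithLeader cancelling the sync context
	<-done
	// Exits almost immediately, far below retryInterval and the test's 1s bound.
	fmt.Println("exit took", time.Since(start))
}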
