
fix: Search/Query may fail during updating delegator cache #37174

Merged 3 commits on Oct 28, 2024
20 changes: 18 additions & 2 deletions internal/proxy/lb_policy.go
@@ -98,14 +98,30 @@ func (lb *LBPolicyImpl) Start(ctx context.Context) {
}
}

func (lb *LBPolicyImpl) GetShardLeaders(ctx context.Context, dbName string, collName string, collectionID int64, withCache bool) (map[string][]nodeInfo, error) {
var shardLeaders map[string][]nodeInfo
// use retry to handle query coord service not ready
err := retry.Handle(ctx, func() (bool, error) {
var err error
shardLeaders, err = globalMetaCache.GetShards(ctx, withCache, dbName, collName, collectionID)
if err != nil {
return true, err
}

return false, nil
})

return shardLeaders, err
}

// try to select the best node from the available nodes
func (lb *LBPolicyImpl) selectNode(ctx context.Context, balancer LBBalancer, workload ChannelWorkload, excludeNodes typeutil.UniqueSet) (int64, error) {
availableNodes := lo.FilterMap(workload.shardLeaders, func(node int64, _ int) (int64, bool) { return node, !excludeNodes.Contain(node) })
targetNode, err := balancer.SelectNode(ctx, availableNodes, workload.nq)
if err != nil {
log := log.Ctx(ctx)
globalMetaCache.DeprecateShardCache(workload.db, workload.collectionName)
shardLeaders, err := globalMetaCache.GetShards(ctx, false, workload.db, workload.collectionName, workload.collectionID)
shardLeaders, err := lb.GetShardLeaders(ctx, workload.db, workload.collectionName, workload.collectionID, false)
if err != nil {
log.Warn("failed to get shard delegator",
zap.Int64("collectionID", workload.collectionID),
@@ -195,7 +211,7 @@ func (lb *LBPolicyImpl) ExecuteWithRetry(ctx context.Context, workload ChannelWo

// Execute will execute collection workload in parallel
func (lb *LBPolicyImpl) Execute(ctx context.Context, workload CollectionWorkLoad) error {
dml2leaders, err := globalMetaCache.GetShards(ctx, true, workload.db, workload.collectionName, workload.collectionID)
dml2leaders, err := lb.GetShardLeaders(ctx, workload.db, workload.collectionName, workload.collectionID, true)
if err != nil {
log.Ctx(ctx).Warn("failed to get shards", zap.Error(err))
return err
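The new GetShardLeaders wrapper retries the metadata-cache lookup so a transient "query coord service not ready" error no longer fails the Search/Query outright. As a rough illustration of the retry contract used above (the real helper is Milvus' retry.Handle; the function below is a simplified stand-in with hypothetical attempt and backoff parameters, not the actual implementation):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// handle keeps calling fn until it succeeds, fn reports the error as
// non-retriable, the context is cancelled, or maxAttempts is exhausted.
// fn returns (retriable, err), mirroring the closure passed to retry.Handle.
func handle(ctx context.Context, fn func() (bool, error), maxAttempts int, backoff time.Duration) error {
	var lastErr error
	for i := 0; i < maxAttempts; i++ {
		retriable, err := fn()
		if err == nil {
			return nil
		}
		if !retriable {
			return err
		}
		lastErr = err
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
		}
	}
	return lastErr
}

func main() {
	attempts := 0
	err := handle(context.Background(), func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("query coord service not ready") // transient: retry
		}
		return false, nil // cache lookup succeeded: stop retrying
	}, 5, 10*time.Millisecond)
	fmt.Println(attempts, err) // prints: 3 <nil>
}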
6 changes: 3 additions & 3 deletions internal/proxy/meta_cache.go
@@ -1001,9 +1001,7 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, database, col
if _, ok := m.collLeader[database]; !ok {
m.collLeader[database] = make(map[string]*shardLeaders)
}

m.collLeader[database][collectionName] = newShardLeaders
m.leaderMut.Unlock()

iterator := newShardLeaders.GetReader()
ret := iterator.Shuffle()
@@ -1013,8 +1011,10 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, database, col
oldLeaders = cacheShardLeaders.shardLeaders
}
// update refcnt in shardClientMgr
// and create new client for new leaders
// updating shard leaders just creates an empty client pool,
// and new clients are initialized lazily in getClient
_ = m.shardMgr.UpdateShardLeaders(oldLeaders, ret)
m.leaderMut.Unlock()

metrics.ProxyUpdateCacheLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return ret, nil
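The meta_cache.go change keeps leaderMut held across both steps: writing the new shard-leader entry into collLeader and refreshing the client table via shardMgr.UpdateShardLeaders. Because updating shard leaders now only registers empty client pools (real connections are created lazily in getClient, per the comment above), the longer critical section stays cheap, and a concurrent Search/Query can no longer observe new leaders before their client entries exist. A minimal sketch of that widened critical section, using simplified stand-in types rather than the real MetaCache and shardClientMgr:

package main

import (
	"fmt"
	"sync"
)

type leaderCache struct {
	mu      sync.Mutex
	leaders map[string][]string // collection -> shard leader addresses
	pools   map[string]struct{} // leader address -> (empty) client pool entry
}

// update installs the new leader list and registers their client pools while
// still holding mu, so readers never see leaders whose pool entries are missing.
func (c *leaderCache) update(collection string, leaders []string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.leaders[collection] = leaders
	for _, addr := range leaders {
		if _, ok := c.pools[addr]; !ok {
			c.pools[addr] = struct{}{} // empty pool; real clients are dialed lazily on first use
		}
	}
}

func main() {
	c := &leaderCache{leaders: map[string][]string{}, pools: map[string]struct{}{}}
	c.update("demo_collection", []string{"querynode-1:21123", "querynode-2:21123"})
	fmt.Println(c.leaders, len(c.pools)) // both the leader list and its pool entries are visible together
}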
66 changes: 38 additions & 28 deletions internal/proxy/shard_client.go
@@ -31,26 +31,37 @@
type shardClient struct {
sync.RWMutex
info nodeInfo
client types.QueryNodeClient
isClosed bool
refCnt int
clients []types.QueryNodeClient
idx atomic.Int64
poolSize int
pooling bool

initialized atomic.Bool
creator queryNodeCreatorFunc
}

func (n *shardClient) getClient(ctx context.Context) (types.QueryNodeClient, error) {
if !n.initialized.Load() {
n.Lock()
if !n.initialized.Load() {
if err := n.initClients(); err != nil {
return nil, err
}

Codecov / codecov/patch: added lines #L50-L51 in internal/proxy/shard_client.go were not covered by tests.
n.initialized.Store(true)
}
n.Unlock()
}

n.RLock()
defer n.RUnlock()
if n.isClosed {
return nil, errClosed
}
if n.pooling {
idx := n.idx.Inc()
return n.clients[int(idx)%n.poolSize], nil
}
return n.client, nil

idx := n.idx.Inc()
return n.clients[int(idx)%n.poolSize], nil
}
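getClient now builds the client pool lazily on first use, guarded by a double-checked initialized flag, and then hands out clients round-robin via an atomic counter. Below is a self-contained sketch of that pattern with a placeholder client type (not the QueryNode client API); unlike the diff as shown, the sketch releases the mutex before returning when initialization fails, so an error cannot leave the lock held:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type queryNodeClient interface{ Name() string }

type fakeClient struct{ id int }

func (f fakeClient) Name() string { return fmt.Sprintf("client-%d", f.id) }

type pooledClient struct {
	mu          sync.RWMutex
	initialized atomic.Bool
	idx         atomic.Int64
	clients     []queryNodeClient
	poolSize    int
}

// get lazily initializes the pool (double-checked locking), then selects a
// client round-robin with an atomic index.
func (p *pooledClient) get() (queryNodeClient, error) {
	if !p.initialized.Load() {
		p.mu.Lock()
		if !p.initialized.Load() {
			if err := p.initClients(); err != nil {
				p.mu.Unlock() // release before the early return on failure
				return nil, err
			}
			p.initialized.Store(true)
		}
		p.mu.Unlock()
	}

	p.mu.RLock()
	defer p.mu.RUnlock()
	i := p.idx.Add(1)
	return p.clients[int(i)%p.poolSize], nil
}

func (p *pooledClient) initClients() error {
	p.poolSize = 3
	for i := 0; i < p.poolSize; i++ {
		p.clients = append(p.clients, fakeClient{id: i})
	}
	return nil
}

func main() {
	p := &pooledClient{}
	for i := 0; i < 4; i++ {
		c, _ := p.get()
		fmt.Println(c.Name()) // client-1, client-2, client-0, client-1 (round-robin)
	}
}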

func (n *shardClient) inc() {
@@ -65,12 +76,13 @@
func (n *shardClient) close() {
n.isClosed = true
n.refCnt = 0
if n.client != nil {
if err := n.client.Close(); err != nil {

for _, client := range n.clients {
if err := client.Close(); err != nil {
log.Warn("close grpc client failed", zap.Error(err))
}
n.client = nil
}
n.clients = nil
}

func (n *shardClient) dec() bool {
@@ -94,41 +106,39 @@
n.close()
}

func newShardClient(info *nodeInfo, client types.QueryNodeClient) *shardClient {
ret := &shardClient{
func newPoolingShardClient(info *nodeInfo, creator queryNodeCreatorFunc) (*shardClient, error) {
return &shardClient{
info: nodeInfo{
nodeID: info.nodeID,
address: info.address,
},
client: client,
refCnt: 1,
}
return ret
refCnt: 1,
pooling: true,
creator: creator,
}, nil
}

func newPoolingShardClient(info *nodeInfo, creator queryNodeCreatorFunc) (*shardClient, error) {
func (n *shardClient) initClients() error {
num := paramtable.Get().ProxyCfg.QueryNodePoolingSize.GetAsInt()
if num <= 0 {
num = 1
}
clients := make([]types.QueryNodeClient, 0, num)
for i := 0; i < num; i++ {
client, err := creator(context.Background(), info.address, info.nodeID)
client, err := n.creator(context.Background(), n.info.address, n.info.nodeID)
if err != nil {
return nil, err
// roll back already created clients
for _, c := range clients[:i] {
c.Close()
}
return errors.Wrap(err, fmt.Sprintf("create client for node=%d failed", n.info.nodeID))

Codecov / codecov/patch: added lines #L130-L134 in internal/proxy/shard_client.go were not covered by tests.
Collaborator comment: Will the already created client be cleaned up before returning an error?

}
clients = append(clients, client)
}
return &shardClient{
info: nodeInfo{
nodeID: info.nodeID,
address: info.address,
},
refCnt: 1,
pooling: true,
clients: clients,
poolSize: num,
}, nil

n.clients = clients
n.poolSize = num
return nil
}
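initClients closes every client it has already created before returning an error, which addresses the collaborator question quoted above: a failed initialization never leaves a partially built pool behind. A small stand-alone sketch of the same rollback-on-failure pattern, using a hypothetical resource type rather than the Milvus QueryNode client:

package main

import (
	"errors"
	"fmt"
)

type resource struct{ id int }

func (r *resource) Close() { fmt.Printf("closed resource %d\n", r.id) }

// buildPool creates n resources; if any creation fails it closes the ones
// already created and returns the error, leaving no partial pool behind.
func buildPool(n int, create func(i int) (*resource, error)) ([]*resource, error) {
	pool := make([]*resource, 0, n)
	for i := 0; i < n; i++ {
		r, err := create(i)
		if err != nil {
			for _, created := range pool {
				created.Close() // roll back already created resources
			}
			return nil, fmt.Errorf("create resource %d failed: %w", i, err)
		}
		pool = append(pool, r)
	}
	return pool, nil
}

func main() {
	_, err := buildPool(3, func(i int) (*resource, error) {
		if i == 2 {
			return nil, errors.New("connection refused") // third creation fails
		}
		return &resource{id: i}, nil
	})
	fmt.Println(err) // the first two resources are closed before this error is returned
}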

type shardClientMgr interface {