enhance: [2.4] Reduce memory usage of BF in DataNode and QueryNode #38215

Open · wants to merge 3 commits into base: 2.4
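For a sense of the memory this PR targets, the standard bloom filter sizing formula gives m = -n·ln(p)/(ln 2)² bits for n keys at false-positive rate p. This is generic BF math, not a claim about Milvus's exact filter implementation, and the numbers below are illustrative assumptions rather than measurements from this PR:

```go
// Back-of-envelope BF sizing: standard formula, illustrative numbers only.
package main

import (
	"fmt"
	"math"
)

// bloomBits returns the optimal bit count for n keys at false-positive
// rate p: m = -n * ln(p) / (ln 2)^2.
func bloomBits(n, p float64) float64 {
	return -n * math.Log(p) / (math.Ln2 * math.Ln2)
}

func main() {
	// Hypothetical example: 1M primary keys per growing segment at p = 0.001
	// needs ~14.4 Mbit, i.e. roughly 1.8 MB per segment, before overhead.
	bits := bloomBits(1e6, 0.001)
	fmt.Printf("~%.1f MB per segment\n", bits/8/1e6)
}
```

Multiplied across many growing segments on a DataNode or QueryNode, per-segment filters of this size add up, which is what skipping them saves.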
internal/datanode/writebuffer/l0_write_buffer.go (12 additions, 11 deletions)

```diff
@@ -165,23 +165,24 @@
 	if paramtable.Get().DataNodeCfg.SkipBFStatsLoad.GetAsBool() {
 		// In Skip BF mode, datanode no longer maintains bloom filters.
-		// So, here we skip filtering delete entries.
+		// So, here we skip generating BF (growing segment's BF will be regenerated during the sync phase)
+		// and also skip filtering delete entries by bf.
 		wb.dispatchDeleteMsgsWithoutFilter(deleteMsgs, startPos, endPos)
 	} else {
 		// distribute delete msg
 		// bf write buffer check bloom filter of segment and current insert batch to decide which segment to write delete data
 		wb.dispatchDeleteMsgs(groups, deleteMsgs, startPos, endPos)
-	}
 
-	// update pk oracle
-	for _, inData := range groups {
-		// segment shall always exists after buffer insert
-		segments := wb.metaCache.GetSegmentsBy(metacache.WithSegmentIDs(inData.segmentID))
-		for _, segment := range segments {
-			for _, fieldData := range inData.pkField {
-				err := segment.GetBloomFilterSet().UpdatePKRange(fieldData)
-				if err != nil {
-					return err
+		// update pk oracle
+		for _, inData := range groups {
+			// segment shall always exists after buffer insert
+			segments := wb.metaCache.GetSegmentsBy(metacache.WithSegmentIDs(inData.segmentID))
+			for _, segment := range segments {
+				for _, fieldData := range inData.pkField {
+					err := segment.GetBloomFilterSet().UpdatePKRange(fieldData)
+					if err != nil {
+						return err
+					}
 				}
 			}
 		}
 	}
```

Codecov: added lines internal/datanode/writebuffer/l0_write_buffer.go#L176-L185 were not covered by tests.
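To see why the skip path can drop per-segment filtering, here is a minimal, self-contained sketch (toy types of my own, not Milvus internals) of the two dispatch strategies the code above chooses between: with a BF, a delete PK is routed only to segments whose filter might contain it; without one, every segment must receive it and the exact check happens later.

```go
// Toy contrast between BF-filtered and unfiltered delete dispatch.
package main

import "fmt"

// bloomFilter stands in for the real per-segment filter.
type bloomFilter interface {
	MayContain(pk int64) bool
}

type segment struct {
	id int64
	bf bloomFilter
}

// candidates routes a delete PK to segments. With useBF=false (the
// SkipBFStatsLoad path above), no pruning is possible, so every segment is
// a candidate and the exact membership check is deferred to a later stage.
func candidates(pk int64, segs []segment, useBF bool) []int64 {
	var out []int64
	for _, s := range segs {
		// A BF can yield false positives but never false negatives,
		// so pruning on a negative answer is always safe.
		if !useBF || s.bf.MayContain(pk) {
			out = append(out, s.id)
		}
	}
	return out
}

// modFilter is a deliberately crude filter standing in for a real BF.
type modFilter struct{ m int64 }

func (f modFilter) MayContain(pk int64) bool { return pk%f.m == 0 }

func main() {
	segs := []segment{{id: 1, bf: modFilter{m: 2}}, {id: 2, bf: modFilter{m: 3}}}
	fmt.Println(candidates(6, segs, true))  // [1 2]: both filters report a possible hit
	fmt.Println(candidates(7, segs, false)) // [1 2]: unfiltered path targets everything
}
```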
internal/datanode/writebuffer/l0_write_buffer_test.go (0 additions, 5 deletions)

```diff
@@ -16,7 +16,6 @@ import (
 	"github.com/milvus-io/milvus/internal/allocator"
 	"github.com/milvus-io/milvus/internal/datanode/metacache"
 	"github.com/milvus-io/milvus/internal/datanode/syncmgr"
-	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/internal/storage"
 	"github.com/milvus-io/milvus/pkg/common"
 	"github.com/milvus-io/milvus/pkg/metrics"
@@ -181,8 +180,6 @@ func (s *L0WriteBufferSuite) TestBufferData() {
 	pks, msg := s.composeInsertMsg(1000, 10, 128, schemapb.DataType_Int64)
 	delMsg := s.composeDeleteMsg(lo.Map(pks, func(id int64, _ int) storage.PrimaryKey { return storage.NewInt64PrimaryKey(id) }))
 
-	seg := metacache.NewSegmentInfo(&datapb.SegmentInfo{ID: 1000}, metacache.NewBloomFilterSet())
-	s.metacache.EXPECT().GetSegmentsBy(mock.Anything, mock.Anything).Return([]*metacache.SegmentInfo{seg})
 	s.metacache.EXPECT().GetSegmentByID(int64(1000)).Return(nil, false).Once()
 	s.metacache.EXPECT().AddSegment(mock.Anything, mock.Anything, mock.Anything).Return()
 	s.metacache.EXPECT().UpdateSegments(mock.Anything, mock.Anything).Return()
@@ -210,8 +207,6 @@ func (s *L0WriteBufferSuite) TestBufferData() {
 	pks, msg := s.composeInsertMsg(1000, 10, 128, schemapb.DataType_VarChar)
 	delMsg := s.composeDeleteMsg(lo.Map(pks, func(id int64, _ int) storage.PrimaryKey { return storage.NewInt64PrimaryKey(id) }))
 
-	seg := metacache.NewSegmentInfo(&datapb.SegmentInfo{ID: 1000}, metacache.NewBloomFilterSet())
-	s.metacache.EXPECT().GetSegmentsBy(mock.Anything, mock.Anything).Return([]*metacache.SegmentInfo{seg})
 	s.metacache.EXPECT().AddSegment(mock.Anything, mock.Anything, mock.Anything).Return()
 	s.metacache.EXPECT().UpdateSegments(mock.Anything, mock.Anything).Return()
 
```
internal/querynodev2/segments/segment.go (15 additions, 0 deletions)

```diff
@@ -94,6 +94,7 @@ type baseSegment struct {
 	bloomFilterSet *pkoracle.BloomFilterSet
 	loadInfo       *atomic.Pointer[querypb.SegmentLoadInfo]
 	isLazyLoad     bool
+	skipGrowingBF  bool // Skip generating or maintaining BF for growing segments; deletion checks will be handled in segcore.
 	channel        metautil.Channel
 
 	resourceUsageCache *atomic.Pointer[ResourceUsage]
@@ -114,6 +115,7 @@ func newBaseSegment(collection *Collection, segmentType SegmentType, version int
 		bloomFilterSet: pkoracle.NewBloomFilterSet(loadInfo.GetSegmentID(), loadInfo.GetPartitionID(), segmentType),
 		channel:        channel,
 		isLazyLoad:     isLazyLoad(collection, segmentType),
+		skipGrowingBF:  segmentType == SegmentTypeGrowing && paramtable.Get().QueryNodeCfg.SkipGrowingSegmentBF.GetAsBool(),
 
 		resourceUsageCache: atomic.NewPointer[ResourceUsage](nil),
 		needUpdatedVersion: atomic.NewInt64(0),
@@ -183,17 +185,30 @@ func (s *baseSegment) LoadInfo() *querypb.SegmentLoadInfo {
 }
 
 func (s *baseSegment) UpdateBloomFilter(pks []storage.PrimaryKey) {
+	if s.skipGrowingBF {
+		return
+	}
 	s.bloomFilterSet.UpdateBloomFilter(pks)
 }
 
 // MayPkExist returns true if the given PK exists in the PK range and being positive through the bloom filter,
 // false otherwise,
 // may returns true even the PK doesn't exist actually
 func (s *baseSegment) MayPkExist(pk *storage.LocationsCache) bool {
+	if s.skipGrowingBF {
+		return true
+	}
	return s.bloomFilterSet.MayPkExist(pk)
 }
 
 func (s *baseSegment) BatchPkExist(lc *storage.BatchLocationsCache) []bool {
+	if s.skipGrowingBF {
+		allPositive := make([]bool, lc.Size())
+		for i := 0; i < lc.Size(); i++ {
+			allPositive[i] = true
+		}
+		return allPositive
+	}
 	return s.bloomFilterSet.BatchPkExist(lc)
 }
 
```
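The pattern above is the conservative degradation of a bloom filter: answering "maybe present" for every key is always correct, because the filter only prunes definite misses, and the authoritative PK check still runs when the delete is applied (per the new field comment, in segcore). A minimal sketch of that invariant, with toy types of my own rather than Milvus code:

```go
// Correctness of the "always maybe" fallback: a bloom filter may only be
// used to skip work on definite misses, so replacing its answer with a
// constant true preserves results and trades extra exact checks for the
// filter's memory.
package main

import "fmt"

// applyDelete deletes pk if present, using mayExist only as a fast pre-check.
func applyDelete(pk int64, data map[int64]struct{}, mayExist func(int64) bool) bool {
	if !mayExist(pk) {
		return false // pruned: the filter said "definitely absent"
	}
	if _, ok := data[pk]; !ok {
		return false // authoritative check rejects (segcore's role here)
	}
	delete(data, pk)
	return true
}

func main() {
	data := map[int64]struct{}{42: {}}
	alwaysMaybe := func(int64) bool { return true } // the skipGrowingBF behaviour
	fmt.Println(applyDelete(42, data, alwaysMaybe)) // true: delete applied
	fmt.Println(applyDelete(7, data, alwaysMaybe))  // false: exact check rejects
}
```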
pkg/util/paramtable/component_param.go (10 additions, 0 deletions)

```diff
@@ -2448,6 +2448,8 @@
 	DefaultSegmentFilterRatio ParamItem `refreshable:"false"`
 	UseStreamComputing        ParamItem `refreshable:"false"`
 
+	// BF
+	SkipGrowingSegmentBF           ParamItem `refreshable:"true"`
 	BloomFilterApplyParallelFactor ParamItem `refreshable:"true"`
 
 	QueryStreamBatchSize ParamItem `refreshable:"false"`
@@ -3144,6 +3146,14 @@
 	}
 	p.QueryStreamBatchSize.Init(base.mgr)
 
+	p.SkipGrowingSegmentBF = ParamItem{
+		Key:          "queryNode.skipGrowingSegmentBF",
+		Version:      "2.5",
+		DefaultValue: "true",
+		Doc:          "indicates whether to skip creating, maintaining, or checking Bloom Filters for growing segments",
+	}
+	p.SkipGrowingSegmentBF.Init(base.mgr)
+
 	p.WorkerPoolingSize = ParamItem{
 		Key:     "queryNode.workerPooling.size",
 		Version: "2.4.7",
```
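A usage sketch of the new knob, mirroring the accessor newBaseSegment uses above. To disable the behaviour, the corresponding YAML key would be `queryNode.skipGrowingSegmentBF: false` (assuming the usual milvus.yaml layout). One observation from this diff, not a tested claim: although the param is declared `refreshable:"true"`, segment.go samples it once at construction, so a runtime change would only affect segments created afterwards.

```go
// Minimal read of the new param; paramtable.Init as the setup entry point
// is an assumption for a standalone program.
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/util/paramtable"
)

func main() {
	paramtable.Init()
	skip := paramtable.Get().QueryNodeCfg.SkipGrowingSegmentBF.GetAsBool()
	fmt.Println("skip growing-segment BF:", skip) // defaults to true per this patch
}
```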
pkg/util/paramtable/component_param_test.go (2 additions, 0 deletions)

```diff
@@ -456,6 +456,8 @@ func TestComponentParam(t *testing.T) {
 		assert.Equal(t, 3*time.Second, Params.LazyLoadRequestResourceRetryInterval.GetAsDuration(time.Millisecond))
 
 		assert.Equal(t, 4, Params.BloomFilterApplyParallelFactor.GetAsInt())
+		assert.Equal(t, true, Params.SkipGrowingSegmentBF.GetAsBool())
+
 		assert.Equal(t, "/var/lib/milvus/data/mmap", Params.MmapDirPath.GetValue())
 
 		assert.Equal(t, true, Params.MmapChunkCache.GetAsBool())
```
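If a test needed the old behaviour, the Save/Reset-by-key idiom common in Milvus paramtable tests would presumably apply; a hypothetical sketch under that assumption (package placement and method availability are mine, not from this PR):

```go
package paramtable_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/pkg/util/paramtable"
)

// TestSkipGrowingSegmentBFOverride flips the knob off for one test and
// restores the default afterwards.
func TestSkipGrowingSegmentBFOverride(t *testing.T) {
	paramtable.Init()
	params := paramtable.Get()
	params.Save(params.QueryNodeCfg.SkipGrowingSegmentBF.Key, "false")
	defer params.Reset(params.QueryNodeCfg.SkipGrowingSegmentBF.Key)
	assert.False(t, params.QueryNodeCfg.SkipGrowingSegmentBF.GetAsBool())
}
```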