enhance: Reduce memory usage of BF in DataNode and QueryNode (#38129)

1. DataNode: Skip generating BF during the insert phase (BF will be
regenerated during the sync phase).
2. QueryNode: Skip generating or maintaining BF for growing segments;
deletion checks will be handled in segcore (see the sketch below).

issue: https://github.com/milvus-io/milvus/issues/37630
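
A minimal, self-contained sketch of what point 2 means in practice (all names below are illustrative toys, not the actual QueryNode types): with no Bloom filter on a growing segment, PK-existence checks answer "maybe" for every PK, and the authoritative check happens in segcore when the delete is applied.

package main

import "fmt"

// growingSegment is a toy stand-in for a QueryNode growing segment.
type growingSegment struct {
	skipBF bool           // when true, no Bloom filter is built or consulted
	bf     map[int64]bool // toy "Bloom filter": the set of inserted PKs
}

// mayContain mirrors the BF fast path: with the filter skipped it always
// answers "maybe", so the delete is forwarded and segcore does the real check.
func (s *growingSegment) mayContain(pk int64) bool {
	if s.skipBF {
		return true
	}
	return s.bf[pk]
}

func main() {
	seg := &growingSegment{skipBF: true}
	fmt.Println(seg.mayContain(42)) // true: defer the decision to segcore
}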

---------

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
yihao.dai, 2025-01-15 01:59:01 +08:00 (committed by GitHub)
commit ec2e77b5d7 (parent 272d95ad79)
5 changed files with 41 additions and 16 deletions

internal/flushcommon/writebuffer/l0_write_buffer.go

@@ -165,23 +165,24 @@ func (wb *l0WriteBuffer) BufferData(insertData []*InsertData, deleteMsgs []*msgs
 	if paramtable.Get().DataNodeCfg.SkipBFStatsLoad.GetAsBool() || streamingutil.IsStreamingServiceEnabled() {
 		// In streaming service mode, flushed segments no longer maintain a bloom filter.
-		// So, here we skip filtering delete entries by bf.
+		// So, here we skip generating BF (growing segment's BF will be regenerated during the sync phase)
+		// and also skip filtering delete entries by bf.
 		wb.dispatchDeleteMsgsWithoutFilter(deleteMsgs, startPos, endPos)
 	} else {
 		// distribute delete msg
 		// bf write buffer check bloom filter of segment and current insert batch to decide which segment to write delete data
 		wb.dispatchDeleteMsgs(insertData, deleteMsgs, startPos, endPos)
-	}

-	// update pk oracle
-	for _, inData := range insertData {
-		// segment shall always exists after buffer insert
-		segments := wb.metaCache.GetSegmentsBy(metacache.WithSegmentIDs(inData.segmentID))
-		for _, segment := range segments {
-			for _, fieldData := range inData.pkField {
-				err := segment.GetBloomFilterSet().UpdatePKRange(fieldData)
-				if err != nil {
-					return err
+		// update pk oracle
+		for _, inData := range insertData {
+			// segment shall always exists after buffer insert
+			segments := wb.metaCache.GetSegmentsBy(metacache.WithSegmentIDs(inData.segmentID))
+			for _, segment := range segments {
+				for _, fieldData := range inData.pkField {
+					err := segment.GetBloomFilterSet().UpdatePKRange(fieldData)
+					if err != nil {
+						return err
+					}
 				}
 			}
 		}
 	}
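
What the restructuring above amounts to: on the skip path the per-segment PK statistics (the BF / pk oracle) are no longer touched at all; inserts are only buffered, and the statistics are regenerated once when the segment is synced. A self-contained toy of that shape (names and types are illustrative, not the DataNode's actual ones):

package main

import "fmt"

// pkStats is a toy stand-in for the per-segment PK statistics (BloomFilterSet).
type pkStats struct{ pks map[int64]struct{} }

func buildStats(pks []int64) *pkStats {
	s := &pkStats{pks: make(map[int64]struct{}, len(pks))}
	for _, pk := range pks {
		s.pks[pk] = struct{}{}
	}
	return s
}

// bufferInsert models the new insert path: data is only buffered,
// with no per-insert statistics update.
func bufferInsert(buf *[]int64, pks ...int64) { *buf = append(*buf, pks...) }

// syncSegment models the sync phase: statistics are regenerated once,
// from everything that was buffered, right before the segment is flushed.
func syncSegment(buf []int64) *pkStats { return buildStats(buf) }

func main() {
	var buffered []int64
	bufferInsert(&buffered, 1, 2, 3)
	bufferInsert(&buffered, 4, 5)
	stats := syncSegment(buffered)
	fmt.Println(len(stats.pks)) // 5: PK statistics exist only after sync
}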

internal/flushcommon/writebuffer/l0_write_buffer_test.go

@@ -15,13 +15,11 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/allocator"
"github.com/milvus-io/milvus/internal/flushcommon/metacache"
"github.com/milvus-io/milvus/internal/flushcommon/metacache/pkoracle"
"github.com/milvus-io/milvus/internal/flushcommon/syncmgr"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/proto/datapb"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/testutils"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
@@ -185,8 +183,6 @@ func (s *L0WriteBufferSuite) TestBufferData() {
pks, msg := s.composeInsertMsg(1000, 10, 128, schemapb.DataType_Int64)
delMsg := s.composeDeleteMsg(lo.Map(pks, func(id int64, _ int) storage.PrimaryKey { return storage.NewInt64PrimaryKey(id) }))
seg := metacache.NewSegmentInfo(&datapb.SegmentInfo{ID: 1000}, pkoracle.NewBloomFilterSet(), nil)
s.metacache.EXPECT().GetSegmentsBy(mock.Anything, mock.Anything).Return([]*metacache.SegmentInfo{seg})
s.metacache.EXPECT().GetSegmentByID(int64(1000)).Return(nil, false).Once()
s.metacache.EXPECT().AddSegment(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return()
s.metacache.EXPECT().UpdateSegments(mock.Anything, mock.Anything).Return()
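
Because the skip path dispatches deletes without consulting per-segment Bloom filters, a test that runs BufferData in that mode can also assert that the pk oracle is never reached. A hypothetical, self-contained testify sketch (not this suite's actual code):

package writebuffer_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// fakeMetaCache is a minimal stand-in for the suite's generated metacache mock.
type fakeMetaCache struct {
	mock.Mock
}

// TestSkipPathDoesNotTouchPkOracle sketches the assertion the skip path enables:
// if deletes are dispatched without BF filtering, the pk oracle (reached via
// GetSegmentsBy) is never consulted on the insert path.
func TestSkipPathDoesNotTouchPkOracle(t *testing.T) {
	mc := &fakeMetaCache{}
	// ... run BufferData here with SkipBFStatsLoad or streaming mode enabled ...
	mc.AssertNumberOfCalls(t, "GetSegmentsBy", 0)
}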

internal/querynodev2/segments/segment.go

@@ -91,6 +91,7 @@ type baseSegment struct {
 	bloomFilterSet *pkoracle.BloomFilterSet
 	loadInfo       *atomic.Pointer[querypb.SegmentLoadInfo]
 	isLazyLoad     bool
+	skipGrowingBF  bool // Skip generating or maintaining BF for growing segments; deletion checks will be handled in segcore.
 	channel        metautil.Channel

 	bm25Stats map[int64]*storage.BM25Stats
@@ -114,6 +115,7 @@ func newBaseSegment(collection *Collection, segmentType SegmentType, version int
 		bm25Stats:     make(map[int64]*storage.BM25Stats),
 		channel:       channel,
 		isLazyLoad:    isLazyLoad(collection, segmentType),
+		skipGrowingBF: segmentType == SegmentTypeGrowing && paramtable.Get().QueryNodeCfg.SkipGrowingSegmentBF.GetAsBool(),

 		resourceUsageCache: atomic.NewPointer[ResourceUsage](nil),
 		needUpdatedVersion: atomic.NewInt64(0),
@@ -187,6 +189,9 @@ func (s *baseSegment) LoadInfo() *querypb.SegmentLoadInfo {
 }

 func (s *baseSegment) UpdateBloomFilter(pks []storage.PrimaryKey) {
+	if s.skipGrowingBF {
+		return
+	}
 	s.bloomFilterSet.UpdateBloomFilter(pks)
 }
@@ -208,10 +213,20 @@ func (s *baseSegment) GetBM25Stats() map[int64]*storage.BM25Stats {
 // false otherwise,
 // may returns true even the PK doesn't exist actually
 func (s *baseSegment) MayPkExist(pk *storage.LocationsCache) bool {
+	if s.skipGrowingBF {
+		return true
+	}
 	return s.bloomFilterSet.MayPkExist(pk)
 }

 func (s *baseSegment) BatchPkExist(lc *storage.BatchLocationsCache) []bool {
+	if s.skipGrowingBF {
+		allPositive := make([]bool, lc.Size())
+		for i := 0; i < lc.Size(); i++ {
+			allPositive[i] = true
+		}
+		return allPositive
+	}
 	return s.bloomFilterSet.BatchPkExist(lc)
 }
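
One consequence of the all-positive result above: callers that pre-filter delete candidates per segment get no pruning for growing segments, so every candidate PK is forwarded and segcore performs the authoritative lookup when the delete is applied. A self-contained toy of that caller-side pattern (types are illustrative, not the actual segment API):

package main

import "fmt"

// segment is a toy stand-in for baseSegment: when skipGrowingBF is set,
// BatchPkExist reports every PK as possibly present.
type segment struct {
	skipGrowingBF bool
	bf            map[int64]bool
}

func (s *segment) BatchPkExist(pks []int64) []bool {
	hits := make([]bool, len(pks))
	for i, pk := range pks {
		hits[i] = s.skipGrowingBF || s.bf[pk]
	}
	return hits
}

func main() {
	seg := &segment{skipGrowingBF: true}
	pks := []int64{7, 8, 9}

	// Caller-side pattern: only PKs that "may exist" become delete candidates.
	// With the BF skipped everything is forwarded, and segcore performs the
	// authoritative PK lookup when the delete is actually applied.
	var forward []int64
	for i, hit := range seg.BatchPkExist(pks) {
		if hit {
			forward = append(forward, pks[i])
		}
	}
	fmt.Println(forward) // [7 8 9]
}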

pkg/util/paramtable/component_param.go

@@ -2553,7 +2553,10 @@ type queryNodeConfig struct {
 	UseStreamComputing      ParamItem `refreshable:"false"`
 	QueryStreamBatchSize    ParamItem `refreshable:"false"`
 	QueryStreamMaxBatchSize ParamItem `refreshable:"false"`
-	BloomFilterApplyParallelFactor ParamItem `refreshable:"true"`
+
+	// BF
+	SkipGrowingSegmentBF           ParamItem `refreshable:"true"`
+	BloomFilterApplyParallelFactor ParamItem `refreshable:"true"`

 	// worker
 	WorkerPoolingSize ParamItem `refreshable:"false"`
@@ -3276,6 +3279,14 @@ user-task-polling:
 	}
 	p.BloomFilterApplyParallelFactor.Init(base.mgr)

+	p.SkipGrowingSegmentBF = ParamItem{
+		Key:          "queryNode.skipGrowingSegmentBF",
+		Version:      "2.5",
+		DefaultValue: "true",
+		Doc:          "indicates whether skipping the creation, maintenance, or checking of Bloom Filters for growing segments",
+	}
+	p.SkipGrowingSegmentBF.Init(base.mgr)
+
 	p.WorkerPoolingSize = ParamItem{
 		Key:     "queryNode.workerPooling.size",
 		Version: "2.4.7",
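
For completeness, a minimal sketch of reading the new knob at runtime; the key and default value come from the ParamItem definition above, while the main/Init scaffolding around it is only illustrative:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/util/paramtable"
)

func main() {
	paramtable.Init()
	// Override via queryNode.skipGrowingSegmentBF in milvus.yaml if Bloom
	// filters on growing segments are still wanted.
	fmt.Println(paramtable.Get().QueryNodeCfg.SkipGrowingSegmentBF.GetAsBool()) // true by default
}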

pkg/util/paramtable/component_param_test.go

@@ -480,6 +480,8 @@ func TestComponentParam(t *testing.T) {
 	assert.Equal(t, 3*time.Second, Params.LazyLoadRequestResourceRetryInterval.GetAsDuration(time.Millisecond))
 	assert.Equal(t, 4, Params.BloomFilterApplyParallelFactor.GetAsInt())
+	assert.Equal(t, true, Params.SkipGrowingSegmentBF.GetAsBool())
+
 	assert.Equal(t, "/var/lib/milvus/data/mmap", Params.MmapDirPath.GetValue())
 	assert.Equal(t, true, Params.MmapChunkCache.GetAsBool())