Change MetaCache interface (#28388)

See also: #28320, #27675

Signed-off-by: yangxuan <xuan.yang@zilliz.com>
Co-authored-by: Congqi Xia <congqi.xia@zilliz.com>
XuanYang-cn 2023-11-14 15:08:19 +08:00 committed by GitHub
parent e9ff7ed13d
commit 8037f35de7
3 changed files with 462 additions and 26 deletions

View File

@@ -24,16 +24,32 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/pkg/log"
)
type MetaCache interface {
// Collection returns collection id of metacache.
Collection() int64
// Schema returns collection schema.
Schema() *schemapb.CollectionSchema
// NewSegment creates a new segment from WAL stream data.
NewSegment(segmentID, partitionID int64, startPos *msgpb.MsgPosition, actions ...SegmentAction)
// AddSegment adds a segment from segment info.
AddSegment(segInfo *datapb.SegmentInfo, factory PkStatsFactory, actions ...SegmentAction)
// UpdateSegments applies the action to segments satisfying the provided filters.
UpdateSegments(action SegmentAction, filters ...SegmentFilter)
CompactSegments(newSegmentID, partitionID int64, oldSegmentIDs ...int64)
// CompactSegments transfers the compaction result into the metacache.
CompactSegments(newSegmentID, partitionID int64, numRows int64, bfs *BloomFilterSet, oldSegmentIDs ...int64)
// GetSegmentsBy returns segments satisfying the provided filters.
GetSegmentsBy(filters ...SegmentFilter) []*SegmentInfo
// GetSegmentByID returns segment with provided segment id if exists.
GetSegmentByID(id int64, filters ...SegmentFilter) (*SegmentInfo, bool)
// GetSegmentIDsBy returns the ids of segments satisfying the provided filters.
GetSegmentIDsBy(filters ...SegmentFilter) []int64
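// PredictSegments returns the ids of segments that may contain the provided primary key, based on each segment's bloom filter.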
PredictSegments(pk storage.PrimaryKey, filters ...SegmentFilter) ([]int64, bool)
}
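// Illustrative sketch only, not part of this change: how a caller might exercise the
// updated interface. The metacache import path and the surrounding wiring
// (ChannelWatchInfo, PkStatsFactory) are assumptions based on the datanode
// package layout, not confirmed by this diff.
package example

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/datanode/metacache"
	"github.com/milvus-io/milvus/internal/proto/datapb"
)

func useMetaCache(watchInfo *datapb.ChannelWatchInfo, factory metacache.PkStatsFactory) {
	// The constructor now takes the full ChannelWatchInfo, so the collection
	// schema is available through Schema() alongside Collection().
	cache := metacache.NewMetaCache(watchInfo, factory)
	_ = cache.Collection()
	_ = cache.Schema()

	// AddSegment registers a segment from a SegmentInfo plus a stats factory.
	cache.AddSegment(&datapb.SegmentInfo{ID: 2000, PartitionID: 1}, factory,
		metacache.UpdateState(commonpb.SegmentState_Flushed))

	// CompactSegments now carries the compacted row count and the merged bloom
	// filter; a zero-row result only drops the old segments.
	cache.CompactSegments(3000, 1, 512, metacache.NewBloomFilterSet(), 7, 8)

	// Direct lookup by id, optionally narrowed by filters.
	if seg, ok := cache.GetSegmentByID(3000, metacache.WithSegmentState(commonpb.SegmentState_Flushed)); ok {
		_ = seg
	}
}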
var _ MetaCache = (*metaCacheImpl)(nil)
@@ -44,14 +60,17 @@ type metaCacheImpl struct {
collectionID int64
vChannelName string
segmentInfos map[int64]*SegmentInfo
schema *schemapb.CollectionSchema
mu sync.RWMutex
}
func NewMetaCache(vchannel *datapb.VchannelInfo, factory PkStatsFactory) MetaCache {
func NewMetaCache(info *datapb.ChannelWatchInfo, factory PkStatsFactory) MetaCache {
vchannel := info.GetVchan()
cache := &metaCacheImpl{
collectionID: vchannel.GetCollectionID(),
vChannelName: vchannel.GetChannelName(),
segmentInfos: make(map[int64]*SegmentInfo),
schema: info.GetSchema(),
}
cache.init(vchannel, factory)
@@ -68,6 +87,17 @@ func (c *metaCacheImpl) init(vchannel *datapb.VchannelInfo, factory PkStatsFacto
}
}
// Collection returns collection id of metacache.
func (c *metaCacheImpl) Collection() int64 {
return c.collectionID
}
// Schema returns collection schema.
func (c *metaCacheImpl) Schema() *schemapb.CollectionSchema {
return c.schema
}
// NewSegment creates a new segment from WAL stream data.
func (c *metaCacheImpl) NewSegment(segmentID, partitionID int64, startPos *msgpb.MsgPosition, actions ...SegmentAction) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -86,7 +116,20 @@ func (c *metaCacheImpl) NewSegment(segmentID, partitionID int64, startPos *msgpb
}
}
func (c *metaCacheImpl) CompactSegments(newSegmentID, partitionID int64, dropSegmentIDs ...int64) {
// AddSegment adds a segment from segment info.
func (c *metaCacheImpl) AddSegment(segInfo *datapb.SegmentInfo, factory PkStatsFactory, actions ...SegmentAction) {
segment := NewSegmentInfo(segInfo, factory(segInfo))
for _, action := range actions {
action(segment)
}
c.mu.Lock()
defer c.mu.Unlock()
c.segmentInfos[segInfo.GetID()] = segment
}
func (c *metaCacheImpl) CompactSegments(newSegmentID, partitionID int64, numOfRows int64, bfs *BloomFilterSet, dropSegmentIDs ...int64) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -101,12 +144,14 @@ func (c *metaCacheImpl) CompactSegments(newSegmentID, partitionID int64, dropSeg
}
}
if _, ok := c.segmentInfos[newSegmentID]; !ok {
c.segmentInfos[newSegmentID] = &SegmentInfo{
segmentID: newSegmentID,
partitionID: partitionID,
state: commonpb.SegmentState_Flushed,
startPosRecorded: true,
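// Only register the compacted-to segment when the compaction actually produced rows;
// an empty compaction result just drops the old segments from the cache.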
if numOfRows > 0 {
if _, ok := c.segmentInfos[newSegmentID]; !ok {
c.segmentInfos[newSegmentID] = &SegmentInfo{
segmentID: newSegmentID,
partitionID: partitionID,
state: commonpb.SegmentState_Flushed,
startPosRecorded: true,
}
}
}
}
@@ -126,6 +171,21 @@ func (c *metaCacheImpl) GetSegmentsBy(filters ...SegmentFilter) []*SegmentInfo {
return segments
}
// GetSegmentByID returns segment with provided segment id if exists.
func (c *metaCacheImpl) GetSegmentByID(id int64, filters ...SegmentFilter) (*SegmentInfo, bool) {
c.mu.RLock()
defer c.mu.RUnlock()
segment, ok := c.segmentInfos[id]
if !ok {
return nil, false
}
if !c.mergeFilters(filters...)(segment) {
return nil, false
}
return segment, ok
}
func (c *metaCacheImpl) GetSegmentIDsBy(filters ...SegmentFilter) []int64 {
segments := c.GetSegmentsBy(filters...)
return lo.Map(segments, func(info *SegmentInfo, _ int) int64 { return info.SegmentID() })
@@ -147,6 +207,17 @@ func (c *metaCacheImpl) UpdateSegments(action SegmentAction, filters ...SegmentF
}
}
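// PredictSegments returns the ids of segments that may contain the provided primary key, based on each segment's bloom filter.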
func (c *metaCacheImpl) PredictSegments(pk storage.PrimaryKey, filters ...SegmentFilter) ([]int64, bool) {
var predicts []int64
segments := c.GetSegmentsBy(filters...)
for _, segment := range segments {
if segment.GetBloomFilterSet().PkExists(pk) {
predicts = append(predicts, segment.segmentID)
}
}
return predicts, len(predicts) > 0
}
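// Illustrative sketch only, not part of this change: PredictSegments reports a
// segment only after its bloom filter has seen the primary key via UpdatePKRange,
// mirroring the flow in the updated unit test. The import path is an assumption.
package example

import (
	"github.com/milvus-io/milvus/internal/datanode/metacache"
	"github.com/milvus-io/milvus/internal/storage"
)

func predictExample(cache metacache.MetaCache) {
	seg, ok := cache.GetSegmentByID(1)
	if !ok {
		return
	}
	// Feed observed primary keys into the segment's bloom filter set.
	if err := seg.GetBloomFilterSet().UpdatePKRange(&storage.Int64FieldData{
		Data: []int64{1, 2, 3, 4, 5},
	}); err != nil {
		return
	}
	// pk=5 was fed into the filter above, so segment 1 comes back as a candidate.
	ids, hit := cache.PredictSegments(storage.NewInt64PrimaryKey(5))
	_, _ = ids, hit
}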
func (c *metaCacheImpl) mergeFilters(filters ...SegmentFilter) SegmentFilter {
return func(info *SegmentInfo) bool {
for _, filter := range filters {

View File

@@ -23,13 +23,17 @@ import (
"github.com/stretchr/testify/suite"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/pkg/common"
)
type MetaCacheSuite struct {
suite.Suite
collectionID int64
collSchema *schemapb.CollectionSchema
vchannel string
invaliedSeg int64
partitionIDs []int64
@@ -52,6 +56,15 @@ func (s *MetaCacheSuite) SetupSuite() {
s.bfsFactory = func(*datapb.SegmentInfo) *BloomFilterSet {
return NewBloomFilterSet()
}
s.collSchema = &schemapb.CollectionSchema{
Name: "test_collection",
Fields: []*schemapb.FieldSchema{
{FieldID: 100, DataType: schemapb.DataType_Int64, IsPrimaryKey: true, Name: "pk"},
{FieldID: 101, DataType: schemapb.DataType_FloatVector, TypeParams: []*commonpb.KeyValuePair{
{Key: common.DimKey, Value: "128"},
}},
},
}
}
func (s *MetaCacheSuite) SetupTest() {
@@ -71,14 +84,22 @@ func (s *MetaCacheSuite) SetupTest() {
}
})
s.cache = NewMetaCache(&datapb.VchannelInfo{
CollectionID: s.collectionID,
ChannelName: s.vchannel,
FlushedSegments: flushSegmentInfos,
UnflushedSegments: growingSegmentInfos,
s.cache = NewMetaCache(&datapb.ChannelWatchInfo{
Schema: s.collSchema,
Vchan: &datapb.VchannelInfo{
CollectionID: s.collectionID,
ChannelName: s.vchannel,
FlushedSegments: flushSegmentInfos,
UnflushedSegments: growingSegmentInfos,
},
}, s.bfsFactory)
}
func (s *MetaCacheSuite) TestMetaInfo() {
s.Equal(s.collectionID, s.cache.Collection())
s.Equal(s.collSchema, s.cache.Schema())
}
func (s *MetaCacheSuite) TestNewSegment() {
for i, seg := range s.newSegments {
s.cache.NewSegment(seg, s.partitionIDs[i], nil, UpdateNumOfRows(100))
@@ -97,7 +118,7 @@ func (s *MetaCacheSuite) TestNewSegment() {
func (s *MetaCacheSuite) TestCompactSegments() {
for i, seg := range s.newSegments {
// compaction from flushed[i], unflushed[i] and invalidSeg to new[i]
s.cache.CompactSegments(seg, s.partitionIDs[i], s.flushedSegments[i], s.growingSegments[i], s.invaliedSeg)
s.cache.CompactSegments(seg, s.partitionIDs[i], 100, NewBloomFilterSet(), s.flushedSegments[i], s.growingSegments[i], s.invaliedSeg)
}
for i, partitionID := range s.partitionIDs {
@@ -109,6 +130,39 @@ func (s *MetaCacheSuite) TestCompactSegments() {
}
}
func (s *MetaCacheSuite) TestAddSegment() {
testSegs := []int64{100, 101, 102}
for _, segID := range testSegs {
info := &datapb.SegmentInfo{
ID: segID,
PartitionID: 10,
}
s.cache.AddSegment(info, func(info *datapb.SegmentInfo) *BloomFilterSet {
return NewBloomFilterSet()
}, UpdateState(commonpb.SegmentState_Flushed))
}
segments := s.cache.GetSegmentsBy(WithSegmentIDs(testSegs...))
s.Require().Equal(3, len(segments))
for _, seg := range segments {
s.Equal(commonpb.SegmentState_Flushed, seg.State())
s.EqualValues(10, seg.partitionID)
seg, ok := s.cache.GetSegmentByID(seg.segmentID, WithSegmentState(commonpb.SegmentState_Flushed))
s.NotNil(seg)
s.True(ok)
seg, ok = s.cache.GetSegmentByID(seg.segmentID, WithSegmentState(commonpb.SegmentState_Growing))
s.Nil(seg)
s.False(ok)
}
gotSegIDs := lo.Map(segments, func(info *SegmentInfo, _ int) int64 {
return info.segmentID
})
s.ElementsMatch(testSegs, gotSegIDs)
}
func (s *MetaCacheSuite) TestUpdateSegments() {
s.cache.UpdateSegments(UpdateState(commonpb.SegmentState_Flushed), WithSegmentIDs(5))
segments := s.cache.GetSegmentsBy(WithSegmentIDs(5))
@@ -117,6 +171,38 @@ func (s *MetaCacheSuite) TestUpdateSegments() {
s.Equal(commonpb.SegmentState_Flushed, segment.State())
}
func (s *MetaCacheSuite) TestPredictSegments() {
pk := storage.NewInt64PrimaryKey(100)
predict, ok := s.cache.PredictSegments(pk)
s.False(ok)
s.Empty(predict)
pkFieldData := &storage.Int64FieldData{
Data: []int64{1, 2, 3, 4, 5, 6, 7},
}
info, got := s.cache.GetSegmentByID(1)
s.Require().True(got)
s.Require().NotNil(info)
err := info.GetBloomFilterSet().UpdatePKRange(pkFieldData)
s.Require().NoError(err)
predict, ok = s.cache.PredictSegments(pk, func(s *SegmentInfo) bool {
return s.segmentID == 1
})
s.False(ok)
s.Empty(predict)
predict, ok = s.cache.PredictSegments(
storage.NewInt64PrimaryKey(5),
func(s *SegmentInfo) bool {
return s.segmentID == 1
})
s.True(ok)
s.NotEmpty(predict)
s.Equal(1, len(predict))
s.EqualValues(1, predict[0])
}
func TestMetaCacheSuite(t *testing.T) {
suite.Run(t, new(MetaCacheSuite))
}

View File

@@ -3,8 +3,14 @@
package metacache
import (
msgpb "github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
datapb "github.com/milvus-io/milvus/internal/proto/datapb"
mock "github.com/stretchr/testify/mock"
msgpb "github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
schemapb "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
storage "github.com/milvus-io/milvus/internal/storage"
)
// MockMetaCache is an autogenerated mock type for the MetaCache type
@@ -20,14 +26,104 @@ func (_m *MockMetaCache) EXPECT() *MockMetaCache_Expecter {
return &MockMetaCache_Expecter{mock: &_m.Mock}
}
// CompactSegments provides a mock function with given fields: newSegmentID, partitionID, oldSegmentIDs
func (_m *MockMetaCache) CompactSegments(newSegmentID int64, partitionID int64, oldSegmentIDs ...int64) {
// AddSegment provides a mock function with given fields: segInfo, factory, actions
func (_m *MockMetaCache) AddSegment(segInfo *datapb.SegmentInfo, factory PkStatsFactory, actions ...SegmentAction) {
_va := make([]interface{}, len(actions))
for _i := range actions {
_va[_i] = actions[_i]
}
var _ca []interface{}
_ca = append(_ca, segInfo, factory)
_ca = append(_ca, _va...)
_m.Called(_ca...)
}
// MockMetaCache_AddSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddSegment'
type MockMetaCache_AddSegment_Call struct {
*mock.Call
}
// AddSegment is a helper method to define mock.On call
// - segInfo *datapb.SegmentInfo
// - factory PkStatsFactory
// - actions ...SegmentAction
func (_e *MockMetaCache_Expecter) AddSegment(segInfo interface{}, factory interface{}, actions ...interface{}) *MockMetaCache_AddSegment_Call {
return &MockMetaCache_AddSegment_Call{Call: _e.mock.On("AddSegment",
append([]interface{}{segInfo, factory}, actions...)...)}
}
func (_c *MockMetaCache_AddSegment_Call) Run(run func(segInfo *datapb.SegmentInfo, factory PkStatsFactory, actions ...SegmentAction)) *MockMetaCache_AddSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]SegmentAction, len(args)-2)
for i, a := range args[2:] {
if a != nil {
variadicArgs[i] = a.(SegmentAction)
}
}
run(args[0].(*datapb.SegmentInfo), args[1].(PkStatsFactory), variadicArgs...)
})
return _c
}
func (_c *MockMetaCache_AddSegment_Call) Return() *MockMetaCache_AddSegment_Call {
_c.Call.Return()
return _c
}
func (_c *MockMetaCache_AddSegment_Call) RunAndReturn(run func(*datapb.SegmentInfo, PkStatsFactory, ...SegmentAction)) *MockMetaCache_AddSegment_Call {
_c.Call.Return(run)
return _c
}
// Collection provides a mock function with given fields:
func (_m *MockMetaCache) Collection() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// MockMetaCache_Collection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Collection'
type MockMetaCache_Collection_Call struct {
*mock.Call
}
// Collection is a helper method to define mock.On call
func (_e *MockMetaCache_Expecter) Collection() *MockMetaCache_Collection_Call {
return &MockMetaCache_Collection_Call{Call: _e.mock.On("Collection")}
}
func (_c *MockMetaCache_Collection_Call) Run(run func()) *MockMetaCache_Collection_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockMetaCache_Collection_Call) Return(_a0 int64) *MockMetaCache_Collection_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockMetaCache_Collection_Call) RunAndReturn(run func() int64) *MockMetaCache_Collection_Call {
_c.Call.Return(run)
return _c
}
// CompactSegments provides a mock function with given fields: newSegmentID, partitionID, numRows, bfs, oldSegmentIDs
func (_m *MockMetaCache) CompactSegments(newSegmentID int64, partitionID int64, numRows int64, bfs *BloomFilterSet, oldSegmentIDs ...int64) {
_va := make([]interface{}, len(oldSegmentIDs))
for _i := range oldSegmentIDs {
_va[_i] = oldSegmentIDs[_i]
}
var _ca []interface{}
_ca = append(_ca, newSegmentID, partitionID)
_ca = append(_ca, newSegmentID, partitionID, numRows, bfs)
_ca = append(_ca, _va...)
_m.Called(_ca...)
}
@@ -40,21 +136,23 @@ type MockMetaCache_CompactSegments_Call struct {
// CompactSegments is a helper method to define mock.On call
// - newSegmentID int64
// - partitionID int64
// - numRows int64
// - bfs *BloomFilterSet
// - oldSegmentIDs ...int64
func (_e *MockMetaCache_Expecter) CompactSegments(newSegmentID interface{}, partitionID interface{}, oldSegmentIDs ...interface{}) *MockMetaCache_CompactSegments_Call {
func (_e *MockMetaCache_Expecter) CompactSegments(newSegmentID interface{}, partitionID interface{}, numRows interface{}, bfs interface{}, oldSegmentIDs ...interface{}) *MockMetaCache_CompactSegments_Call {
return &MockMetaCache_CompactSegments_Call{Call: _e.mock.On("CompactSegments",
append([]interface{}{newSegmentID, partitionID}, oldSegmentIDs...)...)}
append([]interface{}{newSegmentID, partitionID, numRows, bfs}, oldSegmentIDs...)...)}
}
func (_c *MockMetaCache_CompactSegments_Call) Run(run func(newSegmentID int64, partitionID int64, oldSegmentIDs ...int64)) *MockMetaCache_CompactSegments_Call {
func (_c *MockMetaCache_CompactSegments_Call) Run(run func(newSegmentID int64, partitionID int64, numRows int64, bfs *BloomFilterSet, oldSegmentIDs ...int64)) *MockMetaCache_CompactSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]int64, len(args)-2)
for i, a := range args[2:] {
variadicArgs := make([]int64, len(args)-4)
for i, a := range args[4:] {
if a != nil {
variadicArgs[i] = a.(int64)
}
}
run(args[0].(int64), args[1].(int64), variadicArgs...)
run(args[0].(int64), args[1].(int64), args[2].(int64), args[3].(*BloomFilterSet), variadicArgs...)
})
return _c
}
@@ -64,7 +162,76 @@ func (_c *MockMetaCache_CompactSegments_Call) Return() *MockMetaCache_CompactSeg
return _c
}
func (_c *MockMetaCache_CompactSegments_Call) RunAndReturn(run func(int64, int64, ...int64)) *MockMetaCache_CompactSegments_Call {
func (_c *MockMetaCache_CompactSegments_Call) RunAndReturn(run func(int64, int64, int64, *BloomFilterSet, ...int64)) *MockMetaCache_CompactSegments_Call {
_c.Call.Return(run)
return _c
}
// GetSegmentByID provides a mock function with given fields: id, filters
func (_m *MockMetaCache) GetSegmentByID(id int64, filters ...SegmentFilter) (*SegmentInfo, bool) {
_va := make([]interface{}, len(filters))
for _i := range filters {
_va[_i] = filters[_i]
}
var _ca []interface{}
_ca = append(_ca, id)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *SegmentInfo
var r1 bool
if rf, ok := ret.Get(0).(func(int64, ...SegmentFilter) (*SegmentInfo, bool)); ok {
return rf(id, filters...)
}
if rf, ok := ret.Get(0).(func(int64, ...SegmentFilter) *SegmentInfo); ok {
r0 = rf(id, filters...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*SegmentInfo)
}
}
if rf, ok := ret.Get(1).(func(int64, ...SegmentFilter) bool); ok {
r1 = rf(id, filters...)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockMetaCache_GetSegmentByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSegmentByID'
type MockMetaCache_GetSegmentByID_Call struct {
*mock.Call
}
// GetSegmentByID is a helper method to define mock.On call
// - id int64
// - filters ...SegmentFilter
func (_e *MockMetaCache_Expecter) GetSegmentByID(id interface{}, filters ...interface{}) *MockMetaCache_GetSegmentByID_Call {
return &MockMetaCache_GetSegmentByID_Call{Call: _e.mock.On("GetSegmentByID",
append([]interface{}{id}, filters...)...)}
}
func (_c *MockMetaCache_GetSegmentByID_Call) Run(run func(id int64, filters ...SegmentFilter)) *MockMetaCache_GetSegmentByID_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]SegmentFilter, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(SegmentFilter)
}
}
run(args[0].(int64), variadicArgs...)
})
return _c
}
func (_c *MockMetaCache_GetSegmentByID_Call) Return(_a0 *SegmentInfo, _a1 bool) *MockMetaCache_GetSegmentByID_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockMetaCache_GetSegmentByID_Call) RunAndReturn(run func(int64, ...SegmentFilter) (*SegmentInfo, bool)) *MockMetaCache_GetSegmentByID_Call {
_c.Call.Return(run)
return _c
}
@@ -233,6 +400,118 @@ func (_c *MockMetaCache_NewSegment_Call) RunAndReturn(run func(int64, int64, *ms
return _c
}
// PredictSegments provides a mock function with given fields: pk, filters
func (_m *MockMetaCache) PredictSegments(pk storage.PrimaryKey, filters ...SegmentFilter) ([]int64, bool) {
_va := make([]interface{}, len(filters))
for _i := range filters {
_va[_i] = filters[_i]
}
var _ca []interface{}
_ca = append(_ca, pk)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 []int64
var r1 bool
if rf, ok := ret.Get(0).(func(storage.PrimaryKey, ...SegmentFilter) ([]int64, bool)); ok {
return rf(pk, filters...)
}
if rf, ok := ret.Get(0).(func(storage.PrimaryKey, ...SegmentFilter) []int64); ok {
r0 = rf(pk, filters...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(storage.PrimaryKey, ...SegmentFilter) bool); ok {
r1 = rf(pk, filters...)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockMetaCache_PredictSegments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PredictSegments'
type MockMetaCache_PredictSegments_Call struct {
*mock.Call
}
// PredictSegments is a helper method to define mock.On call
// - pk storage.PrimaryKey
// - filters ...SegmentFilter
func (_e *MockMetaCache_Expecter) PredictSegments(pk interface{}, filters ...interface{}) *MockMetaCache_PredictSegments_Call {
return &MockMetaCache_PredictSegments_Call{Call: _e.mock.On("PredictSegments",
append([]interface{}{pk}, filters...)...)}
}
func (_c *MockMetaCache_PredictSegments_Call) Run(run func(pk storage.PrimaryKey, filters ...SegmentFilter)) *MockMetaCache_PredictSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]SegmentFilter, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(SegmentFilter)
}
}
run(args[0].(storage.PrimaryKey), variadicArgs...)
})
return _c
}
func (_c *MockMetaCache_PredictSegments_Call) Return(_a0 []int64, _a1 bool) *MockMetaCache_PredictSegments_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockMetaCache_PredictSegments_Call) RunAndReturn(run func(storage.PrimaryKey, ...SegmentFilter) ([]int64, bool)) *MockMetaCache_PredictSegments_Call {
_c.Call.Return(run)
return _c
}
// Schema provides a mock function with given fields:
func (_m *MockMetaCache) Schema() *schemapb.CollectionSchema {
ret := _m.Called()
var r0 *schemapb.CollectionSchema
if rf, ok := ret.Get(0).(func() *schemapb.CollectionSchema); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*schemapb.CollectionSchema)
}
}
return r0
}
// MockMetaCache_Schema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Schema'
type MockMetaCache_Schema_Call struct {
*mock.Call
}
// Schema is a helper method to define mock.On call
func (_e *MockMetaCache_Expecter) Schema() *MockMetaCache_Schema_Call {
return &MockMetaCache_Schema_Call{Call: _e.mock.On("Schema")}
}
func (_c *MockMetaCache_Schema_Call) Run(run func()) *MockMetaCache_Schema_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockMetaCache_Schema_Call) Return(_a0 *schemapb.CollectionSchema) *MockMetaCache_Schema_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockMetaCache_Schema_Call) RunAndReturn(run func() *schemapb.CollectionSchema) *MockMetaCache_Schema_Call {
_c.Call.Return(run)
return _c
}
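// Illustrative sketch only, not part of this change: wiring the regenerated
// MockMetaCache in a testify-based test. It assumes the NewMockMetaCache
// constructor that mockery emits alongside the expecter methods shown here,
// and uses mock.Anything where the concrete argument does not matter.
package metacache_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/datanode/metacache"
)

func TestWithMockMetaCache(t *testing.T) {
	mc := metacache.NewMockMetaCache(t)
	mc.EXPECT().Collection().Return(int64(100)).Maybe()
	mc.EXPECT().Schema().Return(&schemapb.CollectionSchema{Name: "coll"}).Maybe()
	// The expanded CompactSegments signature: new segment, partition, row count,
	// merged bloom filter, then the compacted-from segment ids.
	mc.EXPECT().
		CompactSegments(int64(3000), int64(1), int64(512), mock.Anything, int64(7), int64(8)).
		Return().
		Maybe()

	// Code under test would receive mc wherever a metacache.MetaCache is expected.
	var _ metacache.MetaCache = mc
}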
// UpdateSegments provides a mock function with given fields: action, filters
func (_m *MockMetaCache) UpdateSegments(action SegmentAction, filters ...SegmentFilter) {
_va := make([]interface{}, len(filters))