mirror of https://github.com/milvus-io/milvus.git
Optimize read ddl (#19352)
Signed-off-by: longjiquan <jiquan.long@zilliz.com>
pull/19412/head
parent 662c654fe3
commit 4e521438c3
@@ -175,7 +175,6 @@ func (kc *Catalog) loadCollection(ctx context.Context, collectionID typeutil.UniqueID
	collKey := buildCollectionKey(collectionID)
	collVal, err := kc.Snapshot.Load(collKey, ts)
	if err != nil {
		log.Error("get collection meta fail", zap.String("key", collKey), zap.Error(err))
		return nil, fmt.Errorf("can't find collection: %d", collectionID)
	}

@@ -310,10 +309,8 @@ func (kc *Catalog) listFieldsAfter210(ctx context.Context, collectionID typeutil.UniqueID
}

func (kc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
	collKey := buildCollectionKey(collectionID)
	collMeta, err := kc.loadCollection(ctx, collectionID, ts)
	if err != nil {
		log.Error("collection meta marshal fail", zap.String("key", collKey), zap.Error(err))
		return nil, err
	}

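Note on the hunks above: GetCollectionByID now routes through the same loadCollection helper instead of repeating the snapshot read, so the load-and-decode logic lives in one place. A minimal, self-contained sketch of that delegation shape (the map-backed store and types are illustrative stand-ins, not the Milvus catalog):

package main

import "fmt"

type collection struct {
	ID   int64
	Name string
}

type catalog struct {
	store map[int64]*collection
}

// loadCollection is the single place that reads and decodes collection meta.
func (c *catalog) loadCollection(id int64) (*collection, error) {
	coll, ok := c.store[id]
	if !ok {
		return nil, fmt.Errorf("can't find collection: %d", id)
	}
	return coll, nil
}

// GetCollectionByID delegates to loadCollection rather than duplicating the read path.
func (c *catalog) GetCollectionByID(id int64) (*collection, error) {
	return c.loadCollection(id)
}

func main() {
	c := &catalog{store: map[int64]*collection{1: {ID: 1, Name: "demo"}}}
	coll, err := c.GetCollectionByID(1)
	fmt.Println(coll.Name, err) // demo <nil>
}
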
@@ -8,7 +8,7 @@ import (
)

type alterAliasTask struct {
-	baseTaskV2
+	baseTask
	Req *milvuspb.AlterAliasRequest
}

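This is the first of many hunks renaming the embedded baseTaskV2 type to baseTask; every DDL task struct in this commit changes the same way. A self-contained sketch of the Go embedding pattern involved (core is a stand-in here; the done channel and NotifyDone mirror names visible elsewhere in this diff):

package main

import "fmt"

type core struct{ name string }

type baseTask struct {
	core *core
	done chan error
}

func (b *baseTask) NotifyDone(err error) { b.done <- err }

type alterAliasTask struct {
	baseTask // embedded: the task inherits NotifyDone and the shared fields
	Alias    string
}

func main() {
	task := &alterAliasTask{
		baseTask: baseTask{core: &core{name: "test-core"}, done: make(chan error, 1)},
		Alias:    "test",
	}
	task.NotifyDone(nil) // promoted from the embedded baseTask
	fmt.Println(<-task.done, task.Alias)
}
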
@@ -29,7 +29,7 @@ func Test_alterAliasTask_Execute(t *testing.T) {
	t.Run("failed to expire cache", func(t *testing.T) {
		core := newTestCore(withInvalidProxyManager())
		task := &alterAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.AlterAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias},
				Alias: "test",

@@ -42,7 +42,7 @@ func Test_alterAliasTask_Execute(t *testing.T) {
	t.Run("failed to alter alias", func(t *testing.T) {
		core := newTestCore(withValidProxyManager(), withInvalidMeta())
		task := &alterAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.AlterAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias},
				Alias: "test",

@@ -9,7 +9,7 @@ import (
)

type createAliasTask struct {
-	baseTaskV2
+	baseTask
	Req *milvuspb.CreateAliasRequest
}

@@ -28,7 +28,7 @@ func Test_createAliasTask_Execute(t *testing.T) {
	t.Run("failed to create alias", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &createAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.CreateAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateAlias},
				Alias: "test",

@@ -33,7 +33,7 @@ type collectionChannels struct {
}

type createCollectionTask struct {
-	baseTaskV2
+	baseTask
	Req    *milvuspb.CreateCollectionRequest
	schema *schemapb.CollectionSchema
	collID UniqueID

@@ -207,7 +207,7 @@ func Test_createCollectionTask_Prepare(t *testing.T) {
	core := newTestCore(withInvalidIDAllocator())

	task := createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -244,7 +244,7 @@ func Test_createCollectionTask_Prepare(t *testing.T) {
	assert.NoError(t, err)

	task := createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -273,7 +273,7 @@ func Test_createCollectionTask_Execute(t *testing.T) {
	core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))

	task := &createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -318,7 +318,7 @@ func Test_createCollectionTask_Execute(t *testing.T) {
	core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))

	task := &createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -338,7 +338,7 @@ func Test_createCollectionTask_Execute(t *testing.T) {
	pchans := ticker.getDmlChannelNames(shardNum)
	core := newTestCore(withTtSynchronizer(ticker))
	task := &createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		channels: collectionChannels{
			physicalChannels: pchans,
			virtualChannels:  []string{funcutil.GenRandomStr(), funcutil.GenRandomStr()},

@@ -402,7 +402,7 @@ func Test_createCollectionTask_Execute(t *testing.T) {
	assert.NoError(t, err)

	task := createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -484,7 +484,7 @@ func Test_createCollectionTask_Execute(t *testing.T) {
	assert.NoError(t, err)

	task := createCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.CreateCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			CollectionName: collectionName,

@@ -15,7 +15,7 @@ import (
)

type createPartitionTask struct {
-	baseTaskV2
+	baseTask
	Req      *milvuspb.CreatePartitionRequest
	collMeta *model.Collection
}

@@ -26,8 +26,8 @@ func Test_createPartitionTask_Prepare(t *testing.T) {
	t.Run("failed to get collection meta", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &createPartitionTask{
-			baseTaskV2: baseTaskV2{core: core},
-			Req:        &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
+			baseTask: baseTask{core: core},
+			Req:      &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)

@@ -42,8 +42,8 @@ func Test_createPartitionTask_Prepare(t *testing.T) {
	}
	core := newTestCore(withMeta(meta))
	task := &createPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
-		Req:        &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
+		baseTask: baseTask{core: core},
+		Req:      &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
	}
	err := task.Prepare(context.Background())
	assert.NoError(t, err)

@@ -70,9 +70,9 @@ func Test_createPartitionTask_Execute(t *testing.T) {
	coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
	core := newTestCore(withInvalidIDAllocator())
	task := &createPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
-		collMeta:   coll,
-		Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
+		baseTask: baseTask{core: core},
+		collMeta: coll,
+		Req:      &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
	}
	err := task.Execute(context.Background())
	assert.Error(t, err)

@@ -84,9 +84,9 @@ func Test_createPartitionTask_Execute(t *testing.T) {
	coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
	core := newTestCore(withValidIDAllocator(), withInvalidProxyManager())
	task := &createPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
-		collMeta:   coll,
-		Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
+		baseTask: baseTask{core: core},
+		collMeta: coll,
+		Req:      &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
	}
	err := task.Execute(context.Background())
	assert.Error(t, err)

@@ -98,9 +98,9 @@ func Test_createPartitionTask_Execute(t *testing.T) {
	coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
	core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withInvalidMeta())
	task := &createPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
-		collMeta:   coll,
-		Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
+		baseTask: baseTask{core: core},
+		collMeta: coll,
+		Req:      &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
	}
	err := task.Execute(context.Background())
	assert.Error(t, err)

@@ -116,9 +116,9 @@ func Test_createPartitionTask_Execute(t *testing.T) {
	}
	core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withMeta(meta))
	task := &createPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
-		collMeta:   coll,
-		Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
+		baseTask: baseTask{core: core},
+		collMeta: coll,
+		Req:      &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
	}
	err := task.Execute(context.Background())
	assert.NoError(t, err)

@@ -8,7 +8,7 @@ import (
	"go.uber.org/atomic"
)

-type DdlTsLockManagerV2 interface {
+type DdlTsLockManager interface {
	GetMinDdlTs() Timestamp
	AddRefCnt(delta int32)
	Lock()

@@ -16,14 +16,14 @@ type DdlTsLockManagerV2 interface {
	UpdateLastTs(ts Timestamp)
}

-type ddlTsLockManagerV2 struct {
+type ddlTsLockManager struct {
	lastTs        atomic.Uint64
	inProgressCnt atomic.Int32
	tsoAllocator  tso.Allocator
	mu            sync.Mutex
}

-func (c *ddlTsLockManagerV2) GetMinDdlTs() Timestamp {
+func (c *ddlTsLockManager) GetMinDdlTs() Timestamp {
	// In fact, `TryLock` can replace the `inProgressCnt` but it's not recommended.
	if c.inProgressCnt.Load() > 0 {
		return c.lastTs.Load()

@@ -38,24 +38,24 @@ func (c *ddlTsLockManagerV2) GetMinDdlTs() Timestamp {
	return ts
}

-func (c *ddlTsLockManagerV2) AddRefCnt(delta int32) {
+func (c *ddlTsLockManager) AddRefCnt(delta int32) {
	c.inProgressCnt.Add(delta)
}

-func (c *ddlTsLockManagerV2) Lock() {
+func (c *ddlTsLockManager) Lock() {
	c.mu.Lock()
}

-func (c *ddlTsLockManagerV2) Unlock() {
+func (c *ddlTsLockManager) Unlock() {
	c.mu.Unlock()
}

-func (c *ddlTsLockManagerV2) UpdateLastTs(ts Timestamp) {
+func (c *ddlTsLockManager) UpdateLastTs(ts Timestamp) {
	c.lastTs.Store(ts)
}

-func newDdlTsLockManagerV2(tsoAllocator tso.Allocator) *ddlTsLockManagerV2 {
-	return &ddlTsLockManagerV2{
+func newDdlTsLockManager(tsoAllocator tso.Allocator) *ddlTsLockManager {
+	return &ddlTsLockManager{
		lastTs:        *atomic.NewUint64(0),
		inProgressCnt: *atomic.NewInt32(0),
		tsoAllocator:  tsoAllocator,

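Beyond the V2-suffix removal, GetMinDdlTs carries the interesting logic: while any DDL task is in flight it pins readers to the last confirmed timestamp, and only allocates a fresh TSO when nothing is in progress. A self-contained sketch of that pattern using only the standard library (the error fallback to lastTs is inferred from the tests below, so treat that branch as an assumption):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type ddlTsLockManager struct {
	lastTs        atomic.Uint64
	inProgressCnt atomic.Int32
	allocate      func() (uint64, error) // stand-in for tso.Allocator
	mu            sync.Mutex
}

func (c *ddlTsLockManager) GetMinDdlTs() uint64 {
	// An in-flight DDL task may still publish at lastTs, so don't advance past it.
	if c.inProgressCnt.Load() > 0 {
		return c.lastTs.Load()
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	ts, err := c.allocate()
	if err != nil {
		return c.lastTs.Load() // assumed fallback: last known-safe timestamp
	}
	c.lastTs.Store(ts)
	return ts
}

func main() {
	next := uint64(100)
	m := &ddlTsLockManager{allocate: func() (uint64, error) { next++; return next, nil }}
	m.lastTs.Store(100)
	m.inProgressCnt.Store(1)
	fmt.Println(m.GetMinDdlTs()) // 100: in-flight DDL pins the ts
	m.inProgressCnt.Store(0)
	fmt.Println(m.GetMinDdlTs()) // 101: fresh allocation
}
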
@@ -9,7 +9,7 @@ import (

func Test_ddlTsLockManager_GetMinDdlTs(t *testing.T) {
	t.Run("there are in-progress tasks", func(t *testing.T) {
-		m := newDdlTsLockManagerV2(nil)
+		m := newDdlTsLockManager(nil)
		m.UpdateLastTs(100)
		m.inProgressCnt.Store(9999)
		ts := m.GetMinDdlTs()

@@ -21,7 +21,7 @@ func Test_ddlTsLockManager_GetMinDdlTs(t *testing.T) {
		tsoAllocator.GenerateTSOF = func(count uint32) (uint64, error) {
			return 0, errors.New("error mock GenerateTSO")
		}
-		m := newDdlTsLockManagerV2(tsoAllocator)
+		m := newDdlTsLockManager(tsoAllocator)
		m.UpdateLastTs(101)
		m.inProgressCnt.Store(0)
		ts := m.GetMinDdlTs()

@@ -33,7 +33,7 @@ func Test_ddlTsLockManager_GetMinDdlTs(t *testing.T) {
		tsoAllocator.GenerateTSOF = func(count uint32) (uint64, error) {
			return 102, nil
		}
-		m := newDdlTsLockManagerV2(tsoAllocator)
+		m := newDdlTsLockManager(tsoAllocator)
		m.UpdateLastTs(101)
		m.inProgressCnt.Store(0)
		ts := m.GetMinDdlTs()

@@ -1,73 +0,0 @@
package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/api/schemapb"
	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/milvus-io/milvus/internal/util/tsoutil"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// describeCollectionTask describe collection request task
type describeCollectionTask struct {
	baseTaskV2
	Req *milvuspb.DescribeCollectionRequest
	Rsp *milvuspb.DescribeCollectionResponse
}

func (t *describeCollectionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_DescribeCollection); err != nil {
		return err
	}
	return nil
}

// Execute task execution
func (t *describeCollectionTask) Execute(ctx context.Context) (err error) {
	var collInfo *model.Collection
	t.Rsp.Status = succStatus()

	if t.Req.GetTimeStamp() == 0 {
		t.Req.TimeStamp = typeutil.MaxTimestamp
	}

	if t.Req.GetCollectionName() != "" {
		collInfo, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.Req.GetTimeStamp())
		if err != nil {
			t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
			return err
		}
	} else {
		collInfo, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetCollectionID(), t.Req.GetTimeStamp())
		if err != nil {
			t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
			return err
		}
	}

	t.Rsp.Schema = &schemapb.CollectionSchema{
		Name:        collInfo.Name,
		Description: collInfo.Description,
		AutoID:      collInfo.AutoID,
		Fields:      model.MarshalFieldModels(collInfo.Fields),
	}
	t.Rsp.CollectionID = collInfo.CollectionID
	t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames
	t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames
	if collInfo.ShardsNum == 0 {
		collInfo.ShardsNum = int32(len(collInfo.VirtualChannelNames))
	}
	t.Rsp.ShardsNum = collInfo.ShardsNum
	t.Rsp.ConsistencyLevel = collInfo.ConsistencyLevel

	t.Rsp.CreatedTimestamp = collInfo.CreateTime
	createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
	t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
	t.Rsp.Aliases = t.core.meta.ListAliasesByID(collInfo.CollectionID)
	t.Rsp.StartPositions = collInfo.StartPositions
	t.Rsp.CollectionName = t.Rsp.Schema.Name
	return nil
}

@@ -1,115 +0,0 @@
package rootcoord

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/internal/util/funcutil"

	"github.com/milvus-io/milvus/internal/metastore/model"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/stretchr/testify/assert"
)

func Test_describeCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &describeCollectionTask{
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DropCollection,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})

	t.Run("normal case", func(t *testing.T) {
		task := &describeCollectionTask{
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

func Test_describeCollectionTask_Execute(t *testing.T) {
	t.Run("failed to get collection by name", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionName: "test coll",
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})

	t.Run("failed to get collection by id", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				CollectionID: 1,
				Name:         "test coll",
			}, nil
		}
		alias1, alias2 := funcutil.GenRandomStr(), funcutil.GenRandomStr()
		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
			return []string{alias1, alias2}
		}

		core := newTestCore(withMeta(meta))
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.ElementsMatch(t, []string{alias1, alias2}, task.Rsp.GetAliases())
	})
}

@@ -9,7 +9,7 @@ import (
)

type dropAliasTask struct {
-	baseTaskV2
+	baseTask
	Req *milvuspb.DropAliasRequest
}

@@ -35,7 +35,7 @@ func Test_dropAliasTask_Execute(t *testing.T) {
	t.Run("failed to expire cache", func(t *testing.T) {
		core := newTestCore(withInvalidProxyManager())
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},

@@ -50,7 +50,7 @@ func Test_dropAliasTask_Execute(t *testing.T) {
		core := newTestCore(withValidProxyManager(), withInvalidMeta())
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},

@@ -69,7 +69,7 @@ func Test_dropAliasTask_Execute(t *testing.T) {
		core := newTestCore(withValidProxyManager(), withMeta(meta))
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},

@@ -17,7 +17,7 @@ import (
)

type dropCollectionTask struct {
-	baseTaskV2
+	baseTask
	Req *milvuspb.DropCollectionRequest
}

@@ -54,18 +54,18 @@ func (t *dropCollectionTask) Execute(ctx context.Context) error {

	redoTask := newBaseRedoTask(t.core.stepExecutor)

-	redoTask.AddSyncStep(&expireCacheStep{
-		baseStep:        baseStep{core: t.core},
-		collectionNames: append(aliases, collMeta.Name),
-		collectionID:    collMeta.CollectionID,
-		ts:              ts,
-	})
	redoTask.AddSyncStep(&changeCollectionStateStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collMeta.CollectionID,
		state:        pb.CollectionState_CollectionDropping,
		ts:           ts,
	})
+	redoTask.AddSyncStep(&expireCacheStep{
+		baseStep:        baseStep{core: t.core},
+		collectionNames: append(aliases, collMeta.Name),
+		collectionID:    collMeta.CollectionID,
+		ts:              ts,
+	})

	redoTask.AddAsyncStep(&releaseCollectionStep{
		baseStep: baseStep{core: t.core},

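The second hunk reorders the synchronous steps: the proxy-cache expiry now runs after the collection is marked dropping, presumably so a proxy that reloads meta right after the invalidation observes the dropping state instead of re-caching stale meta. A minimal sketch of the ordering guarantee the redo task relies on (all names here are illustrative stand-ins):

package main

import "fmt"

type step func() error

type redoTask struct{ syncSteps []step }

func (t *redoTask) AddSyncStep(s step) { t.syncSteps = append(t.syncSteps, s) }

// Execute runs sync steps strictly in insertion order, so queuing the cache
// expiry after the state change guarantees re-reads see "dropping".
func (t *redoTask) Execute() error {
	for _, s := range t.syncSteps {
		if err := s(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	task := &redoTask{}
	task.AddSyncStep(func() error { fmt.Println("1: mark collection dropping"); return nil })
	task.AddSyncStep(func() error { fmt.Println("2: expire proxy caches"); return nil })
	_ = task.Execute()
}
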
@@ -5,15 +5,14 @@ import (
	"errors"
	"testing"

-	"github.com/milvus-io/milvus/internal/proto/etcdpb"
-
-	"github.com/milvus-io/milvus/internal/metastore/model"
-
-	"github.com/milvus-io/milvus/internal/util/funcutil"
-
	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
+	"github.com/milvus-io/milvus/internal/metastore/model"
+	"github.com/milvus-io/milvus/internal/proto/etcdpb"
+	mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"
+	"github.com/milvus-io/milvus/internal/util/funcutil"
	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
)

func Test_dropCollectionTask_Prepare(t *testing.T) {

@@ -35,7 +34,7 @@ func Test_dropCollectionTask_Prepare(t *testing.T) {
	}
	core := newTestCore(withMeta(meta))
	task := &dropCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			CollectionName: collectionName,

@@ -53,7 +52,7 @@ func Test_dropCollectionTask_Prepare(t *testing.T) {
	}
	core := newTestCore(withMeta(meta))
	task := &dropCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			CollectionName: collectionName,

@@ -69,7 +68,7 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
	collectionName := funcutil.GenRandomStr()
	core := newTestCore(withInvalidMeta())
	task := &dropCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			CollectionName: collectionName,

@@ -82,16 +81,26 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
	t.Run("failed to expire cache", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
-		meta := newMockMetaTable()
-		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
-			return coll.Clone(), nil
-		}
-		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
-			return []string{}
-		}
+		meta := mockrootcoord.NewIMetaTable(t)
+		meta.On("GetCollectionByName",
+			mock.Anything, // context.Context
+			mock.AnythingOfType("string"),
+			mock.AnythingOfType("uint64"),
+		).Return(coll.Clone(), nil)
+		meta.On("ListAliasesByID",
+			mock.AnythingOfType("int64"),
+		).Return([]string{})
+		meta.On("ChangeCollectionState",
+			mock.Anything, // context.Context
+			mock.AnythingOfType("int64"),
+			mock.Anything, // etcdpb.CollectionState
+			mock.AnythingOfType("uint64"),
+		).Return(nil)

		core := newTestCore(withInvalidProxyManager(), withMeta(meta))
		task := &dropCollectionTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: collectionName,

@@ -116,7 +125,7 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
	}
	core := newTestCore(withValidProxyManager(), withMeta(meta))
	task := &dropCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			CollectionName: collectionName,

@@ -188,7 +197,7 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
		withTtSynchronizer(ticker))

	task := &dropCollectionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropCollectionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			CollectionName: collectionName,

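These test hunks replace the hand-rolled newMockMetaTable with the mockery-generated mock driven by testify expectations: mock.Anything matches any value (used here for contexts and enum arguments), while mock.AnythingOfType matches by type name. A self-contained sketch of the same mechanism (the Store interface is invented for illustration; only the testify API is real):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Store is a hypothetical interface standing in for IMetaTable.
type Store interface {
	ListAliasesByID(collID int64) []string
}

// MockStore is the shape mockery would generate for Store.
type MockStore struct{ mock.Mock }

func (m *MockStore) ListAliasesByID(collID int64) []string {
	ret := m.Called(collID)
	return ret.Get(0).([]string)
}

func main() {
	s := &MockStore{}
	// AnythingOfType("int64") accepts any int64 argument, mirroring the hunks above.
	s.On("ListAliasesByID", mock.AnythingOfType("int64")).Return([]string{"alias1"})
	fmt.Println(s.ListAliasesByID(42)) // [alias1]
}
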
@@ -18,7 +18,7 @@ import (
)

type dropPartitionTask struct {
-	baseTaskV2
+	baseTask
	Req      *milvuspb.DropPartitionRequest
	collMeta *model.Collection
}

@@ -54,12 +54,6 @@ func (t *dropPartitionTask) Execute(ctx context.Context) error {
	}

	redoTask := newBaseRedoTask(t.core.stepExecutor)
-	redoTask.AddSyncStep(&expireCacheStep{
-		baseStep:        baseStep{core: t.core},
-		collectionNames: []string{t.Req.GetCollectionName()},
-		collectionID:    t.collMeta.CollectionID,
-		ts:              t.GetTs(),
-	})
	redoTask.AddSyncStep(&changePartitionStateStep{
		baseStep:     baseStep{core: t.core},
		collectionID: t.collMeta.CollectionID,

@@ -67,6 +61,12 @@ func (t *dropPartitionTask) Execute(ctx context.Context) error {
		state: pb.PartitionState_PartitionDropping,
		ts:    t.GetTs(),
	})
+	redoTask.AddSyncStep(&expireCacheStep{
+		baseStep:        baseStep{core: t.core},
+		collectionNames: []string{t.Req.GetCollectionName()},
+		collectionID:    t.collMeta.CollectionID,
+		ts:              t.GetTs(),
+	})

	redoTask.AddAsyncStep(&dropIndexStep{
		baseStep: baseStep{core: t.core},

@@ -4,7 +4,10 @@ import (
	"context"
	"testing"

+	"github.com/stretchr/testify/mock"
+
+	"github.com/milvus-io/milvus/internal/proto/etcdpb"
+	mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"

	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/milvus-io/milvus/internal/util/funcutil"

@@ -40,7 +43,7 @@ func Test_dropPartitionTask_Prepare(t *testing.T) {
	t.Run("failed to get collection meta", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &dropPartitionTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
			},

@@ -60,7 +63,7 @@ func Test_dropPartitionTask_Prepare(t *testing.T) {
	}
	core := newTestCore(withMeta(meta))
	task := &dropPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropPartitionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
			CollectionName: collectionName,

@@ -93,9 +96,17 @@ func Test_dropPartitionTask_Execute(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}
-		core := newTestCore(withInvalidProxyManager())
+		meta := mockrootcoord.NewIMetaTable(t)
+		meta.On("ChangePartitionState",
+			mock.Anything, // context.Context
+			mock.AnythingOfType("int64"),
+			mock.AnythingOfType("int64"),
+			mock.Anything, // pb.PartitionState
+			mock.AnythingOfType("uint64"),
+		).Return(nil)
+		core := newTestCore(withInvalidProxyManager(), withMeta(meta))
		task := &dropPartitionTask{
-			baseTaskV2: baseTaskV2{core: core},
+			baseTask: baseTask{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,

@@ -113,7 +124,7 @@ func Test_dropPartitionTask_Execute(t *testing.T) {
	coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}
	core := newTestCore(withValidProxyManager(), withInvalidMeta())
	task := &dropPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropPartitionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
			CollectionName: collectionName,

@@ -153,7 +164,7 @@ func Test_dropPartitionTask_Execute(t *testing.T) {
	core := newTestCore(withValidProxyManager(), withMeta(meta), withGarbageCollector(gc), withDropIndex())

	task := &dropPartitionTask{
-		baseTaskV2: baseTaskV2{core: core},
+		baseTask: baseTask{core: core},
		Req: &milvuspb.DropPartitionRequest{
			Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
			CollectionName: collectionName,

@@ -69,7 +69,7 @@ func TestGarbageCollectorCtx_ReDropCollection(t *testing.T) {
		return 100, nil
	}
	core := newTestCore(withBroker(broker), withTtSynchronizer(ticker), withTsoAllocator(tsoAllocator))
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	shardsNum := 2

@@ -110,7 +110,7 @@ func TestGarbageCollectorCtx_ReDropCollection(t *testing.T) {
		withTtSynchronizer(ticker),
		withTsoAllocator(tsoAllocator),
		withMeta(meta))
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	gc.ReDropCollection(&model.Collection{}, 1000)

@@ -153,7 +153,7 @@ func TestGarbageCollectorCtx_ReDropCollection(t *testing.T) {
		withTtSynchronizer(ticker),
		withTsoAllocator(tsoAllocator),
		withMeta(meta))
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	gc.ReDropCollection(&model.Collection{}, 1000)

@@ -189,7 +189,7 @@ func TestGarbageCollectorCtx_RemoveCreatingCollection(t *testing.T) {

	core := newTestCore(withTtSynchronizer(ticker), withTsoAllocator(tsoAllocator), withStepExecutor(executor))
	gc := newBgGarbageCollector(core)
-	core.ddlTsLockManager = newDdlTsLockManagerV2(tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(tsoAllocator)
	core.garbageCollector = gc

	gc.RemoveCreatingCollection(&model.Collection{PhysicalChannelNames: pchans})

@@ -224,7 +224,7 @@ func TestGarbageCollectorCtx_RemoveCreatingCollection(t *testing.T) {

	core := newTestCore(withTtSynchronizer(ticker), withMeta(meta), withTsoAllocator(tsoAllocator))
	gc := newBgGarbageCollector(core)
-	core.ddlTsLockManager = newDdlTsLockManagerV2(tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(tsoAllocator)
	core.garbageCollector = gc

	gc.RemoveCreatingCollection(&model.Collection{PhysicalChannelNames: pchans})

@@ -260,7 +260,7 @@ func TestGarbageCollectorCtx_RemoveCreatingCollection(t *testing.T) {

	core := newTestCore(withTtSynchronizer(ticker), withMeta(meta), withTsoAllocator(tsoAllocator))
	gc := newBgGarbageCollector(core)
-	core.ddlTsLockManager = newDdlTsLockManagerV2(tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(tsoAllocator)
	core.garbageCollector = gc

	gc.RemoveCreatingCollection(&model.Collection{PhysicalChannelNames: pchans})

@@ -279,7 +279,7 @@ func TestGarbageCollectorCtx_ReDropPartition(t *testing.T) {
		return 100, nil
	}
	core := newTestCore(withTtSynchronizer(ticker), withTsoAllocator(tsoAllocator), withDropIndex())
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	gc.ReDropPartition(pchans, &model.Partition{}, 100000)

@@ -298,7 +298,7 @@ func TestGarbageCollectorCtx_ReDropPartition(t *testing.T) {
		return 100, nil
	}
	core := newTestCore(withMeta(meta), withTtSynchronizer(ticker), withTsoAllocator(tsoAllocator), withDropIndex())
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	gc.ReDropPartition(pchans, &model.Partition{}, 100000)

@@ -321,7 +321,7 @@ func TestGarbageCollectorCtx_ReDropPartition(t *testing.T) {
		return 100, nil
	}
	core := newTestCore(withMeta(meta), withTtSynchronizer(ticker), withTsoAllocator(tsoAllocator), withDropIndex())
-	core.ddlTsLockManager = newDdlTsLockManagerV2(core.tsoAllocator)
+	core.ddlTsLockManager = newDdlTsLockManager(core.tsoAllocator)
	gc := newBgGarbageCollector(core)
	core.garbageCollector = gc
	gc.ReDropPartition(pchans, &model.Partition{}, 100000)

@@ -1,35 +0,0 @@
package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
)

// hasCollectionTask has collection request task
type hasCollectionTask struct {
	baseTaskV2
	Req *milvuspb.HasCollectionRequest
	Rsp *milvuspb.BoolResponse
}

func (t *hasCollectionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasCollection); err != nil {
		return err
	}
	return nil
}

// Execute task execution
func (t *hasCollectionTask) Execute(ctx context.Context) error {
	t.Rsp.Status = succStatus()
	if t.Req.GetTimeStamp() == 0 {
		t.Req.TimeStamp = typeutil.MaxTimestamp
	}
	_, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.Req.GetTimeStamp())
	t.Rsp.Value = err == nil
	return nil
}

@@ -1,85 +0,0 @@
package rootcoord

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/internal/metastore/model"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/stretchr/testify/assert"
)

func Test_hasCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &hasCollectionTask{
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_Undefined,
				},
				CollectionName: "test coll",
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})

	t.Run("normal case", func(t *testing.T) {
		task := &hasCollectionTask{
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

func Test_hasCollectionTask_Execute(t *testing.T) {
	t.Run("failed", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &hasCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return nil, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.True(t, task.Rsp.GetValue())
	})
}

@@ -1,43 +0,0 @@
package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
)

// hasPartitionTask has partition request task
type hasPartitionTask struct {
	baseTaskV2
	Req *milvuspb.HasPartitionRequest
	Rsp *milvuspb.BoolResponse
}

func (t *hasPartitionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasPartition); err != nil {
		return err
	}
	return nil
}

// Execute task execution
func (t *hasPartitionTask) Execute(ctx context.Context) error {
	t.Rsp.Status = succStatus()
	t.Rsp.Value = false
	// TODO: why HasPartitionRequest doesn't contain Timestamp but other requests do.
	coll, err := t.core.meta.GetCollectionByName(ctx, t.Req.CollectionName, typeutil.MaxTimestamp)
	if err != nil {
		t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
		return err
	}
	for _, part := range coll.Partitions {
		if part.PartitionName == t.Req.PartitionName {
			t.Rsp.Value = true
			break
		}
	}
	return nil
}

@@ -1,127 +0,0 @@
package rootcoord

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/stretchr/testify/assert"
)

func Test_hasPartitionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &hasPartitionTask{
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_Undefined,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})

	t.Run("normal case", func(t *testing.T) {
		task := &hasPartitionTask{
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasPartition,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

func Test_hasPartitionTask_Execute(t *testing.T) {
	t.Run("fail to get collection", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasPartition,
				},
				CollectionName: "test coll",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("failed", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				Partitions: []*model.Partition{
					{
						PartitionName: "invalid test partition",
					},
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
				CollectionName: "test coll",
				PartitionName:  "test partition",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				Partitions: []*model.Partition{
					{
						PartitionName: "invalid test partition",
					},
					{
						PartitionName: "test partition",
					},
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
				CollectionName: "test coll",
				PartitionName:  "test partition",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.True(t, task.Rsp.GetValue())
	})
}

@@ -68,6 +68,7 @@ const (
	DefaultStringIndexType = "Trie"
)

+//go:generate mockery --name=IMetaTable --outpkg=mockrootcoord
type IMetaTable interface {
	AddCollection(ctx context.Context, coll *model.Collection) error
	ChangeCollectionState(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error

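The //go:generate directive added here is what produces the mocks/meta_table.go file in the final hunk of this commit. A sketch of the assumed regeneration workflow (the install command and package path are assumptions; only the directive itself comes from the diff):

// Assumed workflow, not spelled out in the diff:
//
//	go install github.com/vektra/mockery/v2@v2.14.0  // version per the generated file header
//	go generate ./internal/rootcoord/...             // re-runs the directive below
package rootcoord

//go:generate mockery --name=IMetaTable --outpkg=mockrootcoord
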
@@ -663,7 +663,7 @@ func withAbnormalCode() Opt {

type mockScheduler struct {
	IScheduler
-	AddTaskFunc func(t taskV2) error
+	AddTaskFunc     func(t task) error
	GetMinDdlTsFunc func() Timestamp
	minDdlTs        Timestamp
}

@@ -672,7 +672,7 @@ func newMockScheduler() *mockScheduler {
	return &mockScheduler{}
}

-func (m mockScheduler) AddTask(t taskV2) error {
+func (m mockScheduler) AddTask(t task) error {
	if m.AddTaskFunc != nil {
		return m.AddTaskFunc(t)
	}

@@ -694,7 +694,7 @@ func withScheduler(sched IScheduler) Opt {

func withValidScheduler() Opt {
	sched := newMockScheduler()
-	sched.AddTaskFunc = func(t taskV2) error {
+	sched.AddTaskFunc = func(t task) error {
		t.NotifyDone(nil)
		return nil
	}

@@ -703,7 +703,7 @@ func withValidScheduler() Opt {

func withInvalidScheduler() Opt {
	sched := newMockScheduler()
-	sched.AddTaskFunc = func(t taskV2) error {
+	sched.AddTaskFunc = func(t task) error {
		return errors.New("error mock AddTask")
	}
	return withScheduler(sched)

@@ -711,7 +711,7 @@ func withInvalidScheduler() Opt {

func withTaskFailScheduler() Opt {
	sched := newMockScheduler()
-	sched.AddTaskFunc = func(t taskV2) error {
+	sched.AddTaskFunc = func(t task) error {
		err := errors.New("error mock task fail")
		t.NotifyDone(err)
		return nil

@@ -868,7 +868,7 @@ func newTickerWithFactory(factory msgstream.Factory) *timetickSync {
}

type mockDdlTsLockManager struct {
-	DdlTsLockManagerV2
+	DdlTsLockManager
	GetMinDdlTsFunc func() Timestamp
}

@@ -883,7 +883,7 @@ func newMockDdlTsLockManager() *mockDdlTsLockManager {
	return &mockDdlTsLockManager{}
}

-func withDdlTsLockManager(m DdlTsLockManagerV2) Opt {
+func withDdlTsLockManager(m DdlTsLockManager) Opt {
	return func(c *Core) {
		c.ddlTsLockManager = m
	}

@@ -0,0 +1,657 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.

package mockrootcoord

import (
	context "context"

	etcdpb "github.com/milvus-io/milvus/internal/proto/etcdpb"
	internalpb "github.com/milvus-io/milvus/internal/proto/internalpb"

	milvuspb "github.com/milvus-io/milvus/api/milvuspb"

	mock "github.com/stretchr/testify/mock"

	model "github.com/milvus-io/milvus/internal/metastore/model"
)

// IMetaTable is an autogenerated mock type for the IMetaTable type
type IMetaTable struct {
	mock.Mock
}

// AddCollection provides a mock function with given fields: ctx, coll
func (_m *IMetaTable) AddCollection(ctx context.Context, coll *model.Collection) error {
	ret := _m.Called(ctx, coll)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, *model.Collection) error); ok {
		r0 = rf(ctx, coll)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// AddCredential provides a mock function with given fields: credInfo
func (_m *IMetaTable) AddCredential(credInfo *internalpb.CredentialInfo) error {
	ret := _m.Called(credInfo)

	var r0 error
	if rf, ok := ret.Get(0).(func(*internalpb.CredentialInfo) error); ok {
		r0 = rf(credInfo)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// AddPartition provides a mock function with given fields: ctx, partition
func (_m *IMetaTable) AddPartition(ctx context.Context, partition *model.Partition) error {
	ret := _m.Called(ctx, partition)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, *model.Partition) error); ok {
		r0 = rf(ctx, partition)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// AlterAlias provides a mock function with given fields: ctx, alias, collectionName, ts
func (_m *IMetaTable) AlterAlias(ctx context.Context, alias string, collectionName string, ts uint64) error {
	ret := _m.Called(ctx, alias, collectionName, ts)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64) error); ok {
		r0 = rf(ctx, alias, collectionName, ts)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// AlterCredential provides a mock function with given fields: credInfo
func (_m *IMetaTable) AlterCredential(credInfo *internalpb.CredentialInfo) error {
	ret := _m.Called(credInfo)

	var r0 error
	if rf, ok := ret.Get(0).(func(*internalpb.CredentialInfo) error); ok {
		r0 = rf(credInfo)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ChangeCollectionState provides a mock function with given fields: ctx, collectionID, state, ts
func (_m *IMetaTable) ChangeCollectionState(ctx context.Context, collectionID int64, state etcdpb.CollectionState, ts uint64) error {
	ret := _m.Called(ctx, collectionID, state, ts)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int64, etcdpb.CollectionState, uint64) error); ok {
		r0 = rf(ctx, collectionID, state, ts)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ChangePartitionState provides a mock function with given fields: ctx, collectionID, partitionID, state, ts
func (_m *IMetaTable) ChangePartitionState(ctx context.Context, collectionID int64, partitionID int64, state etcdpb.PartitionState, ts uint64) error {
	ret := _m.Called(ctx, collectionID, partitionID, state, ts)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int64, int64, etcdpb.PartitionState, uint64) error); ok {
		r0 = rf(ctx, collectionID, partitionID, state, ts)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// CreateAlias provides a mock function with given fields: ctx, alias, collectionName, ts
func (_m *IMetaTable) CreateAlias(ctx context.Context, alias string, collectionName string, ts uint64) error {
	ret := _m.Called(ctx, alias, collectionName, ts)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64) error); ok {
		r0 = rf(ctx, alias, collectionName, ts)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// CreateRole provides a mock function with given fields: tenant, entity
func (_m *IMetaTable) CreateRole(tenant string, entity *milvuspb.RoleEntity) error {
	ret := _m.Called(tenant, entity)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, *milvuspb.RoleEntity) error); ok {
		r0 = rf(tenant, entity)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeleteCredential provides a mock function with given fields: username
func (_m *IMetaTable) DeleteCredential(username string) error {
	ret := _m.Called(username)

	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(username)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DropAlias provides a mock function with given fields: ctx, alias, ts
func (_m *IMetaTable) DropAlias(ctx context.Context, alias string, ts uint64) error {
	ret := _m.Called(ctx, alias, ts)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, uint64) error); ok {
		r0 = rf(ctx, alias, ts)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DropGrant provides a mock function with given fields: tenant, role
func (_m *IMetaTable) DropGrant(tenant string, role *milvuspb.RoleEntity) error {
	ret := _m.Called(tenant, role)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, *milvuspb.RoleEntity) error); ok {
		r0 = rf(tenant, role)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DropRole provides a mock function with given fields: tenant, roleName
func (_m *IMetaTable) DropRole(tenant string, roleName string) error {
	ret := _m.Called(tenant, roleName)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(tenant, roleName)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetCollectionByID provides a mock function with given fields: ctx, collectionID, ts
func (_m *IMetaTable) GetCollectionByID(ctx context.Context, collectionID int64, ts uint64) (*model.Collection, error) {
	ret := _m.Called(ctx, collectionID, ts)

	var r0 *model.Collection
	if rf, ok := ret.Get(0).(func(context.Context, int64, uint64) *model.Collection); ok {
		r0 = rf(ctx, collectionID, ts)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*model.Collection)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, int64, uint64) error); ok {
		r1 = rf(ctx, collectionID, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetCollectionByName provides a mock function with given fields: ctx, collectionName, ts
func (_m *IMetaTable) GetCollectionByName(ctx context.Context, collectionName string, ts uint64) (*model.Collection, error) {
	ret := _m.Called(ctx, collectionName, ts)

	var r0 *model.Collection
	if rf, ok := ret.Get(0).(func(context.Context, string, uint64) *model.Collection); ok {
		r0 = rf(ctx, collectionName, ts)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*model.Collection)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string, uint64) error); ok {
		r1 = rf(ctx, collectionName, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetCollectionIDByName provides a mock function with given fields: name
func (_m *IMetaTable) GetCollectionIDByName(name string) (int64, error) {
	ret := _m.Called(name)

	var r0 int64
	if rf, ok := ret.Get(0).(func(string) int64); ok {
		r0 = rf(name)
	} else {
		r0 = ret.Get(0).(int64)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(name)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetCollectionNameByID provides a mock function with given fields: collID
func (_m *IMetaTable) GetCollectionNameByID(collID int64) (string, error) {
	ret := _m.Called(collID)

	var r0 string
	if rf, ok := ret.Get(0).(func(int64) string); ok {
		r0 = rf(collID)
	} else {
		r0 = ret.Get(0).(string)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(int64) error); ok {
		r1 = rf(collID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetCredential provides a mock function with given fields: username
func (_m *IMetaTable) GetCredential(username string) (*internalpb.CredentialInfo, error) {
	ret := _m.Called(username)

	var r0 *internalpb.CredentialInfo
	if rf, ok := ret.Get(0).(func(string) *internalpb.CredentialInfo); ok {
		r0 = rf(username)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*internalpb.CredentialInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(username)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetPartitionByName provides a mock function with given fields: collID, partitionName, ts
func (_m *IMetaTable) GetPartitionByName(collID int64, partitionName string, ts uint64) (int64, error) {
	ret := _m.Called(collID, partitionName, ts)

	var r0 int64
	if rf, ok := ret.Get(0).(func(int64, string, uint64) int64); ok {
		r0 = rf(collID, partitionName, ts)
	} else {
		r0 = ret.Get(0).(int64)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(int64, string, uint64) error); ok {
		r1 = rf(collID, partitionName, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetPartitionNameByID provides a mock function with given fields: collID, partitionID, ts
func (_m *IMetaTable) GetPartitionNameByID(collID int64, partitionID int64, ts uint64) (string, error) {
	ret := _m.Called(collID, partitionID, ts)

	var r0 string
	if rf, ok := ret.Get(0).(func(int64, int64, uint64) string); ok {
		r0 = rf(collID, partitionID, ts)
	} else {
		r0 = ret.Get(0).(string)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(int64, int64, uint64) error); ok {
		r1 = rf(collID, partitionID, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// IsAlias provides a mock function with given fields: name
func (_m *IMetaTable) IsAlias(name string) bool {
	ret := _m.Called(name)

	var r0 bool
	if rf, ok := ret.Get(0).(func(string) bool); ok {
		r0 = rf(name)
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// ListAbnormalCollections provides a mock function with given fields: ctx, ts
func (_m *IMetaTable) ListAbnormalCollections(ctx context.Context, ts uint64) ([]*model.Collection, error) {
	ret := _m.Called(ctx, ts)

	var r0 []*model.Collection
	if rf, ok := ret.Get(0).(func(context.Context, uint64) []*model.Collection); ok {
		r0 = rf(ctx, ts)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*model.Collection)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
		r1 = rf(ctx, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListAliasesByID provides a mock function with given fields: collID
func (_m *IMetaTable) ListAliasesByID(collID int64) []string {
	ret := _m.Called(collID)

	var r0 []string
	if rf, ok := ret.Get(0).(func(int64) []string); ok {
		r0 = rf(collID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]string)
		}
	}

	return r0
}

// ListCollectionPhysicalChannels provides a mock function with given fields:
func (_m *IMetaTable) ListCollectionPhysicalChannels() map[int64][]string {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 map[int64][]string
|
||||
if rf, ok := ret.Get(0).(func() map[int64][]string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[int64][]string)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListCollections provides a mock function with given fields: ctx, ts
|
||||
func (_m *IMetaTable) ListCollections(ctx context.Context, ts uint64) ([]*model.Collection, error) {
|
||||
ret := _m.Called(ctx, ts)
|
||||
|
||||
var r0 []*model.Collection
|
||||
if rf, ok := ret.Get(0).(func(context.Context, uint64) []*model.Collection); ok {
|
||||
r0 = rf(ctx, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*model.Collection)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
|
||||
r1 = rf(ctx, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListCredentialUsernames provides a mock function with given fields:
|
||||
func (_m *IMetaTable) ListCredentialUsernames() (*milvuspb.ListCredUsersResponse, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *milvuspb.ListCredUsersResponse
|
||||
if rf, ok := ret.Get(0).(func() *milvuspb.ListCredUsersResponse); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*milvuspb.ListCredUsersResponse)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListPolicy provides a mock function with given fields: tenant
|
||||
func (_m *IMetaTable) ListPolicy(tenant string) ([]string, error) {
|
||||
ret := _m.Called(tenant)
|
||||
|
||||
var r0 []string
|
||||
if rf, ok := ret.Get(0).(func(string) []string); ok {
|
||||
r0 = rf(tenant)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]string)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
r1 = rf(tenant)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListUserRole provides a mock function with given fields: tenant
|
||||
func (_m *IMetaTable) ListUserRole(tenant string) ([]string, error) {
|
||||
ret := _m.Called(tenant)
|
||||
|
||||
var r0 []string
|
||||
if rf, ok := ret.Get(0).(func(string) []string); ok {
|
||||
r0 = rf(tenant)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]string)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
r1 = rf(tenant)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OperatePrivilege provides a mock function with given fields: tenant, entity, operateType
|
||||
func (_m *IMetaTable) OperatePrivilege(tenant string, entity *milvuspb.GrantEntity, operateType milvuspb.OperatePrivilegeType) error {
|
||||
ret := _m.Called(tenant, entity, operateType)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, *milvuspb.GrantEntity, milvuspb.OperatePrivilegeType) error); ok {
|
||||
r0 = rf(tenant, entity, operateType)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// OperateUserRole provides a mock function with given fields: tenant, userEntity, roleEntity, operateType
|
||||
func (_m *IMetaTable) OperateUserRole(tenant string, userEntity *milvuspb.UserEntity, roleEntity *milvuspb.RoleEntity, operateType milvuspb.OperateUserRoleType) error {
|
||||
ret := _m.Called(tenant, userEntity, roleEntity, operateType)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, *milvuspb.UserEntity, *milvuspb.RoleEntity, milvuspb.OperateUserRoleType) error); ok {
|
||||
r0 = rf(tenant, userEntity, roleEntity, operateType)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// RemoveCollection provides a mock function with given fields: ctx, collectionID, ts
|
||||
func (_m *IMetaTable) RemoveCollection(ctx context.Context, collectionID int64, ts uint64) error {
|
||||
ret := _m.Called(ctx, collectionID, ts)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64, uint64) error); ok {
|
||||
r0 = rf(ctx, collectionID, ts)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// RemovePartition provides a mock function with given fields: ctx, collectionID, partitionID, ts
|
||||
func (_m *IMetaTable) RemovePartition(ctx context.Context, collectionID int64, partitionID int64, ts uint64) error {
|
||||
ret := _m.Called(ctx, collectionID, partitionID, ts)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64, int64, uint64) error); ok {
|
||||
r0 = rf(ctx, collectionID, partitionID, ts)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// SelectGrant provides a mock function with given fields: tenant, entity
|
||||
func (_m *IMetaTable) SelectGrant(tenant string, entity *milvuspb.GrantEntity) ([]*milvuspb.GrantEntity, error) {
|
||||
ret := _m.Called(tenant, entity)
|
||||
|
||||
var r0 []*milvuspb.GrantEntity
|
||||
if rf, ok := ret.Get(0).(func(string, *milvuspb.GrantEntity) []*milvuspb.GrantEntity); ok {
|
||||
r0 = rf(tenant, entity)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*milvuspb.GrantEntity)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, *milvuspb.GrantEntity) error); ok {
|
||||
r1 = rf(tenant, entity)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SelectRole provides a mock function with given fields: tenant, entity, includeUserInfo
|
||||
func (_m *IMetaTable) SelectRole(tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) {
|
||||
ret := _m.Called(tenant, entity, includeUserInfo)
|
||||
|
||||
var r0 []*milvuspb.RoleResult
|
||||
if rf, ok := ret.Get(0).(func(string, *milvuspb.RoleEntity, bool) []*milvuspb.RoleResult); ok {
|
||||
r0 = rf(tenant, entity, includeUserInfo)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*milvuspb.RoleResult)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, *milvuspb.RoleEntity, bool) error); ok {
|
||||
r1 = rf(tenant, entity, includeUserInfo)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SelectUser provides a mock function with given fields: tenant, entity, includeRoleInfo
|
||||
func (_m *IMetaTable) SelectUser(tenant string, entity *milvuspb.UserEntity, includeRoleInfo bool) ([]*milvuspb.UserResult, error) {
|
||||
ret := _m.Called(tenant, entity, includeRoleInfo)
|
||||
|
||||
var r0 []*milvuspb.UserResult
|
||||
if rf, ok := ret.Get(0).(func(string, *milvuspb.UserEntity, bool) []*milvuspb.UserResult); ok {
|
||||
r0 = rf(tenant, entity, includeRoleInfo)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*milvuspb.UserResult)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, *milvuspb.UserEntity, bool) error); ok {
|
||||
r1 = rf(tenant, entity, includeRoleInfo)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewIMetaTable interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewIMetaTable creates a new instance of IMetaTable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewIMetaTable(t mockConstructorTestingTNewIMetaTable) *IMetaTable {
|
||||
mock := &IMetaTable{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
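
As an aside for readers unfamiliar with mockery's output: the rf-style type assertions above let a test stub each method with either fixed values or per-call computed values. A minimal usage sketch, assuming only the testify/mock API and the methods generated above; the expectations shown are hypothetical, not taken from this commit:

    func TestIMetaTableMockUsageSketch(t *testing.T) {
        // Expectations are asserted automatically via the t.Cleanup hook
        // registered inside NewIMetaTable.
        meta := NewIMetaTable(t)

        // Static return values: every matching call yields the same result.
        meta.On("IsAlias", "my_alias").Return(true)

        // Function-typed return values: the generated rf type assertion lets
        // the result be computed from the actual call arguments.
        meta.On("GetCollectionIDByName", mock.AnythingOfType("string")).Return(
            func(name string) int64 { return 100 },
            func(name string) error { return nil },
        )

        _ = meta.IsAlias("my_alias")           // true
        _, _ = meta.GetCollectionIDByName("x") // 100, nil
    }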

@ -28,6 +28,8 @@ import (
	"syscall"
	"time"

	"github.com/milvus-io/milvus/api/schemapb"

	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"

	"github.com/milvus-io/milvus/internal/metastore/db/rootcoord"

@ -99,7 +101,7 @@ type Core struct {
	meta             IMetaTable
	scheduler        IScheduler
	broker           Broker
	ddlTsLockManager DdlTsLockManagerV2
	ddlTsLockManager DdlTsLockManager
	garbageCollector GarbageCollector
	stepExecutor     StepExecutor

@ -446,7 +448,7 @@ func (c *Core) initInternal() error {
	c.proxyClientManager = newProxyClientManager(c.proxyCreator)

	c.broker = newServerBroker(c)
	c.ddlTsLockManager = newDdlTsLockManagerV2(c.tsoAllocator)
	c.ddlTsLockManager = newDdlTsLockManager(c.tsoAllocator)
	c.garbageCollector = newBgGarbageCollector(c)
	c.stepExecutor = newBgStepExecutor(c.ctx)

@ -716,11 +718,11 @@ func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollecti
	metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("CreateCollection")

	log.Info("received request to create collection", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("received request to create collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &createCollectionTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -729,7 +731,7 @@ func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollecti
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to create collection", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to enqueue request to create collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -738,7 +740,7 @@ func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollecti
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to create collection", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to create collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

@ -751,7 +753,7 @@ func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollecti
	metrics.RootCoordDDLReqLatency.WithLabelValues("CreateCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfCollections.Inc()

	log.Info("done to create collection", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("done to create collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil

@ -766,11 +768,11 @@ func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRe
	metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("DropCollection")

	log.Info("received request to drop collection", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("received request to drop collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &dropCollectionTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -779,7 +781,7 @@ func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRe
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to drop collection", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to enqueue request to drop collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -788,7 +790,7 @@ func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRe
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to drop collection", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to drop collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

@ -801,7 +803,7 @@ func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRe
	metrics.RootCoordDDLReqLatency.WithLabelValues("DropCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfCollections.Dec()

	log.Info("done to drop collection", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("done to drop collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()),
		zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil

@ -819,52 +821,56 @@ func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequ
	metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("HasCollection")

	log.Info("received request to has collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))
	ts := getTravelTs(in)
	log := log.Ctx(ctx).With(zap.String("collection name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", ts))

	t := &hasCollectionTask{
		baseTaskV2: baseTaskV2{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
		Rsp: &milvuspb.BoolResponse{},
	}
	log.Info("received request to has collection")

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to has collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()),
			Value:  false,
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to enqueue request to has collection", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection name", in.GetCollectionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()),
			Value:  false,
		}, nil
	}
	_, err := c.meta.GetCollectionByName(ctx, in.GetCollectionName(), ts)
	has := err == nil

	metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("HasCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to has collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection name", in.GetCollectionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()),
		zap.Bool("exist", t.Rsp.GetValue()))
	return t.Rsp, nil
	log.Info("done to has collection", zap.Bool("exist", has))

	return &milvuspb.BoolResponse{Status: succStatus(), Value: has}, nil
}
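
The read paths above lean on a small helper, getTravelTs, that this diff does not reproduce. A plausible reconstruction, inferred from the tests further down (a zero timestamp reads the latest snapshot); treat the exact shape as an assumption, not the committed code:

    // Hypothetical sketch: requests without an explicit time-travel timestamp
    // are served at the newest snapshot (MaxTimestamp); otherwise the
    // caller-supplied timestamp is used as-is.
    func getTravelTs(req interface{ GetTimeStamp() uint64 }) Timestamp {
        if req.GetTimeStamp() == 0 {
            return typeutil.MaxTimestamp
        }
        return req.GetTimeStamp()
    }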

func (c *Core) describeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*model.Collection, error) {
	ts := getTravelTs(in)
	if in.GetCollectionName() != "" {
		return c.meta.GetCollectionByName(ctx, in.GetCollectionName(), ts)
	}
	return c.meta.GetCollectionByID(ctx, in.GetCollectionID(), in.GetTimeStamp())
}

func convertModelToDesc(collInfo *model.Collection, aliases []string) *milvuspb.DescribeCollectionResponse {
	resp := &milvuspb.DescribeCollectionResponse{Status: succStatus()}

	resp.Schema = &schemapb.CollectionSchema{
		Name:        collInfo.Name,
		Description: collInfo.Description,
		AutoID:      collInfo.AutoID,
		Fields:      model.MarshalFieldModels(collInfo.Fields),
	}
	resp.CollectionID = collInfo.CollectionID
	resp.VirtualChannelNames = collInfo.VirtualChannelNames
	resp.PhysicalChannelNames = collInfo.PhysicalChannelNames
	if collInfo.ShardsNum == 0 {
		collInfo.ShardsNum = int32(len(collInfo.VirtualChannelNames))
	}
	resp.ShardsNum = collInfo.ShardsNum
	resp.ConsistencyLevel = collInfo.ConsistencyLevel

	resp.CreatedTimestamp = collInfo.CreateTime
	createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
	resp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
	resp.Aliases = aliases
	resp.StartPositions = collInfo.StartPositions
	resp.CollectionName = resp.Schema.Name

	return resp
}
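
convertModelToDesc derives the UTC creation time from the hybrid create timestamp via tsoutil.ParseHybridTs. Roughly, a hybrid timestamp packs a physical time in milliseconds above a logical sequence counter; the sketch below illustrates the decomposition, with the 18-bit logical width stated as an assumption about the TSO layout rather than a guarantee:

    // Illustrative only: split a hybrid timestamp into its physical
    // (millisecond) and logical (sequence) parts.
    const logicalBits = 18

    func parseHybridTsSketch(ts uint64) (physicalMs int64, logical int64) {
        physicalMs = int64(ts >> logicalBits)
        logical = int64(ts & ((1 << logicalBits) - 1))
        return
    }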

// DescribeCollection return collection info

@ -878,53 +884,33 @@ func (c *Core) DescribeCollection(ctx context.Context, in *milvuspb.DescribeColl
	metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("DescribeCollection")

	log.Info("received request to describe collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()),
		zap.Int64("msgID", in.GetBase().GetMsgID()))
	ts := getTravelTs(in)
	log := log.Ctx(ctx).With(zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()), zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", ts))

	t := &describeCollectionTask{
		baseTaskV2: baseTaskV2{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
		Rsp: &milvuspb.DescribeCollectionResponse{},
	}
	// TODO(longjiquan): log may be very frequent here.

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to describe collection",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()),
			zap.Int64("msgID", in.GetBase().GetMsgID()))
	log.Info("received request to describe collection")

	coll, err := c.describeCollection(ctx, in)
	if err != nil {
		log.Error("failed to describe collection", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()

		return &milvuspb.DescribeCollectionResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeCollection failed: "+err.Error()),
			// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()),
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to describe collection",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()
		return &milvuspb.DescribeCollectionResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeCollection failed: "+err.Error()),
		}, nil
	}
	aliases := c.meta.ListAliasesByID(coll.CollectionID)
	desc := convertModelToDesc(coll, aliases)

	metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("DescribeCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to describe collection", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))
	return t.Rsp, nil
	log.Info("done to describe collection", zap.Int64("collection_id", desc.GetCollectionID()))

	return desc, nil
}

// ShowCollections list all collection names

@ -938,50 +924,34 @@ func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollections
	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("ShowCollections")

	log.Info("received request to show collections", zap.String("role", typeutil.RootCoordRole),
		zap.String("dbname", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()))
	resp := &milvuspb.ShowCollectionsResponse{Status: succStatus()}
	ts := getTravelTs(in)
	log := log.Ctx(ctx).With(zap.String("dbname", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", ts))

	t := &showCollectionTask{
		baseTaskV2: baseTaskV2{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
		Rsp: &milvuspb.ShowCollectionsResponse{},
	log.Info("received request to show collections")

	colls, err := c.meta.ListCollections(ctx, ts)
	if err != nil {
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc()
		resp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
		log.Error("failed to show collections", zap.Error(err))
		return resp, nil
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to show collections", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("dbname", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc()
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()),
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to show collections", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("dbname", in.GetDbName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc()
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()),
		}, nil
	for _, meta := range colls {
		resp.CollectionNames = append(resp.CollectionNames, meta.Name)
		resp.CollectionIds = append(resp.CollectionIds, meta.CollectionID)
		resp.CreatedTimestamps = append(resp.CreatedTimestamps, meta.CreateTime)
		physical, _ := tsoutil.ParseHybridTs(meta.CreateTime)
		resp.CreatedUtcTimestamps = append(resp.CreatedUtcTimestamps, uint64(physical))
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("ShowCollections").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to show collections", zap.String("role", typeutil.RootCoordRole),
		zap.String("dbname", in.GetDbName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()),
		zap.Int("num of collections", len(t.Rsp.GetCollectionNames()))) // maybe very large, print number instead.
	return t.Rsp, nil
	log.Info("done to show collections", zap.Int("num of collections", len(resp.GetCollectionNames()))) // maybe very large, print number instead.

	return resp, nil
}

// CreatePartition create partition

@ -993,12 +963,12 @@ func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartition
	metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("CreatePartition")

	log.Info("received request to create partition", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("received request to create partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &createPartitionTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -1007,7 +977,7 @@ func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartition
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to create partition", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to enqueue request to create partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -1017,7 +987,7 @@ func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartition
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to create partition", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to create partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

@ -1029,7 +999,7 @@ func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartition
	metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("CreatePartition").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to create partition", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("done to create partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil

@ -1044,12 +1014,12 @@ func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequ
	metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("DropPartition")

	log.Info("received request to drop partition", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("received request to drop partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &dropPartitionTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -1058,7 +1028,7 @@ func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequ
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to drop partition", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to enqueue request to drop partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -1067,7 +1037,7 @@ func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequ
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}
	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to drop partition", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to drop partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

@ -1079,7 +1049,7 @@ func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequ
	metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("DropPartition").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to drop partition", zap.String("role", typeutil.RootCoordRole),
	log.Ctx(ctx).Info("done to drop partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil

@ -1097,54 +1067,35 @@ func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionReques
	metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("HasPartition")

	log.Info("received request to has partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()))
	// TODO(longjiquan): why HasPartitionRequest doesn't contain Timestamp but other requests do.
	ts := typeutil.MaxTimestamp
	resp := &milvuspb.BoolResponse{Status: succStatus(), Value: false}
	log := log.Ctx(ctx).With(zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", ts))

	t := &hasPartitionTask{
		baseTaskV2: baseTaskV2{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
		Rsp: &milvuspb.BoolResponse{},
	log.Info("received request to has partition")

	coll, err := c.meta.GetCollectionByName(ctx, in.GetCollectionName(), ts)
	if err != nil {
		log.Error("failed to has partition", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc()
		// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
		resp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
		return resp, nil
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to has partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()),
			Value:  false,
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to has partition", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()),
			Value:  false,
		}, nil
	for _, part := range coll.Partitions {
		if part.PartitionName == in.GetPartitionName() {
			resp.Value = true
			break
		}
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("HasPartition").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to has partition", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()),
		zap.Bool("exist", t.Rsp.GetValue()))
	return t.Rsp, nil
	log.Info("done to has partition", zap.Bool("exist", resp.GetValue()))

	return resp, nil
}

// ShowPartitions list all partition names

@ -1158,49 +1109,44 @@ func (c *Core) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRe
	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("ShowPartitions")

	log.Info("received request to show partitions", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))
	// TODO(longjiquan): why ShowPartitionsRequest doesn't contain Timestamp but other requests do.
	ts := typeutil.MaxTimestamp
	resp := &milvuspb.ShowPartitionsResponse{Status: succStatus()}
	log := log.Ctx(ctx).With(zap.String("collection", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &showPartitionTask{
		baseTaskV2: baseTaskV2{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
		Rsp: &milvuspb.ShowPartitionsResponse{},
	log.Info("received request to show partitions")

	var coll *model.Collection
	var err error

	if in.GetCollectionName() == "" {
		coll, err = c.meta.GetCollectionByID(ctx, in.GetCollectionID(), ts)
	} else {
		coll, err = c.meta.GetCollectionByName(ctx, in.GetCollectionName(), ts)
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Error("failed to enqueue request to show partitions", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

	if err != nil {
		log.Error("failed to show partitions", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc()
		return &milvuspb.ShowPartitionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()),
		}, nil
		// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
		resp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
		return resp, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Error("failed to show partitions", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()),
			zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc()
		return &milvuspb.ShowPartitionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()),
		}, nil
	for _, part := range coll.Partitions {
		resp.PartitionIDs = append(resp.PartitionIDs, part.PartitionID)
		resp.PartitionNames = append(resp.PartitionNames, part.PartitionName)
		resp.CreatedTimestamps = append(resp.CreatedTimestamps, part.PartitionCreatedTimestamp)
		physical, _ := tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp)
		resp.CreatedUtcTimestamps = append(resp.CreatedUtcTimestamps, uint64(physical))
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("ShowPartitions").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to show partitions", zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()),
		zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()),
		zap.Strings("partitions", t.Rsp.GetPartitionNames()))
	return t.Rsp, nil
	log.Info("done to show partitions", zap.Strings("partitions", resp.GetPartitionNames()))

	return resp, nil
}

// ShowSegments list all segments

@ -1220,7 +1166,7 @@ func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestam

	ts, err := c.tsoAllocator.GenerateTSO(in.GetCount())
	if err != nil {
		log.Error("failed to allocate timestamp", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to allocate timestamp", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -1248,7 +1194,7 @@ func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*ro
	}
	start, _, err := c.idAllocator.Alloc(in.Count)
	if err != nil {
		log.Error("failed to allocate id", zap.String("role", typeutil.RootCoordRole),
		log.Ctx(ctx).Error("failed to allocate id", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.Int64("msgID", in.GetBase().GetMsgID()))

@ -1378,7 +1324,7 @@ func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest)
		zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &createAliasTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -1428,7 +1374,7 @@ func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*c
		zap.String("alias", in.GetAlias()), zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &dropAliasTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -1478,7 +1424,7 @@ func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (
		zap.Int64("msgID", in.GetBase().GetMsgID()))

	t := &alterAliasTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),

@ -2,12 +2,20 @@ package rootcoord

import (
	"context"
	"errors"
	"math/rand"
	"testing"
	"time"

	"github.com/milvus-io/milvus/internal/proto/proxypb"
	mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"

	pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/stretchr/testify/mock"

	"github.com/milvus-io/milvus/internal/proto/proxypb"
	"github.com/milvus-io/milvus/internal/util/metricsinfo"
	"github.com/milvus-io/milvus/internal/util/sessionutil"

@ -114,34 +122,63 @@ func TestRootCoord_DescribeCollection(t *testing.T) {
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to add task", func(t *testing.T) {
	t.Run("failed to get collection by name", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64")).
			Return(nil, errors.New("error mock GetCollectionByName"))

		c := newTestCore(withHealthyCode(),
			withInvalidScheduler())
			withMeta(meta))

		ctx := context.Background()
		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{})

		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{CollectionName: "test"})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to execute", func(t *testing.T) {
	t.Run("failed to get collection by id", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByID",
			mock.Anything, // context.Context
			mock.AnythingOfType("int64"),
			mock.AnythingOfType("uint64")).
			Return(nil, errors.New("error mock GetCollectionByID"))

		c := newTestCore(withHealthyCode(),
			withTaskFailScheduler())
			withMeta(meta))

		ctx := context.Background()
		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{})

		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{CollectionID: 100})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("normal case, everything is ok", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64")).
			Return(&model.Collection{CollectionID: 100}, nil)
		meta.On("ListAliasesByID",
			mock.AnythingOfType("int64")).
			Return([]string{"alias1", "alias2"})

		c := newTestCore(withHealthyCode(),
			withValidScheduler())
			withMeta(meta))

		ctx := context.Background()
		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{})

		resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{CollectionName: "test"})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.Equal(t, UniqueID(100), resp.GetCollectionID())
		assert.ElementsMatch(t, []string{"alias1", "alias2"}, resp.GetAliases())
	})
}
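
The withMeta option used throughout these tests is not shown in the diff; given the withHealthyCode/withValidScheduler options around it, it is presumably a functional option that swaps the mocked meta table into the test Core. A sketch under that assumption:

    // Hypothetical sketch of the functional test option; the real helper may
    // differ in detail.
    func withMeta(meta IMetaTable) Opt {
        return func(c *Core) {
            c.meta = meta
        }
    }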

@ -154,34 +191,41 @@ func TestRootCoord_HasCollection(t *testing.T) {
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to add task", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withInvalidScheduler())

		ctx := context.Background()
		resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to execute", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withTaskFailScheduler())

		ctx := context.Background()
		resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("normal case, everything is ok", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withValidScheduler())
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64"),
		).Return(func(ctx context.Context, collectionName string, ts Timestamp) *model.Collection {
			if ts == typeutil.MaxTimestamp {
				return &model.Collection{}
			}
			return nil
		}, func(ctx context.Context, collectionName string, ts Timestamp) error {
			if ts == typeutil.MaxTimestamp {
				return nil
			}
			return errors.New("error mock GetCollectionByName")
		})

		c := newTestCore(withHealthyCode(), withMeta(meta))
		ctx := context.Background()

		resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.True(t, resp.GetValue())

		resp, err = c.HasCollection(ctx, &milvuspb.HasCollectionRequest{TimeStamp: typeutil.MaxTimestamp})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.True(t, resp.GetValue())

		resp, err = c.HasCollection(ctx, &milvuspb.HasCollectionRequest{TimeStamp: 100})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.False(t, resp.GetValue())
	})
}

@ -194,34 +238,49 @@ func TestRootCoord_ShowCollections(t *testing.T) {
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to add task", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withInvalidScheduler())

		ctx := context.Background()
		resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to execute", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withTaskFailScheduler())

		ctx := context.Background()
		resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("normal case, everything is ok", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("ListCollections",
			mock.Anything, // context.Context
			mock.AnythingOfType("uint64")).
			Return(func(ctx context.Context, ts Timestamp) []*model.Collection {
				if ts == typeutil.MaxTimestamp {
					return []*model.Collection{
						{
							CollectionID: 100,
							Name:         "test",
							State:        pb.CollectionState_CollectionCreated,
						},
					}
				}
				return nil
			}, func(ctx context.Context, ts Timestamp) error {
				if ts == typeutil.MaxTimestamp {
					return nil
				}
				return errors.New("error mock ListCollections")
			})

		c := newTestCore(withHealthyCode(),
			withValidScheduler())
			withMeta(meta))

		ctx := context.Background()

		resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.ElementsMatch(t, []int64{100}, resp.GetCollectionIds())
		assert.ElementsMatch(t, []string{"test"}, resp.GetCollectionNames())

		resp, err = c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{TimeStamp: typeutil.MaxTimestamp})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.ElementsMatch(t, []int64{100}, resp.GetCollectionIds())
		assert.ElementsMatch(t, []string{"test"}, resp.GetCollectionNames())

		resp, err = c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{TimeStamp: 10000})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})
}

@ -314,34 +373,40 @@ func TestRootCoord_HasPartition(t *testing.T) {
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to add task", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withInvalidScheduler())

		ctx := context.Background()
		resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to execute", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withTaskFailScheduler())

		ctx := context.Background()
		resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("normal case, everything is ok", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withValidScheduler())
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64"),
		).Return(func(ctx context.Context, collectionName string, ts Timestamp) *model.Collection {
			if collectionName == "test1" {
				return &model.Collection{Partitions: []*model.Partition{{PartitionName: "test_partition"}}}
			}
			return nil
		}, func(ctx context.Context, collectionName string, ts Timestamp) error {
			if collectionName == "test1" {
				return nil
			}
			return errors.New("error mock GetCollectionByName")
		})

		c := newTestCore(withHealthyCode(), withMeta(meta))
		ctx := context.Background()
		resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{})

		resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{CollectionName: "error_case"})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())

		resp, err = c.HasPartition(ctx, &milvuspb.HasPartitionRequest{CollectionName: "test1", PartitionName: "test_partition"})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.True(t, resp.GetValue())

		resp, err = c.HasPartition(ctx, &milvuspb.HasPartitionRequest{CollectionName: "test1", PartitionName: "non_exist"})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.False(t, resp.GetValue())
	})
}

@ -354,32 +419,61 @@ func TestRootCoord_ShowPartitions(t *testing.T) {
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to add task", func(t *testing.T) {
	t.Run("failed to get collection by name", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64")).
			Return(nil, errors.New("error mock GetCollectionByName"))

		c := newTestCore(withHealthyCode(),
			withInvalidScheduler())
			withMeta(meta))

		ctx := context.Background()
		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{})

		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{CollectionName: "test"})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("failed to execute", func(t *testing.T) {
	t.Run("failed to get collection by id", func(t *testing.T) {
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByID",
			mock.Anything, // context.Context
			mock.AnythingOfType("int64"),
			mock.AnythingOfType("uint64")).
			Return(nil, errors.New("error mock GetCollectionByID"))

		c := newTestCore(withHealthyCode(),
			withTaskFailScheduler())
			withMeta(meta))

		ctx := context.Background()
		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{})

		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{CollectionID: 100})
		assert.NoError(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
	})

	t.Run("normal case, everything is ok", func(t *testing.T) {
		c := newTestCore(withHealthyCode(),
			withValidScheduler())
		meta := mockrootcoord.NewIMetaTable(t)
		meta.On("GetCollectionByName",
			mock.Anything, // context.Context
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64"),
		).Return(&model.Collection{Partitions: []*model.Partition{{
			PartitionName: "test_partition",
			PartitionID:   102,
		}}}, nil)

		c := newTestCore(withHealthyCode(), withMeta(meta))
		ctx := context.Background()
		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{})

		resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{CollectionName: "test"})
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
		assert.ElementsMatch(t, []string{"test_partition"}, resp.GetPartitionNames())
		assert.ElementsMatch(t, []int64{102}, resp.GetPartitionIDs())
	})
}
|
||||
|
||||
|
|
|
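The rewritten HasPartition and ShowPartitions tests above capture the core of this change: read-only DDL calls no longer enqueue a task on the scheduler but query the meta table directly, which is why the mocks now stub IMetaTable instead of the scheduler. A minimal sketch of that direct-read shape, using only helpers visible in this diff (the function name and exact wiring are illustrative, not the actual handler):

// Sketch, not the real handler: a read path that skips the scheduler entirely.
func (c *Core) hasPartitionDirect(ctx context.Context, req *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
	coll, err := c.meta.GetCollectionByName(ctx, req.GetCollectionName(), typeutil.MaxTimestamp)
	if err != nil {
		// A missing collection is reported in the status, not as an RPC error.
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error()),
		}, nil
	}
	for _, part := range coll.Partitions {
		if part.PartitionName == req.GetPartitionName() {
			return &milvuspb.BoolResponse{Status: succStatus(), Value: true}, nil
		}
	}
	return &milvuspb.BoolResponse{Status: succStatus(), Value: false}, nil
}
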
@ -12,7 +12,7 @@ import (
type IScheduler interface {
	Start()
	Stop()
	AddTask(t taskV2) error
	AddTask(t task) error
}

type scheduler struct {

@ -23,7 +23,7 @@ type scheduler struct {
	idAllocator  allocator.GIDAllocator
	tsoAllocator tso.Allocator

	taskChan chan taskV2
	taskChan chan task

	lock sync.Mutex
}

@ -37,7 +37,7 @@ func newScheduler(ctx context.Context, idAllocator allocator.GIDAllocator, tsoAl
		cancel:       cancel,
		idAllocator:  idAllocator,
		tsoAllocator: tsoAllocator,
		taskChan:     make(chan taskV2, n),
		taskChan:     make(chan task, n),
	}
}

@ -51,7 +51,7 @@ func (s *scheduler) Stop() {
	s.wg.Wait()
}

func (s *scheduler) execute(task taskV2) {
func (s *scheduler) execute(task task) {
	if err := task.Prepare(task.GetCtx()); err != nil {
		task.NotifyDone(err)
		return

@ -72,7 +72,7 @@ func (s *scheduler) taskLoop() {
	}
}

func (s *scheduler) setID(task taskV2) error {
func (s *scheduler) setID(task task) error {
	id, err := s.idAllocator.AllocOne()
	if err != nil {
		return err

@ -81,7 +81,7 @@ func (s *scheduler) setID(task taskV2) error {
	return nil
}

func (s *scheduler) setTs(task taskV2) error {
func (s *scheduler) setTs(task task) error {
	ts, err := s.tsoAllocator.GenerateTSO(1)
	if err != nil {
		return err

@ -90,11 +90,11 @@ func (s *scheduler) setTs(task taskV2) error {
	return nil
}

func (s *scheduler) enqueue(task taskV2) {
func (s *scheduler) enqueue(task task) {
	s.taskChan <- task
}

func (s *scheduler) AddTask(task taskV2) error {
func (s *scheduler) AddTask(task task) error {
	// make sure that setting ts and enqueue is atomic.
	s.lock.Lock()
	defer s.lock.Unlock()

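The comment kept in AddTask is the key invariant: timestamp assignment and enqueue happen under one lock, so tasks drain from taskChan in non-decreasing ts order. A toy model of why the lock matters (a standalone sketch, not Milvus code):

import "sync"

type toyTask struct{ ts uint64 }

type toyScheduler struct {
	mu   sync.Mutex
	next uint64        // stand-in for the TSO allocator
	ch   chan *toyTask // stand-in for taskChan (buffered)
}

func (s *toyScheduler) AddTask(t *toyTask) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.next++ // fetch a timestamp...
	t.ts = s.next
	s.ch <- t // ...and enqueue before anyone else can fetch a later one
}

Without the mutex, goroutine A could take ts=5, goroutine B ts=6, and B could win the race to the channel, so the consumer would observe timestamps out of order.
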
@ -14,14 +14,14 @@ import (
)

type mockFailTask struct {
	baseTaskV2
	baseTask
	prepareErr error
	executeErr error
}

func newMockFailTask() *mockFailTask {
	task := &mockFailTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  context.Background(),
			done: make(chan error, 1),
		},

@ -51,12 +51,12 @@ func (m mockFailTask) Execute(context.Context) error {
}

type mockNormalTask struct {
	baseTaskV2
	baseTask
}

func newMockNormalTask() *mockNormalTask {
	task := &mockNormalTask{
		baseTaskV2: baseTaskV2{
		baseTask: baseTask{
			ctx:  context.Background(),
			done: make(chan error, 1),
		},

@ -141,7 +141,7 @@ func Test_scheduler_bg(t *testing.T) {
	s.Start()

	n := 10
	tasks := make([]taskV2, 0, n)
	tasks := make([]task, 0, n)
	for i := 0; i < n; i++ {
		which := rand.Int() % 3
		switch which {

@ -1,47 +0,0 @@
package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/internal/util/tsoutil"
)

// showCollectionTask show collection request task
type showCollectionTask struct {
	baseTaskV2
	Req *milvuspb.ShowCollectionsRequest
	Rsp *milvuspb.ShowCollectionsResponse
}

func (t *showCollectionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_ShowCollections); err != nil {
		return err
	}
	return nil
}

// Execute task execution
func (t *showCollectionTask) Execute(ctx context.Context) error {
	t.Rsp.Status = succStatus()
	ts := t.Req.GetTimeStamp()
	if ts == 0 {
		ts = typeutil.MaxTimestamp
	}
	colls, err := t.core.meta.ListCollections(ctx, ts)
	if err != nil {
		t.Rsp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
		return err
	}
	for _, meta := range colls {
		t.Rsp.CollectionNames = append(t.Rsp.CollectionNames, meta.Name)
		t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.CollectionID)
		t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, meta.CreateTime)
		physical, _ := tsoutil.ParseHybridTs(meta.CreateTime)
		t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
	}
	return nil
}

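With this file deleted, listing collections no longer needs a task object, a Prepare/Execute round trip, or a pass through the scheduler; the same work can run inline in the gRPC handler. A hedged sketch of the equivalent inline logic, reusing getTravelTs from the util.go hunk further down (the handler name is illustrative):

// Sketch only: the former Execute body as a plain method on Core.
func (c *Core) showCollectionsDirect(ctx context.Context, req *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
	colls, err := c.meta.ListCollections(ctx, getTravelTs(req))
	if err != nil {
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()),
		}, nil
	}
	resp := &milvuspb.ShowCollectionsResponse{Status: succStatus()}
	for _, meta := range colls {
		resp.CollectionNames = append(resp.CollectionNames, meta.Name)
		resp.CollectionIds = append(resp.CollectionIds, meta.CollectionID)
	}
	return resp, nil
}
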
@ -1,88 +0,0 @@
package rootcoord

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/stretchr/testify/assert"
)

func Test_showCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &showCollectionTask{
			Req: &milvuspb.ShowCollectionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_Undefined,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})

	t.Run("normal case", func(t *testing.T) {
		task := &showCollectionTask{
			Req: &milvuspb.ShowCollectionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowCollections,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

func Test_showCollectionTask_Execute(t *testing.T) {
	t.Run("failed to list collections", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &showCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.ShowCollectionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowCollections,
				},
			},
			Rsp: &milvuspb.ShowCollectionsResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.ListCollectionsFunc = func(ctx context.Context, ts Timestamp) ([]*model.Collection, error) {
			return []*model.Collection{
				{
					Name: "test coll",
				},
				{
					Name: "test coll2",
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &showCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.ShowCollectionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowCollections,
				},
			},
			Rsp: &milvuspb.ShowCollectionsResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, task.Rsp.GetStatus().GetErrorCode())
		assert.Equal(t, 2, len(task.Rsp.GetCollectionNames()))
	})
}

@ -1,51 +0,0 @@
package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/milvus-io/milvus/internal/util/tsoutil"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// showPartitionTask show partition request task
type showPartitionTask struct {
	baseTaskV2
	Req *milvuspb.ShowPartitionsRequest
	Rsp *milvuspb.ShowPartitionsResponse
}

func (t *showPartitionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_ShowPartitions); err != nil {
		return err
	}
	return nil
}

// Execute task execution
func (t *showPartitionTask) Execute(ctx context.Context) error {
	var coll *model.Collection
	var err error
	t.Rsp.Status = succStatus()
	if t.Req.GetCollectionName() == "" {
		coll, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetCollectionID(), typeutil.MaxTimestamp)
	} else {
		coll, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp)
	}
	if err != nil {
		t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
		return err
	}

	for _, part := range coll.Partitions {
		t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, part.PartitionID)
		t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, part.PartitionName)
		t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, part.PartitionCreatedTimestamp)
		physical, _ := tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp)
		t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
	}

	return nil
}

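Both deleted Execute bodies converted creation times with tsoutil.ParseHybridTs before filling CreatedUtcTimestamps. A hybrid (TSO) timestamp packs wall-clock milliseconds and a logical counter into a single uint64; a sketch of the split, assuming the conventional 18 logical bits (the constant is an assumption here, not read from tsoutil):

// Assumed layout: high bits = physical milliseconds, low 18 bits = logical counter.
const logicalBits = 18

func parseHybrid(ts uint64) (physical int64, logical uint64) {
	logical = ts & ((1 << logicalBits) - 1) // per-millisecond sequence number
	physical = int64(ts >> logicalBits)     // Unix milliseconds
	return physical, logical
}
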
@ -1,119 +0,0 @@
package rootcoord

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/api/commonpb"
	"github.com/milvus-io/milvus/api/milvuspb"
	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/stretchr/testify/assert"
)

func Test_showPartitionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &showPartitionTask{
			Req: &milvuspb.ShowPartitionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_Undefined,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})

	t.Run("normal case", func(t *testing.T) {
		task := &showPartitionTask{
			Req: &milvuspb.ShowPartitionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowPartitions,
				},
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

func Test_showPartitionTask_Execute(t *testing.T) {
	t.Run("failed to list collections by name", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &showPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.ShowPartitionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowPartitions,
				},
				CollectionName: "test coll",
			},
			Rsp: &milvuspb.ShowPartitionsResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})

	t.Run("failed to list collections by id", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &showPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.ShowPartitionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowPartitions,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.ShowPartitionsResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID typeutil.UniqueID, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				CollectionID: collectionID,
				Name:         "test coll",
				Partitions: []*model.Partition{
					{
						PartitionID:   1,
						PartitionName: "test partition1",
					},
					{
						PartitionID:   2,
						PartitionName: "test partition2",
					},
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &showPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.ShowPartitionsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_ShowPartitions,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.ShowPartitionsResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, task.Rsp.GetStatus().GetErrorCode())
		assert.Equal(t, 2, len(task.Rsp.GetPartitionNames()))
	})
}

@ -4,7 +4,7 @@ import (
	"context"
)

type taskV2 interface {
type task interface {
	GetCtx() context.Context
	SetCtx(context.Context)
	SetTs(ts Timestamp)

@ -17,7 +17,7 @@ type taskV2 interface {
	NotifyDone(err error)
}

type baseTaskV2 struct {
type baseTask struct {
	ctx  context.Context
	core *Core
	done chan error

@ -25,42 +25,42 @@ type baseTaskV2 struct {
	id UniqueID
}

func (b *baseTaskV2) SetCtx(ctx context.Context) {
func (b *baseTask) SetCtx(ctx context.Context) {
	b.ctx = ctx
}

func (b *baseTaskV2) GetCtx() context.Context {
func (b *baseTask) GetCtx() context.Context {
	return b.ctx
}

func (b *baseTaskV2) SetTs(ts Timestamp) {
func (b *baseTask) SetTs(ts Timestamp) {
	b.ts = ts
}

func (b *baseTaskV2) GetTs() Timestamp {
func (b *baseTask) GetTs() Timestamp {
	return b.ts
}

func (b *baseTaskV2) SetID(id UniqueID) {
func (b *baseTask) SetID(id UniqueID) {
	b.id = id
}

func (b *baseTaskV2) GetID() UniqueID {
func (b *baseTask) GetID() UniqueID {
	return b.id
}

func (b *baseTaskV2) Prepare(ctx context.Context) error {
func (b *baseTask) Prepare(ctx context.Context) error {
	return nil
}

func (b *baseTaskV2) Execute(ctx context.Context) error {
func (b *baseTask) Execute(ctx context.Context) error {
	return nil
}

func (b *baseTaskV2) WaitToFinish() error {
func (b *baseTask) WaitToFinish() error {
	return <-b.done
}

func (b *baseTaskV2) NotifyDone(err error) {
func (b *baseTask) NotifyDone(err error) {
	b.done <- err
}

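With the V2 suffix gone, a concrete DDL operation implements task simply by embedding baseTask and overriding Prepare/Execute as needed, exactly as the mock tasks in scheduler_test.go above do. A minimal illustrative example (the operation itself is hypothetical):

// noopTask: the smallest possible task. Prepare and Execute fall through
// to the baseTask no-ops, so scheduling it only exercises id/ts assignment
// and the done channel.
type noopTask struct {
	baseTask
}

func newNoopTask(ctx context.Context, core *Core) *noopTask {
	return &noopTask{baseTask: baseTask{
		ctx:  ctx,
		core: core,
		done: make(chan error, 1),
	}}
}

Used as:

	t := newNoopTask(context.Background(), core)
	if err := s.AddTask(t); err != nil {
		return err
	}
	return t.WaitToFinish() // blocks on the done channel until NotifyDone fires
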
@ -121,3 +121,15 @@ func succStatus() *commonpb.Status {
		Reason: "",
	}
}

type TimeTravelRequest interface {
	GetBase() *commonpb.MsgBase
	GetTimeStamp() Timestamp
}

func getTravelTs(req TimeTravelRequest) Timestamp {
	if req.GetTimeStamp() == 0 {
		return typeutil.MaxTimestamp
	}
	return req.GetTimeStamp()
}

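getTravelTs gives every read request one uniform time-travel rule: an unset TimeStamp means "read the latest state" (MaxTimestamp), and any explicit value is honored as-is. For instance (request values taken from tests in this diff):

	ts := getTravelTs(&milvuspb.ShowCollectionsRequest{})                // typeutil.MaxTimestamp
	ts = getTravelTs(&milvuspb.ShowCollectionsRequest{TimeStamp: 10000}) // 10000: read as of that snapshot

The second form is what the ShowCollections test at the top of this diff exercises: it passes TimeStamp: 10000 and expects a non-success status.
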
@ -19,6 +19,10 @@ package rootcoord
import (
	"testing"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/milvus-io/milvus/api/milvuspb"

	"github.com/milvus-io/milvus/internal/metastore/model"

	"github.com/milvus-io/milvus/api/commonpb"

@ -108,3 +112,22 @@ func Test_DecodeMsgPositions(t *testing.T) {
	err = DecodeMsgPositions("null", &mpOut)
	assert.Nil(t, err)
}

func Test_getTravelTs(t *testing.T) {
	type args struct {
		req TimeTravelRequest
	}
	tests := []struct {
		name string
		args args
		want Timestamp
	}{
		{args: args{req: &milvuspb.HasCollectionRequest{}}, want: typeutil.MaxTimestamp},
		{args: args{req: &milvuspb.DescribeCollectionRequest{TimeStamp: 100}}, want: 100},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, getTravelTs(tt.args.req), "getTravelTs(%v)", tt.args.req)
		})
	}
}