Unify metastore catalog interface (#17772)

Signed-off-by: yun.zhang <yun.zhang@zilliz.com>
pull/18365/head
jaime 2022-07-22 10:20:29 +08:00 committed by GitHub
parent f37e07b0f8
commit 9672eae62c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 3855 additions and 2066 deletions

View File

@ -627,7 +627,6 @@ _proxyMetaBlob_, _collectionInfoBlob_, _partitionInfoBlob_, _IndexInfoBlob_, _se
type metaTable struct {
txn kv.TxnKV // client of a reliable txnkv service, i.e. etcd client
snapshot kv.SnapShotKV // client of a reliable snapshotkv service, i.e. etcd client
proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta
collID2Meta map[typeutil.UniqueID]pb.CollectionInfo // collection_id -> meta
collName2ID map[string]typeutil.UniqueID // collection name to collection id
collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id
@ -641,7 +640,6 @@ type metaTable struct {
func NewMetaTable(kv kv.SnapShotKV) (*metaTable, error)
func (mt *metaTable) AddProxy(po *pb.ProxyMeta) (typeutil.Timestamp, error)
func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionInfo, idx []*pb.IndexInfo, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error)
func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error)
func (mt *metaTable) HasCollection(collID typeutil.UniqueID, ts typeutil.Timestamp) bool
@ -692,7 +690,6 @@ type timetickSync struct {
func newTimeTickSync(core *Core) *timetickSync
func (t *timetickSync) UpdateTimeTick(in *internalpb.ChannelTimeTickMsg) error
func (t *timetickSync) AddProxy(sess *sessionutil.Session)
func (t *timetickSync) DelProxy(sess *sessionutil.Session)
func (t *timetickSync) GetProxy(sess []*sessionutil.Session)
func (t *timetickSync) StartWatch()

1
go.mod
View File

@ -20,6 +20,7 @@ require (
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/gin-gonic/gin v1.7.7
github.com/go-basic/ipv4 v1.0.0
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/gofrs/flock v0.8.1
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
github.com/golang/mock v1.5.0

1
go.sum
View File

@ -503,6 +503,7 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=

5
internal/common/tuple.go Normal file
View File

@ -0,0 +1,5 @@
package common
// Int64Tuple is a plain (Key, Value) pair of int64s, shared across metastore
// code for id-to-id mappings — NOTE(review): exact usage sites not visible in
// this chunk; confirm against callers.
type Int64Tuple struct {
	Key, Value int64
}

View File

@ -26,6 +26,8 @@ import (
"testing"
"time"
"github.com/milvus-io/milvus/internal/metastore/model"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/golang/protobuf/proto"
@ -34,7 +36,6 @@ import (
rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
@ -192,7 +193,7 @@ func TestGrpcService(t *testing.T) {
var binlogLock sync.Mutex
binlogPathArray := make([]string, 0, 16)
core.CallBuildIndexService = func(ctx context.Context, segID typeutil.UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error) {
core.CallBuildIndexService = func(ctx context.Context, segID typeutil.UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error) {
binlogLock.Lock()
defer binlogLock.Unlock()
binlogPathArray = append(binlogPathArray, binlog...)
@ -483,7 +484,7 @@ func TestGrpcService(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
assert.Equal(t, collName, rsp.Schema.Name)
assert.Equal(t, collMeta.ID, rsp.CollectionID)
assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
})
t.Run("show collection", func(t *testing.T) {
@ -520,8 +521,8 @@ func TestGrpcService(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Equal(t, 2, len(collMeta.PartitionIDs))
partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0)
assert.Equal(t, 2, len(collMeta.Partitions))
partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0)
assert.Nil(t, err)
assert.Equal(t, partName, partName2)
assert.Equal(t, 1, len(collectionMetaCache))
@ -557,7 +558,7 @@ func TestGrpcService(t *testing.T) {
},
DbName: "testDb",
CollectionName: collName,
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
}
rsp, err := cli.ShowPartitions(ctx, req)
assert.Nil(t, err)
@ -569,8 +570,8 @@ func TestGrpcService(t *testing.T) {
t.Run("show segment", func(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
partID := coll.PartitionIDs[1]
_, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0)
partID := coll.Partitions[1].PartitionID
_, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0)
assert.Nil(t, err)
segLock.Lock()
@ -584,7 +585,7 @@ func TestGrpcService(t *testing.T) {
Timestamp: 170,
SourceID: 170,
},
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
PartitionID: partID,
}
rsp, err := cli.ShowSegments(ctx, req)
@ -614,13 +615,13 @@ func TestGrpcService(t *testing.T) {
}
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Zero(t, len(collMeta.FieldIndexes))
assert.Zero(t, len(collMeta.FieldIDToIndexID))
rsp, err := cli.CreateIndex(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode)
collMeta, err = core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Equal(t, 1, len(collMeta.FieldIndexes))
assert.Equal(t, 1, len(collMeta.FieldIDToIndexID))
binlogLock.Lock()
defer binlogLock.Unlock()
@ -637,6 +638,10 @@ func TestGrpcService(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
segLock.Lock()
segs = []typeutil.UniqueID{segID}
segLock.Unlock()
req := &milvuspb.DescribeSegmentRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeSegment,
@ -644,8 +649,8 @@ func TestGrpcService(t *testing.T) {
Timestamp: 190,
SourceID: 190,
},
CollectionID: coll.ID,
SegmentID: 1000,
CollectionID: coll.CollectionID,
SegmentID: segID,
}
rsp, err := cli.DescribeSegment(ctx, req)
assert.Nil(t, err)
@ -676,8 +681,8 @@ func TestGrpcService(t *testing.T) {
t.Run("flush segment", func(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
partID := coll.PartitionIDs[1]
_, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0)
partID := coll.Partitions[1].PartitionID
_, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0)
assert.Nil(t, err)
segLock.Lock()
@ -690,7 +695,7 @@ func TestGrpcService(t *testing.T) {
},
Segment: &datapb.SegmentInfo{
ID: segID,
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
PartitionID: partID,
},
}
@ -772,8 +777,8 @@ func TestGrpcService(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Equal(t, 1, len(collMeta.PartitionIDs))
partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0)
assert.Equal(t, 1, len(collMeta.Partitions))
partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0)
assert.Nil(t, err)
assert.Equal(t, rootcoord.Params.CommonCfg.DefaultPartitionName, partName)
assert.Equal(t, 2, len(collectionMetaCache))

View File

@ -0,0 +1,46 @@
package metastore
import (
"context"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// Catalog is the unified metastore access layer for collection, partition,
// index, alias and credential metadata. Reads that take a ts return the
// state visible at that snapshot timestamp.
type Catalog interface {
	CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error
	GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error)
	GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error)
	// ListCollections returns all collections visible at ts, keyed by name.
	ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error)
	CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool
	DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error

	// Partition changes are persisted through the full, already-updated
	// collection model rather than a standalone partition record.
	CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error
	DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error

	CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error
	// AlterIndex newIndex only contains updated parts
	AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType AlterType) error
	DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error
	ListIndexes(ctx context.Context) ([]*model.Index, error)

	CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error
	DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error
	AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error
	ListAliases(ctx context.Context) ([]*model.Collection, error)

	GetCredential(ctx context.Context, username string) (*model.Credential, error)
	CreateCredential(ctx context.Context, credential *model.Credential) error
	DropCredential(ctx context.Context, username string) error
	ListCredentials(ctx context.Context) ([]string, error)

	// Close releases any resources held by the implementation.
	Close()
}
// AlterType enumerates the kinds of mutation AlterIndex can apply.
type AlterType int32

const (
	// ADD upserts new or changed entries.
	ADD AlterType = iota
	// DELETE removes entries.
	DELETE
	// MODIFY updates entries in place.
	MODIFY
)

View File

@ -0,0 +1,583 @@
package kv
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strconv"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/model"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
// Catalog is the kv-backed implementation of metastore.Catalog. Txn is a
// plain transactional kv client; Snapshot additionally versions writes by
// timestamp so historical reads are possible.
type Catalog struct {
	Txn      kv.TxnKV
	Snapshot kv.SnapShotKV
}
// CreateCollection persists the collection model, together with any extra
// DD-op kv pairs carried in coll.Extra, into the snapshot kv at timestamp ts.
func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.CollectionID)
	collValue, err := proto.Marshal(model.MarshalCollectionModel(coll))
	if err != nil {
		log.Error("create collection marshal fail", zap.String("key", collKey), zap.Error(err))
		return err
	}

	// save ddOpStr into etcd
	kvs := map[string]string{collKey: string(collValue)}
	for k, v := range coll.Extra {
		kvs[k] = v
	}

	if err = kc.Snapshot.MultiSave(kvs, ts); err != nil {
		log.Error("create collection persist meta fail", zap.String("key", collKey), zap.Error(err))
		return err
	}
	return nil
}
// CreatePartition writes the updated collection meta (which already contains
// the new partition) into the snapshot kv, then stores the DD-op payload from
// coll.Extra through the plain txn kv.
func (kc *Catalog) CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.CollectionID)
	collValue, err := proto.Marshal(model.MarshalCollectionModel(coll))
	if err != nil {
		log.Error("create partition marshal fail", zap.String("key", collKey), zap.Error(err))
		return err
	}

	if err = kc.Snapshot.MultiSave(map[string]string{collKey: string(collValue)}, ts); err != nil {
		log.Error("create partition persist meta fail", zap.String("key", collKey), zap.Error(err))
		return err
	}

	// save ddOpStr into etcd
	if err = kc.Txn.MultiSave(coll.Extra); err != nil {
		// will not panic, missing create msg
		log.Warn("create partition persist ddop meta fail", zap.Int64("collectionID", coll.CollectionID), zap.Error(err))
		return err
	}
	return nil
}
// CreateIndex saves the owning collection meta and the new index meta in a
// single txn-kv multi-save so the two entries stay consistent.
func (kc *Catalog) CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error {
	collKey := path.Join(CollectionMetaPrefix, strconv.FormatInt(col.CollectionID, 10))
	collValue, err := proto.Marshal(model.MarshalCollectionModel(col))
	if err != nil {
		log.Error("create index marshal fail", zap.String("key", collKey), zap.Error(err))
		return err
	}

	idxKey := path.Join(IndexMetaPrefix, strconv.FormatInt(index.IndexID, 10))
	idxValue, err := proto.Marshal(model.MarshalIndexModel(index))
	if err != nil {
		log.Error("create index marshal fail", zap.String("key", idxKey), zap.Error(err))
		return err
	}

	if err = kc.Txn.MultiSave(map[string]string{
		collKey: string(collValue),
		idxKey:  string(idxValue),
	}); err != nil {
		log.Error("create index persist meta fail", zap.String("key", collKey), zap.Error(err))
		return err
	}
	return nil
}
// alterAddIndex persists only the parts of newIndex that differ from oldIndex:
//   - each segment-index entry that is new or changed is rewritten under
//     SegmentIndexMetaPrefix/<coll>/<index>/<partition>/<segment>;
//   - the index meta itself is rewritten when CreateTime or IsDeleted changed.
// All changed keys are committed in one MultiSave; no-op when nothing differs.
func (kc *Catalog) alterAddIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error {
	kvs := make(map[string]string, len(newIndex.SegmentIndexes))
	for segID, newSegIdx := range newIndex.SegmentIndexes {
		oldSegIdx, ok := oldIndex.SegmentIndexes[segID]
		// write only entries missing from, or different in, the old index
		if !ok || !reflect.DeepEqual(oldSegIdx, newSegIdx) {
			segment := newSegIdx.Segment
			k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, newIndex.CollectionID, newIndex.IndexID, segment.PartitionID, segment.SegmentID)
			segIdxInfo := &pb.SegmentIndexInfo{
				CollectionID: newIndex.CollectionID,
				PartitionID:  segment.PartitionID,
				SegmentID:    segment.SegmentID,
				BuildID:      newSegIdx.BuildID,
				EnableIndex:  newSegIdx.EnableIndex,
				CreateTime:   newSegIdx.CreateTime,
				FieldID:      newIndex.FieldID,
				IndexID:      newIndex.IndexID,
			}
			v, err := proto.Marshal(segIdxInfo)
			if err != nil {
				log.Error("alter index marshal fail", zap.String("key", k), zap.Error(err))
				return err
			}
			kvs[k] = string(v)
		}
	}

	// index-level fields changed -> rewrite the index meta as well
	if oldIndex.CreateTime != newIndex.CreateTime || oldIndex.IsDeleted != newIndex.IsDeleted {
		idxPb := model.MarshalIndexModel(newIndex)
		k := fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, newIndex.CollectionID, newIndex.IndexID)
		v, err := proto.Marshal(idxPb)
		if err != nil {
			log.Error("alter index marshal fail", zap.String("key", k), zap.Error(err))
			return err
		}
		kvs[k] = string(v)
	}

	// nothing changed: skip the kv round-trip entirely
	if len(kvs) == 0 {
		return nil
	}

	err := kc.Txn.MultiSave(kvs)
	if err != nil {
		log.Error("alter add index persist meta fail", zap.Any("segmentIndex", newIndex.SegmentIndexes), zap.Error(err))
		return err
	}
	return nil
}
// alterDeleteIndex removes the segment-index entries listed in newIndex from
// the txn kv. oldIndex is unused; the parameter is kept for signature
// symmetry with alterAddIndex.
func (kc *Catalog) alterDeleteIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error {
	// Bug fix: the slice was created with make([]string, len(...)) and then
	// appended to, which left len(...) empty-string keys at the front and
	// handed bogus "" keys to MultiRemove. Allocate with length 0.
	delKeys := make([]string, 0, len(newIndex.SegmentIndexes))
	for _, segIdx := range newIndex.SegmentIndexes {
		delKeys = append(delKeys, fmt.Sprintf("%s/%d/%d/%d/%d",
			SegmentIndexMetaPrefix, newIndex.CollectionID, newIndex.IndexID, segIdx.PartitionID, segIdx.SegmentID))
	}

	if len(delKeys) == 0 {
		return nil
	}

	if err := kc.Txn.MultiRemove(delKeys); err != nil {
		log.Error("alter delete index persist meta fail", zap.Any("keys", delKeys), zap.Error(err))
		return err
	}
	return nil
}
// AlterIndex dispatches an index alteration to the matching handler.
// newIndex only contains the updated parts. MODIFY currently has no handler
// and falls through to the error branch.
func (kc *Catalog) AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType) error {
	switch alterType {
	case metastore.ADD:
		return kc.alterAddIndex(ctx, oldIndex, newIndex)
	case metastore.DELETE:
		return kc.alterDeleteIndex(ctx, oldIndex, newIndex)
	default:
		// idiomatic: lowercase error string, single fmt.Errorf instead of
		// errors.New + Sprintf concatenation
		return fmt.Errorf("unknown alter type: %d", alterType)
	}
}
// CreateAlias persists an alias -> collection mapping in the snapshot kv.
// The alias to create is taken from collection.Aliases[0]; the stored value
// is a minimal CollectionInfo whose schema name carries the alias.
func (kc *Catalog) CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
	// Robustness fix: guard the unchecked Aliases[0] access, which panicked
	// with index-out-of-range when the model carried no alias.
	if len(collection.Aliases) == 0 {
		return errors.New("no alias is specified in collection model")
	}

	k := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collection.Aliases[0])
	v, err := proto.Marshal(&pb.CollectionInfo{ID: collection.CollectionID, Schema: &schemapb.CollectionSchema{Name: collection.Aliases[0]}})
	if err != nil {
		log.Error("create alias marshal fail", zap.String("key", k), zap.Error(err))
		return err
	}

	err = kc.Snapshot.Save(k, string(v), ts)
	if err != nil {
		log.Error("create alias persist meta fail", zap.String("key", k), zap.Error(err))
		return err
	}
	return nil
}
// CreateCredential stores the user's encrypted password under
// CredentialPrefix/<username> as a JSON-encoded CredentialInfo.
func (kc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error {
	key := fmt.Sprintf("%s/%s", CredentialPrefix, credential.Username)
	value, err := json.Marshal(&internalpb.CredentialInfo{EncryptedPassword: credential.EncryptedPassword})
	if err != nil {
		log.Error("create credential marshal fail", zap.String("key", key), zap.Error(err))
		return err
	}

	if err = kc.Txn.Save(key, string(value)); err != nil {
		log.Error("create credential persist meta fail", zap.String("key", key), zap.Error(err))
		return err
	}
	return nil
}
// GetCollectionByID loads and decodes the collection meta stored under
// CollectionMetaPrefix/<collectionID> at snapshot timestamp ts.
func (kc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionID)
	collVal, err := kc.Snapshot.Load(collKey, ts)
	if err != nil {
		log.Error("get collection meta fail", zap.String("key", collKey), zap.Error(err))
		return nil, err
	}

	collMeta := &pb.CollectionInfo{}
	err = proto.Unmarshal([]byte(collVal), collMeta)
	if err != nil {
		// Fixed log message: this is the unmarshal (decode) path, the old
		// text said "marshal fail" and pointed debuggers the wrong way.
		log.Error("collection meta unmarshal fail", zap.String("key", collKey), zap.Error(err))
		return nil, err
	}

	return model.UnmarshalCollectionModel(collMeta), nil
}
// CollectionExists reports whether the collection's meta can be loaded at ts;
// any load/decode failure is treated as "does not exist".
func (kc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool {
	if _, err := kc.GetCollectionByID(ctx, collectionID, ts); err != nil {
		return false
	}
	return true
}
// GetCredential loads the stored credential of a user. Only the encrypted
// password is persisted; the username comes from the caller.
func (kc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) {
	key := fmt.Sprintf("%s/%s", CredentialPrefix, username)
	value, err := kc.Txn.Load(key)
	if err != nil {
		log.Warn("get credential meta fail", zap.String("key", key), zap.Error(err))
		return nil, err
	}

	var info internalpb.CredentialInfo
	if err = json.Unmarshal([]byte(value), &info); err != nil {
		return nil, fmt.Errorf("unmarshal credential info err:%w", err)
	}

	return &model.Credential{Username: username, EncryptedPassword: info.EncryptedPassword}, nil
}
// AlterAlias re-points an existing alias by overwriting its key; the write
// path is identical to creating the alias.
func (kc *Catalog) AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
	return kc.CreateAlias(ctx, collection, ts)
}
// DropCollection removes the collection meta and every alias key from the
// snapshot kv, then — through the txn kv — deletes index and segment-index
// meta by prefix while persisting any DD-op payload from Extra.
func (kc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
	snapshotRemovals := []string{
		fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionInfo.CollectionID),
	}
	for _, alias := range collectionInfo.Aliases {
		snapshotRemovals = append(snapshotRemovals,
			fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias),
		)
	}

	if err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(map[string]string{}, snapshotRemovals, ts); err != nil {
		log.Error("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err))
		return err
	}

	// Txn operation
	saves := make(map[string]string, len(collectionInfo.Extra))
	for k, v := range collectionInfo.Extra {
		saves[k] = v
	}
	txnRemovals := []string{
		fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID),
		fmt.Sprintf("%s/%d", IndexMetaPrefix, collectionInfo.CollectionID),
	}
	if err := kc.Txn.MultiSaveAndRemoveWithPrefix(saves, txnRemovals); err != nil {
		log.Warn("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err))
		return err
	}
	return nil
}
// DropPartition overwrites the collection meta (from which the caller has
// already removed the partition) in the snapshot kv, then deletes the
// partition's segment-index entries and commits any DD-op payload from Extra
// through the txn kv.
func (kc *Catalog) DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error {
	collMeta := model.MarshalCollectionModel(collectionInfo)
	k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collectionInfo.CollectionID, 10))
	v, err := proto.Marshal(collMeta)
	if err != nil {
		log.Error("drop partition marshal fail", zap.String("key", k), zap.Error(err))
		return err
	}

	err = kc.Snapshot.Save(k, string(v), ts)
	if err != nil {
		log.Error("drop partition update collection meta fail",
			zap.Int64("collectionID", collectionInfo.CollectionID),
			zap.Int64("partitionID", partitionID),
			zap.Error(err))
		return err
	}

	// one prefix per field index; removal is prefix-based, so every segment
	// of the dropped partition under that index is covered
	var delMetaKeys []string
	for _, idxInfo := range collMeta.FieldIndexes {
		k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partitionID)
		delMetaKeys = append(delMetaKeys, k)
	}

	// Txn operation
	metaTxn := map[string]string{}
	for k, v := range collectionInfo.Extra {
		metaTxn[k] = v
	}
	err = kc.Txn.MultiSaveAndRemoveWithPrefix(metaTxn, delMetaKeys)
	if err != nil {
		log.Warn("drop partition update meta fail",
			zap.Int64("collectionID", collectionInfo.CollectionID),
			zap.Int64("partitionID", partitionID),
			zap.Error(err))
		return err
	}
	return nil
}
// DropIndex rewrites the collection meta (with the index already detached by
// the caller) and, in the same txn-kv operation, removes all segment-index
// and index meta stored under the dropped index id.
// NOTE(review): the collection meta is written through the txn kv here, not
// the snapshot kv used elsewhere — confirm this asymmetry is intentional.
func (kc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error {
	collMeta := model.MarshalCollectionModel(collectionInfo)
	k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collectionInfo.CollectionID, 10))
	v, err := proto.Marshal(collMeta)
	if err != nil {
		log.Error("drop index marshal fail", zap.String("key", k), zap.Error(err))
		return err
	}

	saveMeta := map[string]string{k: string(v)}
	delMeta := []string{
		fmt.Sprintf("%s/%d/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID, dropIdxID),
		fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, collectionInfo.CollectionID, dropIdxID),
	}

	err = kc.Txn.MultiSaveAndRemoveWithPrefix(saveMeta, delMeta)
	if err != nil {
		// Fixed copy-pasted log message: the old text said "drop partition",
		// which misattributed failures in the drop-index path.
		log.Error("drop index update meta fail",
			zap.Int64("collectionID", collectionInfo.CollectionID),
			zap.Int64("indexID", dropIdxID),
			zap.Error(err))
		return err
	}
	return nil
}
// DropCredential deletes the stored credential entry of the given user.
func (kc *Catalog) DropCredential(ctx context.Context, username string) error {
	key := fmt.Sprintf("%s/%s", CredentialPrefix, username)
	if err := kc.Txn.Remove(key); err != nil {
		log.Error("drop credential update meta fail", zap.String("key", key), zap.Error(err))
		return err
	}
	return nil
}
// DropAlias removes the alias key (by prefix) from the snapshot kv at ts.
// The collectionID parameter is not needed for the key and is unused here.
func (kc *Catalog) DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error {
	removals := []string{
		fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias),
	}
	if err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(make(map[string]string), removals, ts); err != nil {
		log.Error("drop alias update meta fail", zap.String("alias", alias), zap.Error(err))
		return err
	}
	return nil
}
// GetCollectionByName scans all collection meta visible at ts and returns the
// one whose schema name matches. Undecodable entries are skipped; a miss
// yields a descriptive error. Note this is a full prefix scan per lookup.
func (kc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) {
	_, vals, err := kc.Snapshot.LoadWithPrefix(CollectionMetaPrefix, ts)
	if err != nil {
		log.Warn("get collection meta fail", zap.String("collectionName", collectionName), zap.Error(err))
		return nil, err
	}

	for _, val := range vals {
		var colMeta pb.CollectionInfo
		if err := proto.Unmarshal([]byte(val), &colMeta); err != nil {
			log.Warn("get collection meta unmarshal fail", zap.String("collectionName", collectionName), zap.Error(err))
			continue
		}
		if colMeta.Schema.Name == collectionName {
			return model.UnmarshalCollectionModel(&colMeta), nil
		}
	}

	return nil, fmt.Errorf("can't find collection: %s, at timestamp = %d", collectionName, ts)
}
// ListCollections returns all collections visible at snapshot ts, keyed by
// schema name. Entries that fail to unmarshal are skipped with a warning.
func (kc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
	_, vals, err := kc.Snapshot.LoadWithPrefix(CollectionMetaPrefix, ts)
	if err != nil {
		log.Error("get collections meta fail",
			zap.String("prefix", CollectionMetaPrefix),
			zap.Uint64("timestamp", ts),
			zap.Error(err))
		// Bug fix: propagate the error. The previous version returned
		// (nil, nil), making load failures indistinguishable from an
		// empty catalog for callers.
		return nil, err
	}

	colls := make(map[string]*model.Collection)
	for _, val := range vals {
		collMeta := pb.CollectionInfo{}
		err := proto.Unmarshal([]byte(val), &collMeta)
		if err != nil {
			log.Warn("unmarshal collection info failed", zap.Error(err))
			continue
		}
		colls[collMeta.Schema.Name] = model.UnmarshalCollectionModel(&collMeta)
	}
	return colls, nil
}
// ListAliases decodes every alias entry into a minimal collection model.
// Entries that cannot be unmarshalled are skipped with a warning.
func (kc *Catalog) ListAliases(ctx context.Context) ([]*model.Collection, error) {
	_, values, err := kc.Snapshot.LoadWithPrefix(CollectionAliasMetaPrefix, 0)
	if err != nil {
		log.Error("get aliases meta fail", zap.String("prefix", CollectionAliasMetaPrefix), zap.Error(err))
		return nil, err
	}

	var colls []*model.Collection
	for _, value := range values {
		var aliasInfo pb.CollectionInfo
		if err = proto.Unmarshal([]byte(value), &aliasInfo); err != nil {
			log.Warn("unmarshal aliases failed", zap.Error(err))
			continue
		}
		colls = append(colls, model.UnmarshalCollectionModel(&aliasInfo))
	}
	return colls, nil
}
// listSegmentIndexes loads all per-segment index entries from the txn kv and
// groups them by index id: entries sharing an index id are folded into one
// model.Index whose SegmentIndexes map accumulates every segment. Tombstone
// values (left behind when IndexMeta lived in the snapshot kv) are skipped.
func (kc *Catalog) listSegmentIndexes(ctx context.Context) (map[int64]*model.Index, error) {
	_, values, err := kc.Txn.LoadWithPrefix(SegmentIndexMetaPrefix)
	if err != nil {
		log.Error("list segment index meta fail", zap.String("prefix", SegmentIndexMetaPrefix), zap.Error(err))
		return nil, err
	}

	indexes := make(map[int64]*model.Index, len(values))
	for _, value := range values {
		if bytes.Equal([]byte(value), SuffixSnapshotTombstone) {
			// backward compatibility, IndexMeta used to be in SnapshotKV
			continue
		}
		segmentIndexInfo := pb.SegmentIndexInfo{}
		err = proto.Unmarshal([]byte(value), &segmentIndexInfo)
		if err != nil {
			log.Warn("unmarshal segment index info failed", zap.Error(err))
			continue
		}

		newIndex := model.UnmarshalSegmentIndexModel(&segmentIndexInfo)
		oldIndex, ok := indexes[segmentIndexInfo.IndexID]
		if ok {
			// an entry for this index id was seen before: merge this
			// entry's segments into the existing index
			for segID, segmentIdxInfo := range newIndex.SegmentIndexes {
				oldIndex.SegmentIndexes[segID] = segmentIdxInfo
			}
		} else {
			indexes[segmentIndexInfo.IndexID] = newIndex
		}
	}
	return indexes, nil
}
// listIndexMeta loads every index meta entry under IndexMetaPrefix, skipping
// tombstones left by the old snapshot-kv storage, and returns the decoded
// indexes keyed by index id. A duplicate id is logged and the later entry
// wins, matching the previous behavior.
func (kc *Catalog) listIndexMeta(ctx context.Context) (map[int64]*model.Index, error) {
	_, values, err := kc.Txn.LoadWithPrefix(IndexMetaPrefix)
	if err != nil {
		log.Error("list index meta fail", zap.String("prefix", IndexMetaPrefix), zap.Error(err))
		return nil, err
	}

	indexes := make(map[int64]*model.Index, len(values))
	for _, value := range values {
		if bytes.Equal([]byte(value), SuffixSnapshotTombstone) {
			// backward compatibility, IndexMeta used to be in SnapshotKV
			continue
		}
		var meta pb.IndexInfo
		if err = proto.Unmarshal([]byte(value), &meta); err != nil {
			log.Warn("unmarshal index info failed", zap.Error(err))
			continue
		}
		if _, exists := indexes[meta.IndexID]; exists {
			log.Warn("duplicated index id exists in index meta", zap.Int64("index id", meta.IndexID))
		}
		indexes[meta.IndexID] = model.UnmarshalIndexModel(&meta)
	}
	return indexes, nil
}
// ListIndexes returns all indexes, merging per-segment index entries into
// their owning index meta where one exists. Segment entries whose index meta
// is missing are returned as standalone indexes.
func (kc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
	indexMeta, err := kc.listIndexMeta(ctx)
	if err != nil {
		return nil, err
	}
	segmentIndexMeta, err := kc.listSegmentIndexes(ctx)
	if err != nil {
		return nil, err
	}

	var indexes []*model.Index
	//merge index and segment index
	for indexID, index := range indexMeta {
		if segmentIndex, found := segmentIndexMeta[indexID]; found {
			index = model.MergeIndexModel(index, segmentIndex)
			delete(segmentIndexMeta, indexID)
		}
		indexes = append(indexes, index)
	}

	// add remain segmentIndexMeta
	for _, segmentIndex := range segmentIndexMeta {
		indexes = append(indexes, segmentIndex)
	}
	return indexes, nil
}
// ListCredentials returns the usernames of every stored credential, derived
// from the key suffix after UserSubPrefix; malformed keys are skipped.
func (kc *Catalog) ListCredentials(ctx context.Context) ([]string, error) {
	keys, _, err := kc.Txn.LoadWithPrefix(CredentialPrefix)
	if err != nil {
		log.Error("list all credential usernames fail", zap.String("prefix", CredentialPrefix), zap.Error(err))
		return nil, err
	}

	var usernames []string
	// loop variable renamed from `path`, which shadowed the imported
	// path package inside this function
	for _, key := range keys {
		username := typeutil.After(key, UserSubPrefix+"/")
		if len(username) == 0 {
			log.Warn("no username extract from path:", zap.String("path", key))
			continue
		}
		usernames = append(usernames, username)
	}
	return usernames, nil
}
// Close satisfies metastore.Catalog. The kv-backed catalog owns no
// connections of its own (Txn and Snapshot are injected), so it is a no-op.
func (kc *Catalog) Close() {
	// do nothing
}

View File

@ -0,0 +1,166 @@
package kv
import (
"context"
"errors"
"testing"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/stretchr/testify/mock"
)
// MockedTxnKV stubs kv.TxnKV for tests; only LoadWithPrefix is overridden,
// via the injectable loadWithPrefixFn hook. Calling any other method hits
// the embedded nil interface and would panic.
type MockedTxnKV struct {
	kv.TxnKV
	loadWithPrefixFn func(key string) ([]string, []string, error)
}

// LoadWithPrefix delegates to the injected hook.
func (mc *MockedTxnKV) LoadWithPrefix(key string) ([]string, []string, error) {
	return mc.loadWithPrefixFn(key)
}
// MockedSnapShotKV stubs kv.SnapShotKV; no methods are overridden because
// the tests in this file never touch the snapshot store.
type MockedSnapShotKV struct {
	mock.Mock
	kv.SnapShotKV
}
var (
	indexName = "idx"
	// IndexID mirrors index.IndexID below but is not referenced in this
	// file — NOTE(review): possibly kept for other tests in the package;
	// confirm before removing.
	IndexID = 1

	// index is the shared fixture: index id 1 on collection 1, already
	// owning segment 1, and marked deleted so that path is represented.
	index = model.Index{
		CollectionID: 1,
		IndexName:    indexName,
		IndexID:      1,
		FieldID:      1,
		IndexParams:  []*commonpb.KeyValuePair{{Key: "index_type", Value: "STL_SORT"}},
		IsDeleted:    true,
		SegmentIndexes: map[int64]model.SegmentIndex{
			1: {
				Segment: model.Segment{
					SegmentID:   1,
					PartitionID: 1,
				},
				BuildID:     1000,
				EnableIndex: false,
				CreateTime:  0,
			},
		},
	}

	// segIdx1/segIdx2 are the two segment-index entries fed to the mocked
	// kv; they differ in SegmentID and EnableIndex so the merge assertions
	// in Test_ListIndexes can distinguish them.
	segIdx1 = model.SegmentIndex{
		Segment: model.Segment{
			SegmentID:   1,
			PartitionID: 1,
		},
		BuildID:     1000,
		EnableIndex: true,
		CreateTime:  10,
	}

	segIdx2 = model.SegmentIndex{
		Segment: model.Segment{
			SegmentID:   2,
			PartitionID: 1,
		},
		BuildID:     1000,
		EnableIndex: false,
		CreateTime:  10,
	}
)
func getStrIndexPb(t *testing.T) string {
idxPB := model.MarshalIndexModel(&index)
msg, err := proto.Marshal(idxPB)
assert.Nil(t, err)
return string(msg)
}
// getStrSegIdxPb combines an index with one of its segment entries into a
// SegmentIndexInfo and returns the marshalled bytes as a string.
func getStrSegIdxPb(idx model.Index, newSegIdx model.SegmentIndex) (string, error) {
	info := &pb.SegmentIndexInfo{
		CollectionID: idx.CollectionID,
		PartitionID:  newSegIdx.PartitionID,
		SegmentID:    newSegIdx.SegmentID,
		BuildID:      newSegIdx.BuildID,
		EnableIndex:  newSegIdx.EnableIndex,
		CreateTime:   newSegIdx.CreateTime,
		FieldID:      idx.FieldID,
		IndexID:      idx.IndexID,
	}

	raw, err := proto.Marshal(info)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
func getStrSegIdxPbs(t *testing.T) []string {
msg1, err := getStrSegIdxPb(index, segIdx1)
assert.Nil(t, err)
msg2, err := getStrSegIdxPb(index, segIdx2)
assert.Nil(t, err)
return []string{msg1, msg2}
}
// Test_ListIndexes covers ListIndexes for both failing kv loads (index meta
// load and segment-index load) and a successful merge of one index meta with
// its two segment-index entries.
func Test_ListIndexes(t *testing.T) {
	ctx := context.TODO()
	t.Run("test return err for remote services", func(t *testing.T) {
		tnx := &MockedTxnKV{}
		skv := &MockedSnapShotKV{}
		expectedErr := errors.New("error")
		// every load fails -> error surfaces from the index-meta load
		tnx.loadWithPrefixFn = func(key string) ([]string, []string, error) {
			return []string{}, []string{}, expectedErr
		}
		catalog := &Catalog{tnx, skv}
		ret, err := catalog.ListIndexes(ctx)
		assert.Nil(t, ret)
		assert.ErrorIs(t, err, expectedErr)

		// only the segment-index load fails
		tnx = &MockedTxnKV{}
		skv = &MockedSnapShotKV{}
		tnx.loadWithPrefixFn = func(key string) ([]string, []string, error) {
			if key == SegmentIndexMetaPrefix {
				return []string{}, []string{}, expectedErr
			}
			return []string{}, []string{}, nil
		}
		catalog = &Catalog{tnx, skv}
		ret, err = catalog.ListIndexes(ctx)
		assert.Nil(t, ret)
		assert.ErrorIs(t, err, expectedErr)
	})

	t.Run("test list index successful", func(t *testing.T) {
		tnx := &MockedTxnKV{}
		skv := &MockedSnapShotKV{}
		tnx.loadWithPrefixFn = func(key string) ([]string, []string, error) {
			if key == SegmentIndexMetaPrefix {
				return []string{}, getStrSegIdxPbs(t), nil
			}
			if key == IndexMetaPrefix {
				return []string{}, []string{getStrIndexPb(t)}, nil
			}
			return []string{}, []string{}, nil
		}
		catalog := &Catalog{tnx, skv}
		idxs, err := catalog.ListIndexes(ctx)
		assert.Nil(t, err)
		assert.Equal(t, 1, len(idxs))
		assert.Equal(t, int64(1), idxs[0].IndexID)
		// both segment entries must have been merged into the single index
		// (duplicated copy of this assertion removed)
		assert.Equal(t, 2, len(idxs[0].SegmentIndexes))
		assert.Equal(t, true, idxs[0].SegmentIndexes[1].EnableIndex)
		assert.Equal(t, false, idxs[0].SegmentIndexes[2].EnableIndex)
	})
}

View File

@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package rootcoord
package kv
import (
"context"
@ -40,7 +40,7 @@ type rtPair struct {
ts typeutil.Timestamp
}
type metaSnapshot struct {
type MetaSnapshot struct {
cli *clientv3.Client
root string
tsKey string
@ -52,11 +52,11 @@ type metaSnapshot struct {
numTs int
}
func newMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*metaSnapshot, error) {
func NewMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*MetaSnapshot, error) {
if bufSize <= 0 {
bufSize = 1024
}
ms := &metaSnapshot{
ms := &MetaSnapshot{
cli: cli,
root: root,
tsKey: tsKey,
@ -72,7 +72,7 @@ func newMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*me
return ms, nil
}
func (ms *metaSnapshot) loadTs() error {
func (ms *MetaSnapshot) loadTs() error {
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
defer cancel()
@ -115,12 +115,12 @@ func (ms *metaSnapshot) loadTs() error {
return nil
}
if curVer == version {
log.Debug("snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version))
log.Debug("Snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version))
}
strTs := string(resp.Kvs[0].Value)
if strTs == "0" {
//#issue 7150, index building inserted "0", skipping
//this is a special fix for backward compatibility, the previous version will put 0 ts into the snapshot building index
//this is a special fix for backward compatibility, the previous version will put 0 ts into the Snapshot building index
continue
}
curTs, err := strconv.ParseUint(strTs, 10, 64)
@ -139,16 +139,16 @@ func (ms *metaSnapshot) loadTs() error {
return nil
}
func (ms *metaSnapshot) maxTs() typeutil.Timestamp {
func (ms *MetaSnapshot) maxTs() typeutil.Timestamp {
return ms.ts2Rev[ms.maxPos].ts
}
func (ms *metaSnapshot) minTs() typeutil.Timestamp {
func (ms *MetaSnapshot) minTs() typeutil.Timestamp {
return ms.ts2Rev[ms.minPos].ts
}
func (ms *metaSnapshot) initTs(rev int64, ts typeutil.Timestamp) {
log.Debug("init meta snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
func (ms *MetaSnapshot) initTs(rev int64, ts typeutil.Timestamp) {
log.Debug("init meta Snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
if ms.numTs == 0 {
ms.maxPos = len(ms.ts2Rev) - 1
ms.minPos = len(ms.ts2Rev) - 1
@ -163,7 +163,7 @@ func (ms *metaSnapshot) initTs(rev int64, ts typeutil.Timestamp) {
}
}
func (ms *metaSnapshot) putTs(rev int64, ts typeutil.Timestamp) {
func (ms *MetaSnapshot) putTs(rev int64, ts typeutil.Timestamp) {
log.Debug("put meta snapshto ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
ms.maxPos++
if ms.maxPos == len(ms.ts2Rev) {
@ -182,7 +182,7 @@ func (ms *metaSnapshot) putTs(rev int64, ts typeutil.Timestamp) {
}
}
func (ms *metaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int) int64 {
func (ms *MetaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int) int64 {
if length == 1 {
return ms.ts2Rev[start].rev
}
@ -208,7 +208,7 @@ func (ms *metaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int)
}
}
func (ms *metaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 {
func (ms *MetaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 {
if ms.numTs == 0 {
return 0
}
@ -236,7 +236,7 @@ func (ms *metaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 {
return 0
}
func (ms *metaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 {
func (ms *MetaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 {
if rev < 2 {
return 0
}
@ -265,7 +265,7 @@ func (ms *metaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 {
return 0
}
func (ms *metaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) {
func (ms *MetaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) {
rev := ms.getRevOnCache(ts)
if rev > 0 {
return rev, nil
@ -278,7 +278,7 @@ func (ms *metaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) {
return 0, fmt.Errorf("can't find revision on ts=%d", ts)
}
func (ms *metaSnapshot) Save(key, value string, ts typeutil.Timestamp) error {
func (ms *MetaSnapshot) Save(key, value string, ts typeutil.Timestamp) error {
ms.lock.Lock()
defer ms.lock.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
@ -297,7 +297,7 @@ func (ms *metaSnapshot) Save(key, value string, ts typeutil.Timestamp) error {
return nil
}
func (ms *metaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
func (ms *MetaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
ms.lock.RLock()
defer ms.lock.RUnlock()
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
@ -327,7 +327,7 @@ func (ms *metaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error)
return string(resp.Kvs[0].Value), nil
}
func (ms *metaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
func (ms *MetaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
ms.lock.Lock()
defer ms.lock.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
@ -348,7 +348,7 @@ func (ms *metaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp)
return nil
}
func (ms *metaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
func (ms *MetaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
ms.lock.RLock()
defer ms.lock.RUnlock()
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
@ -385,7 +385,7 @@ func (ms *metaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]str
return keys, values, nil
}
func (ms *metaSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
func (ms *MetaSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
ms.lock.Lock()
defer ms.lock.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)

View File

@ -14,21 +14,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package rootcoord
package kv
import (
"context"
"fmt"
"math/rand"
"os"
"path"
"testing"
"time"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
)
var Params paramtable.ComponentParam
func TestMain(m *testing.M) {
Params.Init()
code := m.Run()
os.Exit(code)
}
func TestMetaSnapshot(t *testing.T) {
rand.Seed(time.Now().UnixNano())
randVal := rand.Int()
@ -46,7 +57,7 @@ func TestMetaSnapshot(t *testing.T) {
return vtso
}
ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 4)
ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 4)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -60,13 +71,13 @@ func TestMetaSnapshot(t *testing.T) {
assert.Nil(t, err)
}
ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 4)
ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 4)
assert.Nil(t, err)
assert.NotNil(t, ms)
}
func TestSearchOnCache(t *testing.T) {
ms := &metaSnapshot{}
ms := &MetaSnapshot{}
for i := 0; i < 8; i++ {
ms.ts2Rev = append(ms.ts2Rev,
rtPair{
@ -87,7 +98,7 @@ func TestSearchOnCache(t *testing.T) {
}
func TestGetRevOnCache(t *testing.T) {
ms := &metaSnapshot{}
ms := &MetaSnapshot{}
ms.ts2Rev = make([]rtPair, 7)
ms.initTs(7, 16)
ms.initTs(6, 14)
@ -181,7 +192,7 @@ func TestGetRevOnEtcd(t *testing.T) {
assert.Nil(t, err)
defer etcdCli.Close()
ms := metaSnapshot{
ms := MetaSnapshot{
cli: etcdCli,
root: rootPath,
tsKey: tsKey,
@ -230,7 +241,7 @@ func TestLoad(t *testing.T) {
return vtso
}
ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7)
ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -250,7 +261,7 @@ func TestLoad(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, "value-19", val)
ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11)
ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -278,7 +289,7 @@ func TestMultiSave(t *testing.T) {
return vtso
}
ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7)
ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -309,7 +320,7 @@ func TestMultiSave(t *testing.T) {
assert.Equal(t, vals[0], "v1-19")
assert.Equal(t, vals[1], "v2-19")
ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11)
ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -343,7 +354,7 @@ func TestMultiSaveAndRemoveWithPrefix(t *testing.T) {
}
defer etcdCli.Close()
ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7)
ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -381,7 +392,7 @@ func TestMultiSaveAndRemoveWithPrefix(t *testing.T) {
assert.Equal(t, 39-i, len(vals))
}
ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11)
ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11)
assert.Nil(t, err)
assert.NotNil(t, ms)
@ -415,7 +426,7 @@ func TestTsBackward(t *testing.T) {
assert.Nil(t, err)
defer etcdCli.Close()
kv, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
kv, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
assert.Nil(t, err)
err = kv.loadTs()
@ -425,7 +436,7 @@ func TestTsBackward(t *testing.T) {
kv.Save("a", "c", 99) // backward
kv.Save("a", "d", 200)
kv, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
kv, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
assert.Error(t, err)
}
@ -442,7 +453,7 @@ func TestFix7150(t *testing.T) {
assert.Nil(t, err)
defer etcdCli.Close()
kv, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
kv, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
assert.Nil(t, err)
err = kv.loadTs()
@ -452,7 +463,7 @@ func TestFix7150(t *testing.T) {
kv.Save("a", "c", 0) // bug introduced
kv.Save("a", "d", 200)
kv, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
kv, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
assert.Nil(t, err)
err = kv.loadTs()
assert.Nil(t, err)

View File

@ -0,0 +1,24 @@
package kv
// Etcd key prefixes under which rootcoord persists its metadata.
const (
	// ComponentPrefix is the root prefix for all rootcoord metadata keys.
	ComponentPrefix = "root-coord"

	// CollectionMetaPrefix is the prefix for collection meta.
	CollectionMetaPrefix = ComponentPrefix + "/collection"

	// SegmentIndexMetaPrefix is the prefix for segment index meta.
	SegmentIndexMetaPrefix = ComponentPrefix + "/segment-index"

	// IndexMetaPrefix is the prefix for index meta.
	IndexMetaPrefix = ComponentPrefix + "/index"

	// CollectionAliasMetaPrefix is the prefix for collection alias meta.
	CollectionAliasMetaPrefix = ComponentPrefix + "/collection-alias"

	// UserSubPrefix is the subpath for credential users.
	UserSubPrefix = "/credential/users"

	// CredentialPrefix is the full prefix for credential users.
	CredentialPrefix = ComponentPrefix + UserSubPrefix
)

View File

@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package rootcoord
package kv
import (
"bytes"
@ -35,15 +35,15 @@ import (
)
var (
// suffixSnapshotTombstone special value for tombstone mark
suffixSnapshotTombstone = []byte{0xE2, 0x9B, 0xBC}
// SuffixSnapshotTombstone special value for tombstone mark
SuffixSnapshotTombstone = []byte{0xE2, 0x9B, 0xBC}
)
// suffixSnapshot implements SnapshotKV
// this is a simple replacement for metaSnapshot, which is not available due to etcd compaction
// suffixSnapshot record timestamp as prefix of a key under the snapshot prefix path
type suffixSnapshot struct {
// internal kv which suffixSnapshot based on
// SuffixSnapshot implements SnapshotKV
// this is a simple replacement for MetaSnapshot, which is not available due to etcd compaction
// SuffixSnapshot record timestamp as prefix of a key under the Snapshot prefix path
type SuffixSnapshot struct {
// internal kv which SuffixSnapshot based on
kv.TxnKV
// rw mutex provided range lock
sync.RWMutex
@ -76,10 +76,10 @@ type tsv struct {
}
// type conversion make sure implementation
var _ kv.SnapShotKV = (*suffixSnapshot)(nil)
var _ kv.SnapShotKV = (*SuffixSnapshot)(nil)
// newSuffixSnapshot creates a newSuffixSnapshot with provided kv
func newSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*suffixSnapshot, error) {
// NewSuffixSnapshot creates a SuffixSnapshot with the provided kv
func NewSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*SuffixSnapshot, error) {
if txnKV == nil {
return nil, retry.Unrecoverable(errors.New("txnKV is nil"))
}
@ -92,7 +92,7 @@ func newSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*suffixSnaps
tk = path.Join(root, "k")
rootLen := len(tk) - 1
return &suffixSnapshot{
return &SuffixSnapshot{
TxnKV: txnKV,
lastestTS: make(map[string]typeutil.Timestamp),
separator: sep,
@ -105,31 +105,31 @@ func newSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*suffixSnaps
}
// isTombstone helper function to check whether is tombstone mark
func (ss *suffixSnapshot) isTombstone(value string) bool {
return bytes.Equal([]byte(value), suffixSnapshotTombstone)
func (ss *SuffixSnapshot) isTombstone(value string) bool {
return bytes.Equal([]byte(value), SuffixSnapshotTombstone)
}
// hideRootPrefix helper function to hide root prefix from key
func (ss *suffixSnapshot) hideRootPrefix(value string) string {
func (ss *SuffixSnapshot) hideRootPrefix(value string) string {
return value[ss.rootLen:]
}
// composeSnapshotPrefix build a prefix for load snapshots
// formated like [snapshotPrefix]/key[sep]
func (ss *suffixSnapshot) composeSnapshotPrefix(key string) string {
func (ss *SuffixSnapshot) composeSnapshotPrefix(key string) string {
return path.Join(ss.snapshotPrefix, key+ss.separator)
}
// composeTSKey unified tsKey composing method
// uses key, ts and separator to form a key
func (ss *suffixSnapshot) composeTSKey(key string, ts typeutil.Timestamp) string {
func (ss *SuffixSnapshot) composeTSKey(key string, ts typeutil.Timestamp) string {
// [key][sep][ts]
return path.Join(ss.snapshotPrefix, fmt.Sprintf("%s%s%d", key, ss.separator, ts))
}
// isTSKey checks whether a key is in ts-key format
// if true, also returns parsed ts value
func (ss *suffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
func (ss *SuffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
// not in snapshot path
if !strings.HasPrefix(key, ss.snapshotPrefix) {
return 0, false
@ -146,7 +146,7 @@ func (ss *suffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
// isTSOfKey check whether a key is in ts-key format of provided group key
// if true, also returns parsed ts value
func (ss *suffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Timestamp, bool) {
func (ss *SuffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Timestamp, bool) {
// not in snapshot path
if !strings.HasPrefix(key, ss.snapshotPrefix) {
return 0, false
@ -167,7 +167,7 @@ func (ss *suffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Times
// checkKeyTS checks provided key's latest ts is before provided ts
// lock is needed
func (ss *suffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, error) {
func (ss *SuffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, error) {
latest, has := ss.lastestTS[key]
if !has {
err := ss.loadLatestTS(key)
@ -180,11 +180,11 @@ func (ss *suffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, e
}
// loadLatestTS loads the latest ts for the specified key
func (ss *suffixSnapshot) loadLatestTS(key string) error {
func (ss *SuffixSnapshot) loadLatestTS(key string) error {
prefix := ss.composeSnapshotPrefix(key)
keys, _, err := ss.TxnKV.LoadWithPrefix(prefix)
if err != nil {
log.Warn("suffixSnapshot txnkv LoadWithPrefix failed", zap.String("key", key),
log.Warn("SuffixSnapshot txnkv LoadWithPrefix failed", zap.String("key", key),
zap.Error(err))
return err
}
@ -241,10 +241,10 @@ func binarySearchRecords(records []tsv, ts typeutil.Timestamp) (string, bool) {
}
// Save stores key-value pairs with timestamp
// if ts is 0, suffixSnapshot works as a TxnKV
// otherwise, suffixSnapshot will store a ts-key as "key[sep]ts"-value pair in snapshot path
// if ts is 0, SuffixSnapshot works as a TxnKV
// otherwise, SuffixSnapshot will store a ts-key as "key[sep]ts"-value pair in snapshot path
// and for acceleration store original key-value if ts is the latest
func (ss *suffixSnapshot) Save(key string, value string, ts typeutil.Timestamp) error {
func (ss *SuffixSnapshot) Save(key string, value string, ts typeutil.Timestamp) error {
// if ts == 0, act like TxnKv
// will not update lastestTs since ts not not valid
if ts == 0 {
@ -278,7 +278,7 @@ func (ss *suffixSnapshot) Save(key string, value string, ts typeutil.Timestamp)
return ss.TxnKV.Save(tsKey, value)
}
func (ss *suffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
func (ss *SuffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
// if ts == 0, load latest by definition
// and with acceleration logic, just do load key will do
if ts == 0 {
@ -351,7 +351,7 @@ func (ss *suffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error
// MultiSave save multiple kvs
// if ts == 0, act like TxnKV
// each key-value will be treated using same logic like Save
func (ss *suffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
func (ss *SuffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
// if ts == 0, act like TxnKV
if ts == 0 {
return ss.TxnKV.MultiSave(kvs)
@ -378,7 +378,7 @@ func (ss *suffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp
// generateSaveExecute examine each key is the after the corresponding latest
// returns calculated execute map and update ts list
func (ss *suffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil.Timestamp) (map[string]string, []string, error) {
func (ss *SuffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil.Timestamp) (map[string]string, []string, error) {
var after bool
var err error
execute := make(map[string]string)
@ -403,7 +403,7 @@ func (ss *suffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil
}
// LoadWithPrefix load keys with provided prefix and returns value in the ts
func (ss *suffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
func (ss *SuffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
// ts 0 case shall be treated as fetch latest/current value
if ts == 0 {
keys, values, err := ss.TxnKV.LoadWithPrefix(key)
@ -484,7 +484,7 @@ func (ss *suffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]s
// MultiSaveAndRemoveWithPrefix saves multiple kvs and removes keys by prefix as well
// if ts == 0, act like TxnKV
// each key-value will be treated in same logic like Save
func (ss *suffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
func (ss *SuffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
// if ts == 0, act like TxnKV
if ts == 0 {
return ss.TxnKV.MultiSaveAndRemoveWithPrefix(saves, removals)
@ -503,15 +503,15 @@ func (ss *suffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string,
for _, removal := range removals {
keys, _, err := ss.TxnKV.LoadWithPrefix(removal)
if err != nil {
log.Warn("suffixSnapshot TxnKV LoadwithPrefix failed", zap.String("key", removal), zap.Error(err))
log.Warn("SuffixSnapshot TxnKV LoadwithPrefix failed", zap.String("key", removal), zap.Error(err))
return err
}
// add tombstone to original key and add ts entry
for _, key := range keys {
key = ss.hideRootPrefix(key)
execute[key] = string(suffixSnapshotTombstone)
execute[ss.composeTSKey(key, ts)] = string(suffixSnapshotTombstone)
execute[key] = string(SuffixSnapshotTombstone)
execute[ss.composeTSKey(key, ts)] = string(SuffixSnapshotTombstone)
updateList = append(updateList, key)
}
}

View File

@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package rootcoord
package kv
import (
"fmt"
@ -171,7 +171,7 @@ func Test_binarySearchRecords(t *testing.T) {
func Test_ComposeIsTsKey(t *testing.T) {
sep := "_ts"
ss, err := newSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
ss, err := NewSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
require.Nil(t, err)
type testcase struct {
key string
@ -209,7 +209,7 @@ func Test_ComposeIsTsKey(t *testing.T) {
func Test_SuffixSnaphotIsTSOfKey(t *testing.T) {
sep := "_ts"
ss, err := newSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
ss, err := NewSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
require.Nil(t, err)
type testcase struct {
key string
@ -274,7 +274,7 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
return vtso
}
ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)
@ -295,7 +295,7 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, "value-19", val)
ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)
@ -326,7 +326,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
return vtso
}
ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)
@ -358,7 +358,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
assert.Equal(t, vals[0], "v1-19")
assert.Equal(t, vals[1], "v2-19")
ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)
for i := 0; i < 20; i++ {
@ -403,7 +403,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
return vtso
}
ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)
@ -440,7 +440,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
assert.Equal(t, 39-i, len(vals))
}
ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NotNil(t, ss)

View File

@ -0,0 +1,155 @@
package model
import (
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
// Collection is the in-memory metastore model of a Milvus collection.
// It is converted to/from pb.CollectionInfo by MarshalCollectionModel and
// UnmarshalCollectionModel; note that TenantID, Aliases and Extra are not
// produced by UnmarshalCollectionModel (the pb message carries no such data
// in the conversion below).
type Collection struct {
	TenantID             string
	CollectionID         int64
	Partitions           []*Partition
	Name                 string
	Description          string
	AutoID               bool // whether primary keys are auto-generated (from schema)
	Fields               []*Field
	FieldIDToIndexID     []common.Int64Tuple // field id -> index id pairs (from pb FieldIndexes)
	VirtualChannelNames  []string
	PhysicalChannelNames []string
	ShardsNum            int32
	StartPositions       []*commonpb.KeyDataPair
	CreateTime           uint64
	ConsistencyLevel     commonpb.ConsistencyLevel
	Aliases              []string
	Extra                map[string]string // extra kvs
}
// Clone returns a copy of the collection.
//
// The receiver is taken by value, so it is already a field-by-field copy of
// the original; this replaces the previous explicit struct literal that
// listed every field. As before, slice and map fields still share backing
// storage with the original (shallow copy).
func (c Collection) Clone() *Collection {
	clone := c
	return &clone
}
// UnmarshalCollectionModel converts a pb.CollectionInfo into the internal
// Collection model. A nil input unmarshals to nil.
//
// It is backward compatible with the deprecated parallel-array partition
// fields (PartitionIDs / PartitionNames / PartitionCreatedTimestamps): the
// newer Partitions message field is preferred when populated.
//
// NOTE(review): coll.Schema is dereferenced without a nil check — confirm
// all callers guarantee a non-nil schema.
func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection {
	if coll == nil {
		return nil
	}

	// Backward compatible for deprecated fields.
	var partitions []*Partition
	if len(coll.Partitions) != 0 {
		partitions = make([]*Partition, len(coll.Partitions))
		for idx, partition := range coll.Partitions {
			partitions[idx] = &Partition{
				PartitionID:               partition.GetPartitionID(),
				PartitionName:             partition.GetPartitionName(),
				PartitionCreatedTimestamp: partition.GetPartitionCreatedTimestamp(),
			}
		}
	} else {
		// Deprecated layout: three parallel slices, assumed equal length
		// (an inconsistent record would panic here — TODO confirm upstream
		// invariant).
		partitions = make([]*Partition, len(coll.PartitionIDs))
		for idx := range coll.PartitionIDs {
			partitions[idx] = &Partition{
				PartitionID:               coll.PartitionIDs[idx],
				PartitionName:             coll.PartitionNames[idx],
				PartitionCreatedTimestamp: coll.PartitionCreatedTimestamps[idx],
			}
		}
	}

	// Renamed from the misspelled "filedIDToIndexIDs".
	fieldIDToIndexIDs := make([]common.Int64Tuple, len(coll.FieldIndexes))
	for idx, fieldIndexInfo := range coll.FieldIndexes {
		fieldIDToIndexIDs[idx] = common.Int64Tuple{
			Key:   fieldIndexInfo.FiledID,
			Value: fieldIndexInfo.IndexID,
		}
	}

	return &Collection{
		CollectionID:         coll.ID,
		Name:                 coll.Schema.Name,
		Description:          coll.Schema.Description,
		AutoID:               coll.Schema.AutoID,
		Fields:               UnmarshalFieldModels(coll.Schema.Fields),
		Partitions:           partitions,
		FieldIDToIndexID:     fieldIDToIndexIDs,
		VirtualChannelNames:  coll.VirtualChannelNames,
		PhysicalChannelNames: coll.PhysicalChannelNames,
		ShardsNum:            coll.ShardsNum,
		ConsistencyLevel:     coll.ConsistencyLevel,
		CreateTime:           coll.CreateTime,
		StartPositions:       coll.StartPositions,
	}
}
// MarshalCollectionModel converts the internal Collection model into its
// pb.CollectionInfo representation. A nil input marshals to nil.
//
// Note: TenantID, Aliases and Extra have no counterpart in the produced
// pb.CollectionInfo and are not serialized here.
func MarshalCollectionModel(coll *Collection) *pb.CollectionInfo {
	if coll == nil {
		return nil
	}

	collSchema := &schemapb.CollectionSchema{
		Name:        coll.Name,
		Description: coll.Description,
		AutoID:      coll.AutoID,
		// Reuse the shared field marshaller instead of duplicating the
		// field-by-field conversion inline (keeps this in sync with
		// MarshalFieldModel).
		Fields: MarshalFieldModels(coll.Fields),
	}

	partitions := make([]*pb.PartitionInfo, len(coll.Partitions))
	for idx, partition := range coll.Partitions {
		partitions[idx] = &pb.PartitionInfo{
			PartitionID:               partition.PartitionID,
			PartitionName:             partition.PartitionName,
			PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp,
		}
	}

	fieldIndexes := make([]*pb.FieldIndexInfo, len(coll.FieldIDToIndexID))
	for idx, tuple := range coll.FieldIDToIndexID {
		fieldIndexes[idx] = &pb.FieldIndexInfo{
			FiledID: tuple.Key, // "FiledID" is the (misspelled) proto field name
			IndexID: tuple.Value,
		}
	}

	return &pb.CollectionInfo{
		ID:                   coll.CollectionID,
		Schema:               collSchema,
		Partitions:           partitions,
		FieldIndexes:         fieldIndexes,
		CreateTime:           coll.CreateTime,
		VirtualChannelNames:  coll.VirtualChannelNames,
		PhysicalChannelNames: coll.PhysicalChannelNames,
		ShardsNum:            coll.ShardsNum,
		ConsistencyLevel:     coll.ConsistencyLevel,
		StartPositions:       coll.StartPositions,
	}
}

View File

@ -0,0 +1,138 @@
package model
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/common"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
)
// Shared fixtures for collection (un)marshalling tests.
//
// NOTE(review): indexID, indexParams, filedSchemaPb and fieldModel are
// declared in sibling test files of this package.
var (
	colID     = typeutil.UniqueID(1)
	colName   = "c"
	fieldID   = typeutil.UniqueID(101)
	fieldName = "field110"
	partID    = typeutil.UniqueID(20)
	partName  = "testPart"
	tenantID  = "tenant-1"

	typeParams = []*commonpb.KeyValuePair{
		{
			Key:   "field110-k1",
			Value: "field110-v1",
		},
	}

	startPositions = []*commonpb.KeyDataPair{
		{
			Key:  "k1",
			Data: []byte{byte(1)},
		},
	}

	// colModel is the in-memory model both pb layouts below must unmarshal to.
	colModel = &Collection{
		TenantID:     tenantID,
		CollectionID: colID,
		Name:         colName,
		AutoID:       false,
		Description:  "none",
		Fields:       []*Field{fieldModel},
		FieldIDToIndexID: []common.Int64Tuple{
			{
				Key:   fieldID,
				Value: indexID,
			},
		},
		VirtualChannelNames:  []string{"vch"},
		PhysicalChannelNames: []string{"pch"},
		ShardsNum:            1,
		CreateTime:           1,
		StartPositions:       startPositions,
		ConsistencyLevel:     commonpb.ConsistencyLevel_Strong,
		Partitions: []*Partition{
			{
				PartitionID:               partID,
				PartitionName:             partName,
				PartitionCreatedTimestamp: 1,
			},
		},
	}

	// deprecatedColPb uses the legacy parallel-array partition fields.
	deprecatedColPb = &pb.CollectionInfo{
		ID: colID,
		Schema: &schemapb.CollectionSchema{
			Name:        colName,
			Description: "none",
			AutoID:      false,
			Fields:      []*schemapb.FieldSchema{filedSchemaPb},
		},
		CreateTime:                 1,
		PartitionIDs:               []int64{partID},
		PartitionNames:             []string{partName},
		PartitionCreatedTimestamps: []uint64{1},
		FieldIndexes: []*pb.FieldIndexInfo{
			{
				FiledID: fieldID,
				IndexID: indexID,
			},
		},
		VirtualChannelNames:  []string{"vch"},
		PhysicalChannelNames: []string{"pch"},
		ShardsNum:            1,
		StartPositions:       startPositions,
		ConsistencyLevel:     commonpb.ConsistencyLevel_Strong,
	}

	// newColPb uses the current Partitions message field.
	newColPb = &pb.CollectionInfo{
		ID: colID,
		Schema: &schemapb.CollectionSchema{
			Name:        colName,
			Description: "none",
			AutoID:      false,
			Fields:      []*schemapb.FieldSchema{filedSchemaPb},
		},
		CreateTime: 1,
		Partitions: []*pb.PartitionInfo{
			{
				PartitionID:               partID,
				PartitionName:             partName,
				PartitionCreatedTimestamp: 1,
			},
		},
		FieldIndexes: []*pb.FieldIndexInfo{
			{
				FiledID: fieldID,
				IndexID: indexID,
			},
		},
		VirtualChannelNames:  []string{"vch"},
		PhysicalChannelNames: []string{"pch"},
		ShardsNum:            1,
		StartPositions:       startPositions,
		ConsistencyLevel:     commonpb.ConsistencyLevel_Strong,
	}
)
// TestUnmarshalCollectionModel checks that both the deprecated
// parallel-array partition layout and the new Partitions layout unmarshal
// to the same model, and that nil input yields nil.
func TestUnmarshalCollectionModel(t *testing.T) {
	ret := UnmarshalCollectionModel(deprecatedColPb)
	// TenantID is not carried by pb.CollectionInfo; patch it before comparing.
	ret.TenantID = tenantID
	// testify's assert.Equal takes (expected, actual) — order fixed here.
	assert.Equal(t, colModel, ret)

	ret = UnmarshalCollectionModel(newColPb)
	ret.TenantID = tenantID
	assert.Equal(t, colModel, ret)

	assert.Nil(t, UnmarshalCollectionModel(nil))
}
// TestMarshalCollectionModel checks the model -> pb conversion and the nil
// guard.
func TestMarshalCollectionModel(t *testing.T) {
	ret := MarshalCollectionModel(colModel)
	// testify's assert.Equal takes (expected, actual) — order fixed here.
	assert.Equal(t, newColPb, ret)
	assert.Nil(t, MarshalCollectionModel(nil))
}

View File

@ -0,0 +1,24 @@
package model
import "github.com/milvus-io/milvus/internal/proto/internalpb"
// Credential is the metastore model of a user credential.
// It is converted to internalpb.CredentialInfo by MarshalCredentialModel.
type Credential struct {
	Username          string
	EncryptedPassword string // stored/encrypted form of the password (scheme not visible here)
	Tenant            string
	IsSuper           bool // whether the user is a super user
	Sha256Password    string
}
// MarshalCredentialModel converts a Credential model into its
// internalpb.CredentialInfo representation. A nil credential marshals to nil.
func MarshalCredentialModel(cred *Credential) *internalpb.CredentialInfo {
	if cred == nil {
		return nil
	}
	info := &internalpb.CredentialInfo{
		Tenant:            cred.Tenant,
		Username:          cred.Username,
		EncryptedPassword: cred.EncryptedPassword,
		Sha256Password:    cred.Sha256Password,
		IsSuper:           cred.IsSuper,
	}
	return info
}

View File

@ -0,0 +1,34 @@
package model
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/internalpb"
)
// Shared fixtures for credential marshalling tests: the model and its
// expected pb counterpart, field for field.
var (
	credentialModel = &Credential{
		Username:          "user",
		EncryptedPassword: "password",
		Tenant:            "tenant-1",
		IsSuper:           true,
		Sha256Password:    "xxxx",
	}

	credentialPb = &internalpb.CredentialInfo{
		Username:          "user",
		EncryptedPassword: "password",
		Tenant:            "tenant-1",
		IsSuper:           true,
		Sha256Password:    "xxxx",
	}
)
func TestMarshalCredentialModel(t *testing.T) {
ret := MarshalCredentialModel(credentialModel)
assert.Equal(t, credentialPb, ret)
assert.Nil(t, MarshalCredentialModel(nil))
}

View File

@ -0,0 +1,75 @@
package model
import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
// Field is the metastore model of a collection field (column) schema.
// It mirrors schemapb.FieldSchema field for field; see MarshalFieldModel
// and UnmarshalFieldModel for the conversions.
type Field struct {
	FieldID      int64
	Name         string
	IsPrimaryKey bool
	Description  string
	DataType     schemapb.DataType
	TypeParams   []*commonpb.KeyValuePair
	IndexParams  []*commonpb.KeyValuePair
	AutoID       bool // whether values for this field are auto-generated
}
// MarshalFieldModel converts a Field model into its schemapb.FieldSchema
// representation. A nil field marshals to nil.
func MarshalFieldModel(field *Field) *schemapb.FieldSchema {
	if field == nil {
		return nil
	}
	schema := &schemapb.FieldSchema{
		FieldID:      field.FieldID,
		Name:         field.Name,
		IsPrimaryKey: field.IsPrimaryKey,
		Description:  field.Description,
		DataType:     field.DataType,
		AutoID:       field.AutoID,
		TypeParams:   field.TypeParams,
		IndexParams:  field.IndexParams,
	}
	return schema
}
// MarshalFieldModels converts a slice of Field models into schemapb field
// schemas, preserving order. A nil slice marshals to nil.
func MarshalFieldModels(fields []*Field) []*schemapb.FieldSchema {
	if fields == nil {
		return nil
	}
	schemas := make([]*schemapb.FieldSchema, 0, len(fields))
	for _, field := range fields {
		schemas = append(schemas, MarshalFieldModel(field))
	}
	return schemas
}
// UnmarshalFieldModel converts a schemapb.FieldSchema into the internal
// Field model. A nil schema unmarshals to nil.
func UnmarshalFieldModel(fieldSchema *schemapb.FieldSchema) *Field {
	if fieldSchema == nil {
		return nil
	}
	field := &Field{
		FieldID:      fieldSchema.FieldID,
		Name:         fieldSchema.Name,
		IsPrimaryKey: fieldSchema.IsPrimaryKey,
		Description:  fieldSchema.Description,
		DataType:     fieldSchema.DataType,
		AutoID:       fieldSchema.AutoID,
		TypeParams:   fieldSchema.TypeParams,
		IndexParams:  fieldSchema.IndexParams,
	}
	return field
}
// UnmarshalFieldModels converts a slice of protobuf field schemas into
// Field models, preserving order. A nil slice maps to nil.
func UnmarshalFieldModels(fieldSchemas []*schemapb.FieldSchema) []*Field {
	if fieldSchemas == nil {
		return nil
	}
	out := make([]*Field, 0, len(fieldSchemas))
	for _, fs := range fieldSchemas {
		out = append(out, UnmarshalFieldModel(fs))
	}
	return out
}

View File

@ -0,0 +1,56 @@
package model
import (
"testing"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
)
var (
	// filedSchemaPb and fieldModel are mirror fixtures for the field
	// marshal/unmarshal round-trip tests; they must describe the same
	// field. (fieldID, fieldName, typeParams, indexParams are declared
	// elsewhere in the package test files.)
	filedSchemaPb = &schemapb.FieldSchema{
		FieldID:      fieldID,
		Name:         fieldName,
		IsPrimaryKey: false,
		Description:  "none",
		DataType:     schemapb.DataType_FloatVector,
		TypeParams:   typeParams,
		IndexParams:  indexParams,
		AutoID:       false,
	}
	fieldModel = &Field{
		FieldID:      fieldID,
		Name:         fieldName,
		IsPrimaryKey: false,
		Description:  "none",
		AutoID:       false,
		DataType:     schemapb.DataType_FloatVector,
		TypeParams:   typeParams,
		IndexParams:  indexParams,
	}
)
func TestMarshalFieldModel(t *testing.T) {
ret := MarshalFieldModel(fieldModel)
assert.Equal(t, filedSchemaPb, ret)
assert.Nil(t, MarshalFieldModel(nil))
}
func TestMarshalFieldModels(t *testing.T) {
ret := MarshalFieldModels([]*Field{fieldModel})
assert.Equal(t, []*schemapb.FieldSchema{filedSchemaPb}, ret)
assert.Nil(t, MarshalFieldModels(nil))
}
func TestUnmarshalFieldModel(t *testing.T) {
ret := UnmarshalFieldModel(filedSchemaPb)
assert.Equal(t, fieldModel, ret)
assert.Nil(t, UnmarshalFieldModel(nil))
}
func TestUnmarshalFieldModels(t *testing.T) {
ret := UnmarshalFieldModels([]*schemapb.FieldSchema{filedSchemaPb})
assert.Equal(t, []*Field{fieldModel}, ret)
assert.Nil(t, UnmarshalFieldModels(nil))
}

View File

@ -0,0 +1,96 @@
package model
import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
)
// Index is the metastore model for an index on one collection field.
// It aggregates the per-segment build state in SegmentIndexes, which is
// populated by UnmarshalSegmentIndexModel / MergeIndexModel rather than
// by UnmarshalIndexModel (the etcd IndexInfo record carries no segment data).
type Index struct {
	CollectionID   int64                    // owning collection
	FieldID        int64                    // indexed field
	IndexID        int64                    // unique index identifier
	IndexName      string                   // user-visible index name
	IsDeleted      bool                     // tombstone flag (soft delete)
	CreateTime     uint64                   // creation timestamp
	IndexParams    []*commonpb.KeyValuePair // index build parameters
	SegmentIndexes map[int64]SegmentIndex   //segmentID -> segmentIndex
	Extra          map[string]string        // extra key-value metadata
}
// UnmarshalIndexModel converts an etcd pb.IndexInfo record into the Index
// model. Only the index-level fields are populated; CollectionID, FieldID
// and SegmentIndexes are filled in by other sources (see MergeIndexModel).
// A nil input maps to a nil output.
func UnmarshalIndexModel(indexInfo *pb.IndexInfo) *Index {
	if indexInfo == nil {
		return nil
	}
	idx := new(Index)
	idx.IndexName = indexInfo.IndexName
	idx.IndexID = indexInfo.IndexID
	idx.IndexParams = indexInfo.IndexParams
	idx.IsDeleted = indexInfo.Deleted
	idx.CreateTime = indexInfo.CreateTime
	return idx
}
// MarshalIndexModel converts an Index model into its etcd pb.IndexInfo
// record. Segment-level state and ownership IDs are intentionally not
// serialized here. A nil input maps to a nil output.
func MarshalIndexModel(index *Index) *pb.IndexInfo {
	if index == nil {
		return nil
	}
	info := new(pb.IndexInfo)
	info.IndexName = index.IndexName
	info.IndexID = index.IndexID
	info.IndexParams = index.IndexParams
	info.Deleted = index.IsDeleted
	info.CreateTime = index.CreateTime
	return info
}
// MergeIndexModel merges index information from b into a copy of a and
// returns the merged result.
//
// Merge rules:
//   - SegmentIndexes maps are unioned, with b's entries winning on conflict.
//   - Identity fields (CollectionID, FieldID, IndexID, IndexName,
//     IndexParams, Extra) keep a's value when set, falling back to b's.
//   - IsDeleted and CreateTime always follow b (treated as the newer record).
//
// If either argument is nil the other is returned as-is (possibly nil).
func MergeIndexModel(a *Index, b *Index) *Index {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	newIdx := *a
	if b.SegmentIndexes != nil {
		if newIdx.SegmentIndexes == nil {
			newIdx.SegmentIndexes = b.SegmentIndexes
		} else {
			// Merge into a fresh map: newIdx.SegmentIndexes aliases a's map
			// after the shallow copy above, so writing into it directly would
			// mutate the caller's input.
			merged := make(map[int64]SegmentIndex, len(a.SegmentIndexes)+len(b.SegmentIndexes))
			for segID, segmentIndex := range a.SegmentIndexes {
				merged[segID] = segmentIndex
			}
			for segID, segmentIndex := range b.SegmentIndexes {
				merged[segID] = segmentIndex
			}
			newIdx.SegmentIndexes = merged
		}
	}
	if newIdx.CollectionID == 0 && b.CollectionID != 0 {
		newIdx.CollectionID = b.CollectionID
	}
	if newIdx.FieldID == 0 && b.FieldID != 0 {
		newIdx.FieldID = b.FieldID
	}
	if newIdx.IndexID == 0 && b.IndexID != 0 {
		newIdx.IndexID = b.IndexID
	}
	if newIdx.IndexName == "" && b.IndexName != "" {
		newIdx.IndexName = b.IndexName
	}
	if newIdx.IndexParams == nil && b.IndexParams != nil {
		newIdx.IndexParams = b.IndexParams
	}
	newIdx.IsDeleted = b.IsDeleted
	newIdx.CreateTime = b.CreateTime
	if newIdx.Extra == nil && b.Extra != nil {
		newIdx.Extra = b.Extra
	}
	return &newIdx
}

View File

@ -0,0 +1,50 @@
package model
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
var (
	// Shared fixtures for the index model tests. indexModel and indexPb
	// must describe the same index; note the Deleted <-> IsDeleted field
	// name difference between the protobuf record and the model.
	indexID   = typeutil.UniqueID(1)
	indexName = "idx"

	indexParams = []*commonpb.KeyValuePair{
		{
			Key:   "field110-i1",
			Value: "field110-v1",
		},
	}

	indexModel = &Index{
		IndexID:     indexID,
		IndexName:   indexName,
		IndexParams: indexParams,
		IsDeleted:   true,
		CreateTime:  1,
	}

	indexPb = &pb.IndexInfo{
		IndexName:   indexName,
		IndexID:     indexID,
		IndexParams: indexParams,
		Deleted:     true,
		CreateTime:  1,
	}
)
func TestMarshalIndexModel(t *testing.T) {
ret := MarshalIndexModel(indexModel)
assert.Equal(t, indexPb, ret)
assert.Nil(t, MarshalIndexModel(nil))
}
func TestUnmarshalIndexModel(t *testing.T) {
ret := UnmarshalIndexModel(indexPb)
assert.Equal(t, indexModel, ret)
assert.Nil(t, UnmarshalIndexModel(nil))
}

View File

@ -0,0 +1,8 @@
package model
// Partition is the metastore model for one partition of a collection.
type Partition struct {
	PartitionID               int64             // unique partition identifier
	PartitionName             string            // user-visible partition name
	PartitionCreatedTimestamp uint64            // creation timestamp
	Extra                     map[string]string // extra key-value metadata
}

View File

@ -0,0 +1,52 @@
package model
import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
)
// Segment is the metastore model for one data segment of a partition.
type Segment struct {
	SegmentID           int64                // unique segment identifier
	PartitionID         int64                // owning partition
	NumRows             int64                // row count in the segment
	MemSize             int64                // in-memory size in bytes
	DmChannel           string               // data-manipulation channel the segment belongs to
	CompactionFrom      []int64              // source segment IDs if produced by compaction
	CreatedByCompaction bool                 // whether the segment is a compaction result
	SegmentState        commonpb.SegmentState // lifecycle state (growing/sealed/flushed/...)
	IndexInfos          []*SegmentIndex      // per-index build info for this segment
	ReplicaIds          []int64              // replicas this segment is loaded on
	NodeIds             []int64              // nodes this segment is served by
}
// SegmentIndex describes the build state of one index on one segment.
// It embeds Segment so segment identity (SegmentID, PartitionID, ...)
// is available on the same value.
type SegmentIndex struct {
	Segment
	EnableIndex    bool     // whether the index is built and usable for this segment
	CreateTime     uint64   // creation timestamp of the segment index record
	BuildID        int64    // index build task identifier
	IndexSize      uint64   // size of the built index in bytes
	IndexFilePaths []string // storage paths of the built index files
}
// UnmarshalSegmentIndexModel converts an etcd pb.SegmentIndexInfo record
// into an Index model containing a single SegmentIndexes entry for that
// segment. A nil input maps to a nil output.
func UnmarshalSegmentIndexModel(segIndex *pb.SegmentIndexInfo) *Index {
	if segIndex == nil {
		return nil
	}
	segmentIndex := SegmentIndex{
		Segment: Segment{
			SegmentID:   segIndex.SegmentID,
			PartitionID: segIndex.PartitionID,
		},
		BuildID:     segIndex.BuildID,
		EnableIndex: segIndex.EnableIndex,
		CreateTime:  segIndex.CreateTime,
	}
	return &Index{
		CollectionID:   segIndex.CollectionID,
		FieldID:        segIndex.FieldID,
		IndexID:        segIndex.IndexID,
		SegmentIndexes: map[int64]SegmentIndex{segIndex.SegmentID: segmentIndex},
	}
}

View File

@ -0,0 +1,48 @@
package model
import (
"testing"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
)
var (
	// Fixtures for the segment-index unmarshal test: segmentIdxPb is the
	// etcd record and indexModel2 the Index model it must unmarshal to.
	// (colID, partID, fieldID, indexID are declared in sibling test files.)
	segmentID = typeutil.UniqueID(1)
	buildID   = typeutil.UniqueID(1)

	segmentIdxPb = &pb.SegmentIndexInfo{
		CollectionID: colID,
		PartitionID:  partID,
		SegmentID:    segmentID,
		FieldID:      fieldID,
		IndexID:      indexID,
		BuildID:      buildID,
		EnableIndex:  true,
		CreateTime:   1,
	}

	indexModel2 = &Index{
		CollectionID: colID,
		IndexID:      indexID,
		FieldID:      fieldID,
		SegmentIndexes: map[int64]SegmentIndex{
			segmentID: {
				Segment: Segment{
					SegmentID:   segmentID,
					PartitionID: partID,
				},
				BuildID:     buildID,
				EnableIndex: true,
				CreateTime:  1,
			},
		},
	}
)
func TestUnmarshalSegmentIndexModel(t *testing.T) {
ret := UnmarshalSegmentIndexModel(segmentIdxPb)
assert.Equal(t, indexModel2, ret)
assert.Nil(t, UnmarshalSegmentIndexModel(nil))
}

View File

@ -0,0 +1,91 @@
package table
import (
"context"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// Catalog is a table-backed implementation of the metastore catalog
// interface. All methods are currently unimplemented stubs that return
// zero values; they exist so the type satisfies the catalog interface
// while the table backend is being developed.
type Catalog struct {
}

// CreateCollection persists a new collection. Stub: no-op, returns nil.
func (tc *Catalog) CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
	return nil
}

// GetCollectionByID loads a collection by ID at timestamp ts. Stub: returns nil, nil.
func (tc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
	return nil, nil
}

// GetCollectionByName loads a collection by name at timestamp ts. Stub: returns nil, nil.
func (tc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) {
	return nil, nil
}

// ListCollections lists all collections at timestamp ts, keyed by name. Stub: returns nil, nil.
func (tc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
	return nil, nil
}

// CollectionExists reports whether the collection exists at timestamp ts. Stub: always false.
func (tc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool {
	return false
}

// DropCollection removes a collection. Stub: no-op, returns nil.
func (tc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
	return nil
}

// CreatePartition persists a new partition of coll. Stub: no-op, returns nil.
func (tc *Catalog) CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
	return nil
}

// DropPartition removes a partition from the collection. Stub: no-op, returns nil.
func (tc *Catalog) DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error {
	return nil
}

// AlterIndex updates an existing index record. Stub: no-op, returns nil.
func (tc *Catalog) AlterIndex(ctx context.Context, index *model.Index) error {
	return nil
}

// DropIndex removes index dropIdxID from the collection. Stub: no-op, returns nil.
func (tc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error {
	return nil
}

// ListIndexes lists all index records. Stub: returns nil, nil.
func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
	return nil, nil
}

// CreateAlias persists a collection alias. Stub: no-op, returns nil.
func (tc *Catalog) CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
	return nil
}

// DropAlias removes alias from the collection. Stub: no-op, returns nil.
func (tc *Catalog) DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error {
	return nil
}

// AlterAlias re-points an existing alias. Stub: no-op, returns nil.
func (tc *Catalog) AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
	return nil
}

// ListAliases lists all alias records. Stub: returns nil, nil.
func (tc *Catalog) ListAliases(ctx context.Context) ([]*model.Collection, error) {
	return nil, nil
}

// GetCredential loads the credential for username. Stub: returns nil, nil.
func (tc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) {
	return nil, nil
}

// CreateCredential persists a new credential. Stub: no-op, returns nil.
func (tc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error {
	return nil
}

// DropCredential removes the credential for username. Stub: no-op, returns nil.
func (tc *Catalog) DropCredential(ctx context.Context, username string) error {
	return nil
}

// ListCredentials lists all stored usernames. Stub: returns nil, nil.
func (tc *Catalog) ListCredentials(ctx context.Context) ([]string, error) {
	return nil, nil
}

// Close releases catalog resources. Stub: no-op.
func (tc *Catalog) Close() {

}

View File

@ -5,11 +5,7 @@ option go_package="github.com/milvus-io/milvus/internal/proto/etcdpb";
import "common.proto";
import "schema.proto";
message ProxyMeta {
int64 ID = 1;
common.Address address = 2;
repeated string result_channelIDs = 3;
}
// this proto only used to describe object that will persist into etcd
message IndexInfo {
string index_name = 1;
@ -28,15 +24,25 @@ message CollectionInfo {
int64 ID = 1;
schema.CollectionSchema schema = 2;
uint64 create_time = 3;
// deprecate
repeated int64 partitionIDs = 4;
// deprecate
repeated string partitionNames = 5;
repeated FieldIndexInfo field_indexes = 6;
repeated string virtual_channel_names = 7;
repeated string physical_channel_names = 8;
// deprecate
repeated uint64 partition_created_timestamps = 9;
int32 shards_num = 10;
repeated common.KeyDataPair start_positions = 11;
common.ConsistencyLevel consistency_level = 12;
repeated PartitionInfo partitions = 13;
}
message PartitionInfo {
int64 partitionID = 1;
string partitionName = 2;
uint64 partition_created_timestamp = 3;
}
message SegmentIndexInfo {
@ -50,6 +56,7 @@ message SegmentIndexInfo {
uint64 create_time = 8;
}
// TODO move to proto files of interprocess communication
message CollectionMeta {
int64 ID=1;
schema.CollectionSchema schema=2;
@ -58,3 +65,13 @@ message CollectionMeta {
repeated string partition_tags=5;
repeated int64 partitionIDs=6;
}
message CredentialInfo {
string username = 1;
// encrypted by bcrypt (for higher security level)
string encrypted_password = 2;
string tenant = 3;
bool is_super = 4;
// encrypted by sha256 (for good performance in cache mapping)
string sha256_password = 5;
}

View File

@ -22,61 +22,6 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ProxyMeta struct {
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Address *commonpb.Address `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
ResultChannelIDs []string `protobuf:"bytes,3,rep,name=result_channelIDs,json=resultChannelIDs,proto3" json:"result_channelIDs,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProxyMeta) Reset() { *m = ProxyMeta{} }
func (m *ProxyMeta) String() string { return proto.CompactTextString(m) }
func (*ProxyMeta) ProtoMessage() {}
func (*ProxyMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{0}
}
func (m *ProxyMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProxyMeta.Unmarshal(m, b)
}
func (m *ProxyMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProxyMeta.Marshal(b, m, deterministic)
}
func (m *ProxyMeta) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProxyMeta.Merge(m, src)
}
func (m *ProxyMeta) XXX_Size() int {
return xxx_messageInfo_ProxyMeta.Size(m)
}
func (m *ProxyMeta) XXX_DiscardUnknown() {
xxx_messageInfo_ProxyMeta.DiscardUnknown(m)
}
var xxx_messageInfo_ProxyMeta proto.InternalMessageInfo
func (m *ProxyMeta) GetID() int64 {
if m != nil {
return m.ID
}
return 0
}
func (m *ProxyMeta) GetAddress() *commonpb.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *ProxyMeta) GetResultChannelIDs() []string {
if m != nil {
return m.ResultChannelIDs
}
return nil
}
type IndexInfo struct {
IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
IndexID int64 `protobuf:"varint,2,opt,name=indexID,proto3" json:"indexID,omitempty"`
@ -92,7 +37,7 @@ func (m *IndexInfo) Reset() { *m = IndexInfo{} }
func (m *IndexInfo) String() string { return proto.CompactTextString(m) }
func (*IndexInfo) ProtoMessage() {}
func (*IndexInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{1}
return fileDescriptor_975d306d62b73e88, []int{0}
}
func (m *IndexInfo) XXX_Unmarshal(b []byte) error {
@ -160,7 +105,7 @@ func (m *FieldIndexInfo) Reset() { *m = FieldIndexInfo{} }
func (m *FieldIndexInfo) String() string { return proto.CompactTextString(m) }
func (*FieldIndexInfo) ProtoMessage() {}
func (*FieldIndexInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{2}
return fileDescriptor_975d306d62b73e88, []int{1}
}
func (m *FieldIndexInfo) XXX_Unmarshal(b []byte) error {
@ -196,28 +141,32 @@ func (m *FieldIndexInfo) GetIndexID() int64 {
}
type CollectionInfo struct {
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
CreateTime uint64 `protobuf:"varint,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
PartitionIDs []int64 `protobuf:"varint,4,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
PartitionNames []string `protobuf:"bytes,5,rep,name=partitionNames,proto3" json:"partitionNames,omitempty"`
FieldIndexes []*FieldIndexInfo `protobuf:"bytes,6,rep,name=field_indexes,json=fieldIndexes,proto3" json:"field_indexes,omitempty"`
VirtualChannelNames []string `protobuf:"bytes,7,rep,name=virtual_channel_names,json=virtualChannelNames,proto3" json:"virtual_channel_names,omitempty"`
PhysicalChannelNames []string `protobuf:"bytes,8,rep,name=physical_channel_names,json=physicalChannelNames,proto3" json:"physical_channel_names,omitempty"`
PartitionCreatedTimestamps []uint64 `protobuf:"varint,9,rep,packed,name=partition_created_timestamps,json=partitionCreatedTimestamps,proto3" json:"partition_created_timestamps,omitempty"`
ShardsNum int32 `protobuf:"varint,10,opt,name=shards_num,json=shardsNum,proto3" json:"shards_num,omitempty"`
StartPositions []*commonpb.KeyDataPair `protobuf:"bytes,11,rep,name=start_positions,json=startPositions,proto3" json:"start_positions,omitempty"`
ConsistencyLevel commonpb.ConsistencyLevel `protobuf:"varint,12,opt,name=consistency_level,json=consistencyLevel,proto3,enum=milvus.proto.common.ConsistencyLevel" json:"consistency_level,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
CreateTime uint64 `protobuf:"varint,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// deprecate
PartitionIDs []int64 `protobuf:"varint,4,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
// deprecate
PartitionNames []string `protobuf:"bytes,5,rep,name=partitionNames,proto3" json:"partitionNames,omitempty"`
FieldIndexes []*FieldIndexInfo `protobuf:"bytes,6,rep,name=field_indexes,json=fieldIndexes,proto3" json:"field_indexes,omitempty"`
VirtualChannelNames []string `protobuf:"bytes,7,rep,name=virtual_channel_names,json=virtualChannelNames,proto3" json:"virtual_channel_names,omitempty"`
PhysicalChannelNames []string `protobuf:"bytes,8,rep,name=physical_channel_names,json=physicalChannelNames,proto3" json:"physical_channel_names,omitempty"`
// deprecate
PartitionCreatedTimestamps []uint64 `protobuf:"varint,9,rep,packed,name=partition_created_timestamps,json=partitionCreatedTimestamps,proto3" json:"partition_created_timestamps,omitempty"`
ShardsNum int32 `protobuf:"varint,10,opt,name=shards_num,json=shardsNum,proto3" json:"shards_num,omitempty"`
StartPositions []*commonpb.KeyDataPair `protobuf:"bytes,11,rep,name=start_positions,json=startPositions,proto3" json:"start_positions,omitempty"`
ConsistencyLevel commonpb.ConsistencyLevel `protobuf:"varint,12,opt,name=consistency_level,json=consistencyLevel,proto3,enum=milvus.proto.common.ConsistencyLevel" json:"consistency_level,omitempty"`
Partitions []*PartitionInfo `protobuf:"bytes,13,rep,name=partitions,proto3" json:"partitions,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CollectionInfo) Reset() { *m = CollectionInfo{} }
func (m *CollectionInfo) String() string { return proto.CompactTextString(m) }
func (*CollectionInfo) ProtoMessage() {}
func (*CollectionInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{3}
return fileDescriptor_975d306d62b73e88, []int{2}
}
func (m *CollectionInfo) XXX_Unmarshal(b []byte) error {
@ -322,6 +271,68 @@ func (m *CollectionInfo) GetConsistencyLevel() commonpb.ConsistencyLevel {
return commonpb.ConsistencyLevel_Strong
}
func (m *CollectionInfo) GetPartitions() []*PartitionInfo {
if m != nil {
return m.Partitions
}
return nil
}
type PartitionInfo struct {
PartitionID int64 `protobuf:"varint,1,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
PartitionName string `protobuf:"bytes,2,opt,name=partitionName,proto3" json:"partitionName,omitempty"`
PartitionCreatedTimestamp uint64 `protobuf:"varint,3,opt,name=partition_created_timestamp,json=partitionCreatedTimestamp,proto3" json:"partition_created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PartitionInfo) Reset() { *m = PartitionInfo{} }
func (m *PartitionInfo) String() string { return proto.CompactTextString(m) }
func (*PartitionInfo) ProtoMessage() {}
func (*PartitionInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{3}
}
func (m *PartitionInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PartitionInfo.Unmarshal(m, b)
}
func (m *PartitionInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PartitionInfo.Marshal(b, m, deterministic)
}
func (m *PartitionInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PartitionInfo.Merge(m, src)
}
func (m *PartitionInfo) XXX_Size() int {
return xxx_messageInfo_PartitionInfo.Size(m)
}
func (m *PartitionInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PartitionInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PartitionInfo proto.InternalMessageInfo
func (m *PartitionInfo) GetPartitionID() int64 {
if m != nil {
return m.PartitionID
}
return 0
}
func (m *PartitionInfo) GetPartitionName() string {
if m != nil {
return m.PartitionName
}
return ""
}
func (m *PartitionInfo) GetPartitionCreatedTimestamp() uint64 {
if m != nil {
return m.PartitionCreatedTimestamp
}
return 0
}
type SegmentIndexInfo struct {
CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
@ -417,6 +428,7 @@ func (m *SegmentIndexInfo) GetCreateTime() uint64 {
return 0
}
// TODO move to proto files of interprocess communication
type CollectionMeta struct {
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
@ -496,64 +508,144 @@ func (m *CollectionMeta) GetPartitionIDs() []int64 {
return nil
}
type CredentialInfo struct {
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
// encrypted by bcrypt (for higher security level)
EncryptedPassword string `protobuf:"bytes,2,opt,name=encrypted_password,json=encryptedPassword,proto3" json:"encrypted_password,omitempty"`
Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"`
IsSuper bool `protobuf:"varint,4,opt,name=is_super,json=isSuper,proto3" json:"is_super,omitempty"`
// encrypted by sha256 (for good performance in cache mapping)
Sha256Password string `protobuf:"bytes,5,opt,name=sha256_password,json=sha256Password,proto3" json:"sha256_password,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CredentialInfo) Reset() { *m = CredentialInfo{} }
func (m *CredentialInfo) String() string { return proto.CompactTextString(m) }
func (*CredentialInfo) ProtoMessage() {}
func (*CredentialInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{6}
}
func (m *CredentialInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CredentialInfo.Unmarshal(m, b)
}
func (m *CredentialInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CredentialInfo.Marshal(b, m, deterministic)
}
func (m *CredentialInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_CredentialInfo.Merge(m, src)
}
func (m *CredentialInfo) XXX_Size() int {
return xxx_messageInfo_CredentialInfo.Size(m)
}
func (m *CredentialInfo) XXX_DiscardUnknown() {
xxx_messageInfo_CredentialInfo.DiscardUnknown(m)
}
var xxx_messageInfo_CredentialInfo proto.InternalMessageInfo
func (m *CredentialInfo) GetUsername() string {
if m != nil {
return m.Username
}
return ""
}
func (m *CredentialInfo) GetEncryptedPassword() string {
if m != nil {
return m.EncryptedPassword
}
return ""
}
func (m *CredentialInfo) GetTenant() string {
if m != nil {
return m.Tenant
}
return ""
}
func (m *CredentialInfo) GetIsSuper() bool {
if m != nil {
return m.IsSuper
}
return false
}
func (m *CredentialInfo) GetSha256Password() string {
if m != nil {
return m.Sha256Password
}
return ""
}
func init() {
proto.RegisterType((*ProxyMeta)(nil), "milvus.proto.etcd.ProxyMeta")
proto.RegisterType((*IndexInfo)(nil), "milvus.proto.etcd.IndexInfo")
proto.RegisterType((*FieldIndexInfo)(nil), "milvus.proto.etcd.FieldIndexInfo")
proto.RegisterType((*CollectionInfo)(nil), "milvus.proto.etcd.CollectionInfo")
proto.RegisterType((*PartitionInfo)(nil), "milvus.proto.etcd.PartitionInfo")
proto.RegisterType((*SegmentIndexInfo)(nil), "milvus.proto.etcd.SegmentIndexInfo")
proto.RegisterType((*CollectionMeta)(nil), "milvus.proto.etcd.CollectionMeta")
proto.RegisterType((*CredentialInfo)(nil), "milvus.proto.etcd.CredentialInfo")
}
func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) }
var fileDescriptor_975d306d62b73e88 = []byte{
// 737 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x4b, 0x6b, 0xeb, 0x46,
0x14, 0x46, 0x96, 0x1f, 0xd1, 0xb1, 0xae, 0x6f, 0x32, 0x7d, 0x30, 0x84, 0xb4, 0xd5, 0x35, 0xdc,
0x22, 0x28, 0xb5, 0x69, 0x6e, 0xe9, 0xae, 0xd0, 0xd6, 0x22, 0x60, 0xda, 0x06, 0x33, 0x09, 0x5d,
0x74, 0x23, 0xc6, 0xd2, 0xb1, 0x3d, 0xa0, 0x87, 0xd1, 0x8c, 0x42, 0xbc, 0xeb, 0xb2, 0x7f, 0xac,
0xbf, 0xa6, 0xff, 0xa1, 0x14, 0xcd, 0x48, 0xf2, 0x2b, 0x59, 0xde, 0x9d, 0xbf, 0xef, 0x3c, 0x34,
0xe7, 0x9c, 0xef, 0x33, 0xbc, 0x45, 0x15, 0xc5, 0x61, 0x8a, 0x8a, 0x4f, 0xb6, 0x45, 0xae, 0x72,
0x72, 0x95, 0x8a, 0xe4, 0xa9, 0x94, 0x06, 0x4d, 0xaa, 0xe8, 0xb5, 0x1b, 0xe5, 0x69, 0x9a, 0x67,
0x86, 0xba, 0x76, 0x65, 0xb4, 0xc1, 0xb4, 0x4e, 0x1f, 0xff, 0x65, 0x81, 0xb3, 0x28, 0xf2, 0xe7,
0xdd, 0xef, 0xa8, 0x38, 0x19, 0x41, 0x67, 0x1e, 0x50, 0xcb, 0xb3, 0x7c, 0x9b, 0x75, 0xe6, 0x01,
0xf9, 0x01, 0x06, 0x3c, 0x8e, 0x0b, 0x94, 0x92, 0x76, 0x3c, 0xcb, 0x1f, 0xde, 0xde, 0x4c, 0x8e,
0xda, 0xd7, 0x8d, 0x7f, 0x36, 0x39, 0xac, 0x49, 0x26, 0xdf, 0xc0, 0x55, 0x81, 0xb2, 0x4c, 0x54,
0x18, 0x6d, 0x78, 0x96, 0x61, 0x32, 0x0f, 0x24, 0xb5, 0x3d, 0xdb, 0x77, 0xd8, 0xa5, 0x09, 0xcc,
0x5a, 0x7e, 0xfc, 0x8f, 0x05, 0xce, 0x3c, 0x8b, 0xf1, 0x79, 0x9e, 0xad, 0x72, 0xf2, 0x05, 0x80,
0xa8, 0x40, 0x98, 0xf1, 0x14, 0xf5, 0x53, 0x1c, 0xe6, 0x68, 0xe6, 0x9e, 0xa7, 0x48, 0x28, 0x0c,
0x34, 0x98, 0x07, 0xfa, 0x45, 0x36, 0x6b, 0x20, 0x09, 0xc0, 0x35, 0x85, 0x5b, 0x5e, 0xf0, 0xd4,
0x7c, 0x6e, 0x78, 0xfb, 0xee, 0xc5, 0x07, 0xff, 0x8a, 0xbb, 0x3f, 0x78, 0x52, 0xe2, 0x82, 0x8b,
0x82, 0x0d, 0x75, 0xd9, 0x42, 0x57, 0x55, 0xfd, 0x63, 0x4c, 0x50, 0x61, 0x4c, 0xbb, 0x9e, 0xe5,
0x5f, 0xb0, 0x06, 0x92, 0xaf, 0x60, 0x18, 0x15, 0xc8, 0x15, 0x86, 0x4a, 0xa4, 0x48, 0x7b, 0x9e,
0xe5, 0x77, 0x19, 0x18, 0xea, 0x51, 0xa4, 0x38, 0x0e, 0x60, 0x74, 0x27, 0x30, 0x89, 0xf7, 0xb3,
0x50, 0x18, 0xac, 0x44, 0x82, 0x71, 0xbb, 0xd3, 0x06, 0xbe, 0x3e, 0xc6, 0xf8, 0xbf, 0x2e, 0x8c,
0x66, 0x79, 0x92, 0x60, 0xa4, 0x44, 0x9e, 0xe9, 0x36, 0xa7, 0x57, 0xf9, 0x11, 0xfa, 0xe6, 0x86,
0xf5, 0x51, 0xde, 0x1f, 0xcf, 0x58, 0xdf, 0x77, 0xdf, 0xe4, 0x41, 0x13, 0xac, 0x2e, 0x3a, 0x1d,
0xc4, 0x3e, 0x1d, 0x84, 0x8c, 0xc1, 0xdd, 0xf2, 0x42, 0x09, 0xfd, 0x80, 0x40, 0xd2, 0xae, 0x67,
0xfb, 0x36, 0x3b, 0xe2, 0xc8, 0xd7, 0x30, 0x6a, 0x71, 0x75, 0x18, 0x49, 0x7b, 0xfa, 0xbc, 0x27,
0x2c, 0xb9, 0x83, 0x37, 0xab, 0x6a, 0x29, 0xa1, 0x9e, 0x0f, 0x25, 0xed, 0xbf, 0x74, 0x96, 0x4a,
0xa6, 0x93, 0xe3, 0xe5, 0x31, 0x77, 0xd5, 0x62, 0x94, 0xe4, 0x16, 0x3e, 0x7b, 0x12, 0x85, 0x2a,
0x79, 0xd2, 0x48, 0x4a, 0x0b, 0x44, 0xd2, 0x81, 0xfe, 0xec, 0x27, 0x75, 0xb0, 0x96, 0x95, 0xf9,
0xf6, 0xf7, 0xf0, 0xf9, 0x76, 0xb3, 0x93, 0x22, 0x3a, 0x2b, 0xba, 0xd0, 0x45, 0x9f, 0x36, 0xd1,
0xa3, 0xaa, 0x9f, 0xe0, 0xa6, 0x9d, 0x21, 0x34, 0x5b, 0x89, 0xf5, 0xa6, 0xa4, 0xe2, 0xe9, 0x56,
0x52, 0xc7, 0xb3, 0xfd, 0x2e, 0xbb, 0x6e, 0x73, 0x66, 0x26, 0xe5, 0xb1, 0xcd, 0xa8, 0x24, 0x2c,
0x37, 0xbc, 0x88, 0x65, 0x98, 0x95, 0x29, 0x05, 0xcf, 0xf2, 0x7b, 0xcc, 0x31, 0xcc, 0x7d, 0x99,
0x92, 0x39, 0xbc, 0x95, 0x8a, 0x17, 0x2a, 0xdc, 0xe6, 0x52, 0x77, 0x90, 0x74, 0xa8, 0x97, 0xe2,
0xbd, 0xa6, 0xd5, 0x80, 0x2b, 0xae, 0xa5, 0x3a, 0xd2, 0x85, 0x8b, 0xa6, 0x8e, 0x30, 0xb8, 0x8a,
0xf2, 0x4c, 0x0a, 0xa9, 0x30, 0x8b, 0x76, 0x61, 0x82, 0x4f, 0x98, 0x50, 0xd7, 0xb3, 0xfc, 0xd1,
0xa9, 0x28, 0xea, 0x66, 0xb3, 0x7d, 0xf6, 0x6f, 0x55, 0x32, 0xbb, 0x8c, 0x4e, 0x98, 0xf1, 0xdf,
0x1d, 0xb8, 0x7c, 0xc0, 0x75, 0x8a, 0x99, 0xda, 0x2b, 0x79, 0x0c, 0x6e, 0xb4, 0x17, 0x65, 0x23,
0xc6, 0x23, 0x8e, 0x78, 0x30, 0x3c, 0x90, 0x48, 0xad, 0xeb, 0x43, 0x8a, 0xdc, 0x80, 0x23, 0xeb,
0xce, 0x81, 0xd6, 0x9d, 0xcd, 0xf6, 0x84, 0x71, 0x4b, 0x75, 0xf2, 0x40, 0x5b, 0x4f, 0xbb, 0x45,
0xc3, 0x43, 0xb7, 0xf4, 0x8e, 0x4d, 0x4f, 0x61, 0xb0, 0x2c, 0x85, 0xae, 0xe9, 0x9b, 0x48, 0x0d,
0xc9, 0x3b, 0x70, 0x31, 0xe3, 0xcb, 0x04, 0x8d, 0xf2, 0xe8, 0x40, 0xbb, 0x79, 0x68, 0x38, 0x3d,
0xd8, 0xa9, 0x11, 0x2e, 0xce, 0x1c, 0xfd, 0xaf, 0x75, 0xe8, 0xc5, 0x17, 0xff, 0x21, 0x3f, 0xb6,
0x17, 0xbf, 0x04, 0x68, 0x37, 0xd4, 0x38, 0xf1, 0x80, 0x21, 0xef, 0x0f, 0x7c, 0x18, 0x2a, 0xbe,
0x6e, 0x7c, 0xf8, 0xa6, 0x65, 0x1f, 0xf9, 0x5a, 0x9e, 0x59, 0xba, 0x7f, 0x6e, 0xe9, 0x5f, 0x3e,
0xfc, 0xf9, 0xdd, 0x5a, 0xa8, 0x4d, 0xb9, 0xac, 0xc4, 0x32, 0x35, 0x63, 0x7c, 0x2b, 0xf2, 0xfa,
0xd7, 0x54, 0x64, 0x0a, 0x8b, 0x8c, 0x27, 0x53, 0x3d, 0xd9, 0xb4, 0xb2, 0xec, 0x76, 0xb9, 0xec,
0x6b, 0xf4, 0xe1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x2d, 0xaa, 0x13, 0x88, 0x06, 0x00,
0x00,
// 837 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xdd, 0x6e, 0xe3, 0x44,
0x14, 0x96, 0xe3, 0xe6, 0xc7, 0x27, 0x69, 0xba, 0x1d, 0x60, 0x35, 0x5b, 0x16, 0xf0, 0x46, 0x2c,
0xf8, 0x66, 0x5b, 0xd1, 0x05, 0xee, 0x40, 0x2b, 0x6a, 0xad, 0x14, 0x01, 0xab, 0x68, 0x5a, 0x71,
0xc1, 0x8d, 0x35, 0xb1, 0x4f, 0x9b, 0x91, 0xec, 0xb1, 0xe5, 0x19, 0x17, 0xf2, 0x06, 0xbc, 0x01,
0x8f, 0xc2, 0x2d, 0x37, 0x3c, 0x0d, 0x2f, 0x81, 0x3c, 0xfe, 0x89, 0x9d, 0xb4, 0x5c, 0xee, 0x5d,
0xce, 0x77, 0xe6, 0x9c, 0xcc, 0x77, 0xce, 0x37, 0x9f, 0xe1, 0x04, 0x75, 0x18, 0x05, 0x09, 0x6a,
0x7e, 0x9e, 0xe5, 0xa9, 0x4e, 0xc9, 0x69, 0x22, 0xe2, 0xfb, 0x42, 0x55, 0xd1, 0x79, 0x99, 0x3d,
0x9b, 0x85, 0x69, 0x92, 0xa4, 0xb2, 0x82, 0xce, 0x66, 0x2a, 0xdc, 0x60, 0x52, 0x1f, 0x5f, 0xfc,
0x63, 0x81, 0xb3, 0x94, 0x11, 0xfe, 0xbe, 0x94, 0xb7, 0x29, 0xf9, 0x04, 0x40, 0x94, 0x41, 0x20,
0x79, 0x82, 0xd4, 0x72, 0x2d, 0xcf, 0x61, 0x8e, 0x41, 0xde, 0xf1, 0x04, 0x09, 0x85, 0xb1, 0x09,
0x96, 0x3e, 0x1d, 0xb8, 0x96, 0x67, 0xb3, 0x26, 0x24, 0x3e, 0xcc, 0xaa, 0xc2, 0x8c, 0xe7, 0x3c,
0x51, 0xd4, 0x76, 0x6d, 0x6f, 0x7a, 0xf9, 0xe2, 0xbc, 0x77, 0x99, 0xfa, 0x1a, 0x3f, 0xe2, 0xf6,
0x17, 0x1e, 0x17, 0xb8, 0xe2, 0x22, 0x67, 0x53, 0x53, 0xb6, 0x32, 0x55, 0x65, 0xff, 0x08, 0x63,
0xd4, 0x18, 0xd1, 0x23, 0xd7, 0xf2, 0x26, 0xac, 0x09, 0xc9, 0x67, 0x30, 0x0d, 0x73, 0xe4, 0x1a,
0x03, 0x2d, 0x12, 0xa4, 0x43, 0xd7, 0xf2, 0x8e, 0x18, 0x54, 0xd0, 0x8d, 0x48, 0x70, 0xe1, 0xc3,
0xfc, 0xad, 0xc0, 0x38, 0xda, 0x71, 0xa1, 0x30, 0xbe, 0x15, 0x31, 0x46, 0x4b, 0xdf, 0x10, 0xb1,
0x59, 0x13, 0x3e, 0x4e, 0x63, 0xf1, 0xf7, 0x10, 0xe6, 0x57, 0x69, 0x1c, 0x63, 0xa8, 0x45, 0x2a,
0x4d, 0x9b, 0x39, 0x0c, 0xda, 0x0e, 0x83, 0xa5, 0x4f, 0xbe, 0x83, 0x51, 0x35, 0x40, 0x53, 0x3b,
0xbd, 0x7c, 0xd9, 0xe7, 0x58, 0x0f, 0x77, 0xd7, 0xe4, 0xda, 0x00, 0xac, 0x2e, 0xda, 0x27, 0x62,
0xef, 0x13, 0x21, 0x0b, 0x98, 0x65, 0x3c, 0xd7, 0xc2, 0x5c, 0xc0, 0x57, 0xf4, 0xc8, 0xb5, 0x3d,
0x9b, 0xf5, 0x30, 0xf2, 0x05, 0xcc, 0xdb, 0xb8, 0x5c, 0x8c, 0xa2, 0x43, 0xd7, 0xf6, 0x1c, 0xb6,
0x87, 0x92, 0xb7, 0x70, 0x7c, 0x5b, 0x0e, 0x25, 0x30, 0xfc, 0x50, 0xd1, 0xd1, 0x43, 0x6b, 0x29,
0x35, 0x72, 0xde, 0x1f, 0x1e, 0x9b, 0xdd, 0xb6, 0x31, 0x2a, 0x72, 0x09, 0x1f, 0xdd, 0x8b, 0x5c,
0x17, 0x3c, 0x0e, 0xc2, 0x0d, 0x97, 0x12, 0x63, 0x23, 0x10, 0x45, 0xc7, 0xe6, 0x6f, 0x3f, 0xa8,
0x93, 0x57, 0x55, 0xae, 0xfa, 0xef, 0xaf, 0xe1, 0x69, 0xb6, 0xd9, 0x2a, 0x11, 0x1e, 0x14, 0x4d,
0x4c, 0xd1, 0x87, 0x4d, 0xb6, 0x57, 0xf5, 0x06, 0x9e, 0xb7, 0x1c, 0x82, 0x6a, 0x2a, 0x91, 0x99,
0x94, 0xd2, 0x3c, 0xc9, 0x14, 0x75, 0x5c, 0xdb, 0x3b, 0x62, 0x67, 0xed, 0x99, 0xab, 0xea, 0xc8,
0x4d, 0x7b, 0xa2, 0x94, 0xb0, 0xda, 0xf0, 0x3c, 0x52, 0x81, 0x2c, 0x12, 0x0a, 0xae, 0xe5, 0x0d,
0x99, 0x53, 0x21, 0xef, 0x8a, 0x84, 0x2c, 0xe1, 0x44, 0x69, 0x9e, 0xeb, 0x20, 0x4b, 0x95, 0xe9,
0xa0, 0xe8, 0xd4, 0x0c, 0xc5, 0x7d, 0x4c, 0xab, 0x3e, 0xd7, 0xdc, 0x48, 0x75, 0x6e, 0x0a, 0x57,
0x4d, 0x1d, 0x61, 0x70, 0x1a, 0xa6, 0x52, 0x09, 0xa5, 0x51, 0x86, 0xdb, 0x20, 0xc6, 0x7b, 0x8c,
0xe9, 0xcc, 0xb5, 0xbc, 0xf9, 0xbe, 0x28, 0xea, 0x66, 0x57, 0xbb, 0xd3, 0x3f, 0x95, 0x87, 0xd9,
0x93, 0x70, 0x0f, 0x21, 0x6f, 0x00, 0x5a, 0x6e, 0x8a, 0x1e, 0x3f, 0x74, 0x33, 0xb3, 0xae, 0x55,
0x2b, 0x87, 0x72, 0x5b, 0x9d, 0x9a, 0xc5, 0x9f, 0x16, 0x1c, 0xf7, 0xb2, 0xc4, 0x85, 0x69, 0x47,
0x3d, 0xb5, 0x94, 0xbb, 0x10, 0xf9, 0x1c, 0x8e, 0x7b, 0xca, 0x31, 0xd2, 0x76, 0x58, 0x1f, 0x24,
0xdf, 0xc3, 0xc7, 0xff, 0xb3, 0x9b, 0x5a, 0xca, 0xcf, 0x1e, 0x5d, 0xcd, 0xe2, 0x8f, 0x01, 0x3c,
0xb9, 0xc6, 0xbb, 0x04, 0xa5, 0xde, 0xbd, 0xd2, 0x05, 0xcc, 0xc2, 0xdd, 0x83, 0x6b, 0x6e, 0xd7,
0xc3, 0xf6, 0x09, 0x0c, 0x0e, 0x09, 0x3c, 0x07, 0x47, 0xd5, 0x9d, 0x7d, 0x73, 0x11, 0x9b, 0xed,
0x80, 0xca, 0x09, 0x4a, 0x39, 0xfb, 0xc6, 0x56, 0x8c, 0x13, 0x98, 0xb0, 0xeb, 0x04, 0xc3, 0xbe,
0xa1, 0x51, 0x18, 0xaf, 0x0b, 0x61, 0x6a, 0x46, 0x55, 0xa6, 0x0e, 0xc9, 0x0b, 0x98, 0xa1, 0xe4,
0xeb, 0x18, 0xab, 0x57, 0x45, 0xc7, 0xc6, 0xa9, 0xa6, 0x15, 0x66, 0x88, 0xed, 0x3f, 0xf2, 0xc9,
0x81, 0x5b, 0xfd, 0x6b, 0x75, 0x7d, 0xe6, 0x67, 0xd4, 0xfc, 0xbd, 0xfb, 0xcc, 0xa7, 0x00, 0xed,
0x84, 0x1a, 0x97, 0xe9, 0x20, 0xe4, 0x65, 0xc7, 0x63, 0x02, 0xcd, 0xef, 0x1a, 0x8f, 0xd9, 0x89,
0xe2, 0x86, 0xdf, 0xa9, 0x03, 0xbb, 0x1a, 0x1d, 0xda, 0xd5, 0xe2, 0xaf, 0x92, 0x6d, 0x8e, 0x11,
0x4a, 0x2d, 0x78, 0x6c, 0xd6, 0x7e, 0x06, 0x93, 0x42, 0x61, 0xde, 0xf9, 0xcc, 0xb4, 0x31, 0x79,
0x05, 0x04, 0x65, 0x98, 0x6f, 0xb3, 0x52, 0x5f, 0x19, 0x57, 0xea, 0xb7, 0x34, 0x8f, 0x6a, 0x49,
0x9e, 0xb6, 0x99, 0x55, 0x9d, 0x20, 0x4f, 0x61, 0xa4, 0x51, 0x72, 0xa9, 0x0d, 0x49, 0x87, 0xd5,
0x11, 0x79, 0x06, 0x13, 0xa1, 0x02, 0x55, 0x64, 0x98, 0x37, 0x5f, 0x13, 0xa1, 0xae, 0xcb, 0x90,
0x7c, 0x09, 0x27, 0x6a, 0xc3, 0x2f, 0xbf, 0xf9, 0x76, 0xd7, 0x7e, 0x68, 0x6a, 0xe7, 0x15, 0xdc,
0xf4, 0xfe, 0xe1, 0xf5, 0xaf, 0x5f, 0xdd, 0x09, 0xbd, 0x29, 0xd6, 0xe5, 0x13, 0xbe, 0xa8, 0x16,
0xf0, 0x4a, 0xa4, 0xf5, 0xaf, 0x0b, 0x21, 0x75, 0x79, 0xe7, 0xf8, 0xc2, 0xec, 0xe4, 0xa2, 0x7c,
0x99, 0xd9, 0x7a, 0x3d, 0x32, 0xd1, 0xeb, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x26, 0x75, 0x94,
0x72, 0x9b, 0x07, 0x00, 0x00,
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -26,6 +26,7 @@ import (
v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/kv"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -76,7 +77,7 @@ func (p *proxyManager) DelSessionFunc(fns ...func(*sessionutil.Session)) {
// WatchProxy starts a goroutine to watch proxy session changes on etcd
func (p *proxyManager) WatchProxy() error {
ctx, cancel := context.WithTimeout(p.ctx, RequestTimeout)
ctx, cancel := context.WithTimeout(p.ctx, kv.RequestTimeout)
defer cancel()
sessions, rev, err := p.getSessionsOnEtcd(ctx)
@ -209,7 +210,7 @@ func (p *proxyManager) Stop() {
// listProxyInEtcd helper function lists proxy in etcd
func listProxyInEtcd(ctx context.Context, cli *clientv3.Client) (map[int64]*sessionutil.Session, error) {
ctx2, cancel := context.WithTimeout(ctx, RequestTimeout)
ctx2, cancel := context.WithTimeout(ctx, kv.RequestTimeout)
defer cancel()
resp, err := cli.Get(
ctx2,

View File

@ -31,26 +31,24 @@ import (
"time"
"github.com/golang/protobuf/proto"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/allocator"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/log"
kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/metrics"
ms "github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/tso"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util"
@ -64,6 +62,8 @@ import (
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
)
// UniqueID is an alias of typeutil.UniqueID.
@ -133,7 +133,7 @@ type Core struct {
CallGetRecoveryInfoService func(ctx context.Context, collID, partID UniqueID) ([]*datapb.SegmentBinlogs, error)
//call index builder's client to build index, return build id or get index state.
CallBuildIndexService func(ctx context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error)
CallBuildIndexService func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error)
CallDropIndexService func(ctx context.Context, indexID typeutil.UniqueID) error
CallRemoveIndexService func(ctx context.Context, buildIDs []UniqueID) error
CallGetIndexStatesService func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error)
@ -383,13 +383,12 @@ func (c *Core) recycleDroppedIndex() {
return
case <-ticker.C:
droppedIndex := c.MetaTable.GetDroppedIndex()
for collID, fieldIndexes := range droppedIndex {
for _, fieldIndex := range fieldIndexes {
indexID := fieldIndex.GetIndexID()
fieldID := fieldIndex.GetFiledID()
for collID, idxIDs := range droppedIndex {
for _, indexID := range idxIDs {
if err := c.CallDropIndexService(c.ctx, indexID); err != nil {
log.Warn("Notify IndexCoord to drop index failed, wait to retry", zap.Int64("collID", collID),
zap.Int64("fieldID", fieldID), zap.Int64("indexID", indexID))
log.Warn("Notify IndexCoord to drop index failed, wait to retry",
zap.Int64("collID", collID),
zap.Int64("indexID", indexID))
}
}
}
@ -408,27 +407,29 @@ func (c *Core) createIndexForSegment(ctx context.Context, collID, partID, segID
log.Error("collection meta is not exist", zap.Int64("collID", collID))
return fmt.Errorf("collection meta is not exist with ID = %d", collID)
}
if len(collMeta.FieldIndexes) == 0 {
if len(collMeta.FieldIDToIndexID) == 0 {
log.Info("collection has no index, no need to build index on segment", zap.Int64("collID", collID),
zap.Int64("segID", segID))
return nil
}
for _, fieldIndex := range collMeta.FieldIndexes {
indexMeta, ok := indexID2Meta[fieldIndex.IndexID]
for _, t := range collMeta.FieldIDToIndexID {
fieldID := t.Key
indexID := t.Value
indexMeta, ok := indexID2Meta[indexID]
if !ok {
log.Warn("index has no meta", zap.Int64("collID", collID), zap.Int64("indexID", fieldIndex.IndexID))
return fmt.Errorf("index has no meta with ID = %d in collection %d", fieldIndex.IndexID, collID)
log.Warn("index has no meta", zap.Int64("collID", collID), zap.Int64("indexID", indexID))
return fmt.Errorf("index has no meta with ID = %d in collection %d", indexID, collID)
}
if indexMeta.Deleted {
if indexMeta.IsDeleted {
log.Info("index has been deleted, no need to build index on segment")
continue
}
field, err := GetFieldSchemaByID(&collMeta, fieldIndex.FiledID)
field, err := GetFieldSchemaByID(&collMeta, fieldID)
if err != nil {
log.Error("GetFieldSchemaByID failed",
zap.Int64("collectionID", collID),
zap.Int64("fieldID", fieldIndex.FiledID), zap.Error(err))
zap.Int64("fieldID", fieldID))
return err
}
if c.MetaTable.IsSegmentIndexed(segID, field, indexMeta.IndexParams) {
@ -440,15 +441,23 @@ func (c *Core) createIndexForSegment(ctx context.Context, collID, partID, segID
return err
}
segIndexInfo := etcdpb.SegmentIndexInfo{
CollectionID: collMeta.ID,
PartitionID: partID,
SegmentID: segID,
FieldID: fieldIndex.FiledID,
IndexID: fieldIndex.IndexID,
EnableIndex: false,
CreateTime: createTS,
indexInfo := model.Index{
CollectionID: collMeta.CollectionID,
FieldID: fieldID,
IndexID: indexID,
SegmentIndexes: map[int64]model.SegmentIndex{
segID: {
Segment: model.Segment{
PartitionID: partID,
SegmentID: segID,
},
EnableIndex: false,
CreateTime: createTS,
},
},
}
segIndexInfo := indexInfo.SegmentIndexes[segID]
buildID, err := c.BuildIndex(ctx, segID, numRows, binlogs, field, &indexMeta, false)
if err != nil {
log.Debug("build index failed",
@ -463,15 +472,15 @@ func (c *Core) createIndexForSegment(ctx context.Context, collID, partID, segID
segIndexInfo.EnableIndex = true
}
if err := c.MetaTable.AddIndex(&segIndexInfo); err != nil {
log.Error("Add index into meta table failed, need remove index with buildID",
zap.Int64("collectionID", collID), zap.Int64("indexID", fieldIndex.IndexID),
if err := c.MetaTable.AlterIndex(&indexInfo); err != nil {
log.Error("alter index into meta table failed, need remove index with buildID",
zap.Int64("collectionID", collID), zap.Int64("indexID", indexID),
zap.Int64("buildID", buildID), zap.Error(err))
if err = retry.Do(ctx, func() error {
return c.CallRemoveIndexService(ctx, []UniqueID{buildID})
}); err != nil {
log.Error("remove index failed, need to be resolved manually", zap.Int64("collectionID", collID),
zap.Int64("indexID", fieldIndex.IndexID), zap.Int64("buildID", buildID), zap.Error(err))
zap.Int64("indexID", indexID), zap.Int64("buildID", buildID), zap.Error(err))
return err
}
return err
@ -483,29 +492,29 @@ func (c *Core) createIndexForSegment(ctx context.Context, collID, partID, segID
func (c *Core) checkFlushedSegments(ctx context.Context) {
collID2Meta := c.MetaTable.dupCollectionMeta()
for collID, collMeta := range collID2Meta {
if len(collMeta.FieldIndexes) == 0 {
if len(collMeta.FieldIDToIndexID) == 0 {
continue
}
for _, partID := range collMeta.PartitionIDs {
segBinlogs, err := c.CallGetRecoveryInfoService(ctx, collMeta.ID, partID)
for _, part := range collMeta.Partitions {
segBinlogs, err := c.CallGetRecoveryInfoService(ctx, collMeta.CollectionID, part.PartitionID)
if err != nil {
log.Debug("failed to get flushed segments from dataCoord",
zap.Int64("collection ID", collMeta.GetID()),
zap.Int64("partition ID", partID),
zap.Int64("collection ID", collMeta.CollectionID),
zap.Int64("partition ID", part.PartitionID),
zap.Error(err))
continue
}
segIDs := make(map[UniqueID]struct{})
for _, segBinlog := range segBinlogs {
segIDs[segBinlog.GetSegmentID()] = struct{}{}
err = c.createIndexForSegment(ctx, collID, partID, segBinlog.GetSegmentID(), segBinlog.GetNumOfRows(), segBinlog.GetFieldBinlogs())
err = c.createIndexForSegment(ctx, collID, part.PartitionID, segBinlog.GetSegmentID(), segBinlog.GetNumOfRows(), segBinlog.GetFieldBinlogs())
if err != nil {
log.Error("createIndexForSegment failed, wait to retry", zap.Int64("collID", collID),
zap.Int64("partID", partID), zap.Int64("segID", segBinlog.GetSegmentID()), zap.Error(err))
zap.Int64("partID", part.PartitionID), zap.Int64("segID", segBinlog.GetSegmentID()), zap.Error(err))
continue
}
}
recycledSegIDs, recycledBuildIDs := c.MetaTable.AlignSegmentsMeta(collID, partID, segIDs)
recycledSegIDs, recycledBuildIDs := c.MetaTable.AlignSegmentsMeta(collID, part.PartitionID, segIDs)
log.Info("there buildIDs should be remove index", zap.Int64s("buildIDs", recycledBuildIDs))
if len(recycledBuildIDs) > 0 {
if err := c.CallRemoveIndexService(ctx, recycledBuildIDs); err != nil {
@ -514,9 +523,8 @@ func (c *Core) checkFlushedSegments(ctx context.Context) {
continue
}
}
if err := c.MetaTable.RemoveSegments(collID, partID, recycledSegIDs); err != nil {
log.Warn("remove segments failed, wait to retry", zap.Int64("collID", collID), zap.Int64("partID", partID),
if err := c.MetaTable.RemoveSegments(collID, part.PartitionID, recycledSegIDs); err != nil {
log.Warn("remove segments failed, wait to retry", zap.Int64("collID", collID), zap.Int64("partID", part.PartitionID),
zap.Int64s("segIDs", recycledSegIDs), zap.Error(err))
continue
}
@ -531,16 +539,16 @@ func (c *Core) getSegments(ctx context.Context, collID typeutil.UniqueID) (map[U
}
segID2PartID := make(map[UniqueID]UniqueID)
segID2Binlog := make(map[UniqueID]*datapb.SegmentBinlogs)
for _, partID := range collMeta.PartitionIDs {
if segs, err := c.CallGetRecoveryInfoService(ctx, collID, partID); err == nil {
for _, part := range collMeta.Partitions {
if segs, err := c.CallGetRecoveryInfoService(ctx, collID, part.PartitionID); err == nil {
for _, s := range segs {
segID2PartID[s.SegmentID] = partID
segID2PartID[s.SegmentID] = part.PartitionID
segID2Binlog[s.SegmentID] = s
}
} else {
log.Error("failed to get flushed segments info from dataCoord",
zap.Int64("collection ID", collID),
zap.Int64("partition ID", partID),
zap.Int64("partition ID", part.PartitionID),
zap.Error(err))
return nil, nil, err
}
@ -838,7 +846,7 @@ func (c *Core) SetIndexCoord(s types.IndexCoord) error {
}
}()
c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (retID typeutil.UniqueID, retErr error) {
c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (retID typeutil.UniqueID, retErr error) {
defer func() {
if err := recover(); err != nil {
retErr = fmt.Errorf("build index panic, msg = %v", err)
@ -852,7 +860,7 @@ func (c *Core) SetIndexCoord(s types.IndexCoord) error {
IndexID: idxInfo.IndexID,
IndexName: idxInfo.IndexName,
NumRows: numRows,
FieldSchema: field,
FieldSchema: model.MarshalFieldModel(field),
SegmentID: segID,
})
if err != nil {
@ -1019,14 +1027,14 @@ func (c *Core) SetQueryCoord(s types.QueryCoord) error {
}
// BuildIndex will check row num and call build index service
func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, binlogs []*datapb.FieldBinlog, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, isFlush bool) (typeutil.UniqueID, error) {
func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, binlogs []*datapb.FieldBinlog, field *model.Field, idxInfo *model.Index, isFlush bool) (typeutil.UniqueID, error) {
log.Debug("start build index", zap.String("index name", idxInfo.IndexName),
zap.String("field name", field.Name), zap.Int64("segment id", segID))
sp, ctx := trace.StartSpanFromContext(ctx)
defer sp.Finish()
if c.MetaTable.IsSegmentIndexed(segID, field, idxInfo.IndexParams) {
info, err := c.MetaTable.GetSegmentIndexInfoByID(segID, field.FieldID, idxInfo.GetIndexName())
return info.BuildID, err
info, err := c.MetaTable.GetSegmentIndexInfoByID(segID, field.FieldID, idxInfo.IndexName)
return info.SegmentIndexes[segID].BuildID, err
}
var bldID UniqueID
var err error
@ -1035,7 +1043,7 @@ func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, bi
} else {
binLogs := make([]string, 0)
for _, fieldBinLog := range binlogs {
if fieldBinLog.GetFieldID() == field.GetFieldID() {
if fieldBinLog.GetFieldID() == field.FieldID {
for _, binLog := range fieldBinLog.GetBinlogs() {
binLogs = append(binLogs, binLog.LogPath)
}
@ -1152,12 +1160,13 @@ func (c *Core) Init() error {
log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError))
return initError
}
var ss *suffixSnapshot
if ss, initError = newSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil {
var ss *kvmetestore.SuffixSnapshot
if ss, initError = kvmetestore.NewSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil {
log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError))
return initError
}
if c.MetaTable, initError = NewMetaTable(metaKV, ss); initError != nil {
if c.MetaTable, initError = NewMetaTable(c.ctx, metaKV, ss); initError != nil {
log.Error("RootCoord failed to new MetaTable", zap.Any("reason", initError))
return initError
}
@ -1336,7 +1345,7 @@ func (c *Core) reSendDdMsg(ctx context.Context, force bool) error {
if err != nil {
return err
}
if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err != nil {
if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err != nil {
if err = c.SendDdCreatePartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
return err
}
@ -1357,7 +1366,7 @@ func (c *Core) reSendDdMsg(ctx context.Context, force bool) error {
if err != nil {
return err
}
if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err == nil {
if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err == nil {
if err = c.SendDdDropPartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
return err
}
@ -2566,7 +2575,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
// Look up collection name on collection ID.
var colName string
var colMeta *etcdpb.CollectionInfo
var colMeta *model.Collection
if colMeta, err = c.MetaTable.GetCollectionByID(ti.GetCollectionId(), 0); err != nil {
log.Error("failed to get collection name",
zap.Int64("collection ID", ti.GetCollectionId()),
@ -2579,7 +2588,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
Reason: "failed to get collection name for collection ID" + strconv.FormatInt(ti.GetCollectionId(), 10),
}, nil
}
colName = colMeta.GetSchema().GetName()
colName = colMeta.Name
// When DataNode has done its thing, remove it from the busy node list. And send import task again
resendTaskFunc()
@ -2709,7 +2718,7 @@ func (c *Core) postImportPersistLoop(ctx context.Context, taskID int64, colID in
log.Error("failed to find meta for collection",
zap.Int64("collection ID", colID),
zap.Error(err))
} else if len(colMeta.GetFieldIndexes()) == 0 {
} else if len(colMeta.FieldIDToIndexID) == 0 {
log.Info("no index field found for collection", zap.Int64("collection ID", colID))
} else {
log.Info("start checking index state", zap.Int64("collection ID", colID))

View File

@ -28,19 +28,16 @@ import (
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
memkv "github.com/milvus-io/milvus/internal/kv/mem"
"github.com/milvus-io/milvus/internal/log"
kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
@ -57,6 +54,9 @@ import (
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"
)
const (
@ -473,7 +473,7 @@ func getNotTtMsg(ctx context.Context, n int, ch <-chan *msgstream.MsgPack) []msg
}
}
func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32, modifyFunc func(*etcdpb.CollectionInfo)) error {
func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32, modifyFunc func(collection *model.Collection)) error {
schema := schemapb.CollectionSchema{
Name: collName,
}
@ -534,24 +534,29 @@ func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32
chanNames[i] = funcutil.ToPhysicalChannel(vchanNames[i])
}
collInfo := etcdpb.CollectionInfo{
ID: collID,
Schema: &schema,
PartitionIDs: []typeutil.UniqueID{partID},
PartitionNames: []string{Params.CommonCfg.DefaultPartitionName},
FieldIndexes: make([]*etcdpb.FieldIndexInfo, 0, 16),
VirtualChannelNames: vchanNames,
PhysicalChannelNames: chanNames,
ShardsNum: 0, // intend to set zero
PartitionCreatedTimestamps: []uint64{0},
collInfo := model.Collection{
CollectionID: collID,
Name: schema.Name,
Description: schema.Description,
AutoID: schema.AutoID,
Fields: model.UnmarshalFieldModels(schema.Fields),
FieldIDToIndexID: make([]common.Int64Tuple, 0, 16),
VirtualChannelNames: vchanNames,
PhysicalChannelNames: chanNames,
ShardsNum: 0, // intend to set zero
Partitions: []*model.Partition{
{
PartitionID: partID,
PartitionName: Params.CommonCfg.DefaultPartitionName,
PartitionCreatedTimestamp: 0,
},
},
}
if modifyFunc != nil {
modifyFunc(&collInfo)
}
idxInfo := make([]*etcdpb.IndexInfo, 0, 16)
// schema is modified (add RowIDField and TimestampField),
// so need Marshal again
schemaBytes, err := proto.Marshal(&schema)
@ -596,7 +601,7 @@ func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32
// clear ddl timetick in all conditions
defer core.chanTimeTick.removeDdlTimeTick(ts, reason)
err = core.MetaTable.AddCollection(&collInfo, ts, idxInfo, ddOpStr)
err = core.MetaTable.AddCollection(&collInfo, ts, ddOpStr)
if err != nil {
return fmt.Errorf("meta table add collection failed,error = %w", err)
}
@ -773,22 +778,21 @@ func TestRootCoordInitData(t *testing.T) {
err = core.MetaTable.DeleteCredential(util.UserRoot)
assert.NoError(t, err)
snapshotKV, err := newMetaSnapshot(etcdCli, Params.EtcdCfg.MetaRootPath, TimestampPrefix, 7)
snapshotKV, err := kvmetestore.NewMetaSnapshot(etcdCli, Params.EtcdCfg.MetaRootPath, TimestampPrefix, 7)
assert.NotNil(t, snapshotKV)
assert.NoError(t, err)
txnKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
mt, err := NewMetaTable(txnKV, snapshotKV)
mt, err := NewMetaTable(context.TODO(), txnKV, snapshotKV)
assert.NoError(t, err)
mockTxnKV := &mockTestTxnKV{
TxnKV: mt.txn,
save: func(key, value string) error { return txnKV.Save(key, value) },
TxnKV: mt.txn,
save: func(key, value string) error {
return fmt.Errorf("save error")
},
remove: func(key string) error { return txnKV.Remove(key) },
}
mt.txn = mockTxnKV
// mock save data error
mockTxnKV.save = func(key, value string) error {
return fmt.Errorf("save error")
}
//mt.txn = mockTxnKV
mt.catalog = &kvmetestore.Catalog{Txn: mockTxnKV, Snapshot: snapshotKV}
core.MetaTable = mt
err = core.initData()
assert.Error(t, err)
@ -955,20 +959,19 @@ func TestRootCoord_Base(t *testing.T) {
dmlStream.Start()
pChanMap := core.MetaTable.ListCollectionPhysicalChannels()
assert.Greater(t, len(pChanMap[createMeta.ID]), 0)
assert.Greater(t, len(pChanMap[createMeta.CollectionID]), 0)
vChanMap := core.MetaTable.ListCollectionVirtualChannels()
assert.Greater(t, len(vChanMap[createMeta.ID]), 0)
assert.Greater(t, len(vChanMap[createMeta.CollectionID]), 0)
// get CreateCollectionMsg
msgs := getNotTtMsg(ctx, 1, dmlStream.Chan())
assert.Equal(t, 1, len(msgs))
createMsg, ok := (msgs[0]).(*msgstream.CreateCollectionMsg)
assert.True(t, ok)
assert.Equal(t, createMeta.ID, createMsg.CollectionID)
assert.Equal(t, 1, len(createMeta.PartitionIDs))
assert.Equal(t, createMeta.PartitionIDs[0], createMsg.PartitionID)
assert.Equal(t, 1, len(createMeta.PartitionNames))
assert.Equal(t, createMeta.PartitionNames[0], createMsg.PartitionName)
assert.Equal(t, createMeta.CollectionID, createMsg.CollectionID)
assert.Equal(t, 1, len(createMeta.Partitions))
assert.Equal(t, createMeta.Partitions[0].PartitionID, createMsg.PartitionID)
assert.Equal(t, createMeta.Partitions[0].PartitionName, createMsg.PartitionName)
assert.Equal(t, shardsNum, int32(len(createMeta.VirtualChannelNames)))
assert.Equal(t, shardsNum, int32(len(createMeta.PhysicalChannelNames)))
assert.Equal(t, shardsNum, createMeta.ShardsNum)
@ -994,23 +997,6 @@ func TestRootCoord_Base(t *testing.T) {
}
core.chanTimeTick.lock.Unlock()
// check DD operation info
flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
assert.NoError(t, err)
assert.Equal(t, "true", flag)
ddOpStr, err := core.MetaTable.txn.Load(DDOperationPrefix)
assert.NoError(t, err)
var ddOp DdOperation
err = DecodeDdOperation(ddOpStr, &ddOp)
assert.NoError(t, err)
assert.Equal(t, CreateCollectionDDType, ddOp.Type)
var ddCollReq = internalpb.CreateCollectionRequest{}
err = proto.Unmarshal(ddOp.Body, &ddCollReq)
assert.NoError(t, err)
assert.Equal(t, createMeta.ID, ddCollReq.CollectionID)
assert.Equal(t, createMeta.PartitionIDs[0], ddCollReq.PartitionID)
// check invalid operation
req.Base.MsgID = 101
req.Base.Timestamp = 101
@ -1111,7 +1097,7 @@ func TestRootCoord_Base(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
assert.Equal(t, collName, rsp.Schema.Name)
assert.Equal(t, collMeta.ID, rsp.CollectionID)
assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames)))
assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames)))
assert.Equal(t, shardsNum, rsp.ShardsNum)
@ -1157,8 +1143,8 @@ func TestRootCoord_Base(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(collMeta.PartitionIDs))
partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0)
assert.Equal(t, 2, len(collMeta.Partitions))
partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0)
assert.NoError(t, err)
assert.Equal(t, partName, partNameIdx1)
@ -1166,28 +1152,11 @@ func TestRootCoord_Base(t *testing.T) {
assert.Equal(t, 1, len(msgs))
partMsg, ok := (msgs[0]).(*msgstream.CreatePartitionMsg)
assert.True(t, ok)
assert.Equal(t, collMeta.ID, partMsg.CollectionID)
assert.Equal(t, collMeta.PartitionIDs[1], partMsg.PartitionID)
assert.Equal(t, collMeta.CollectionID, partMsg.CollectionID)
assert.Equal(t, collMeta.Partitions[1].PartitionID, partMsg.PartitionID)
assert.Equal(t, 1, len(pnm.GetCollIDs()))
assert.Equal(t, collMeta.ID, pnm.GetCollIDs()[0])
// check DD operation info
flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
assert.NoError(t, err)
assert.Equal(t, "true", flag)
ddOpStr, err := core.MetaTable.txn.Load(DDOperationPrefix)
assert.NoError(t, err)
var ddOp DdOperation
err = DecodeDdOperation(ddOpStr, &ddOp)
assert.NoError(t, err)
assert.Equal(t, CreatePartitionDDType, ddOp.Type)
var ddReq = internalpb.CreatePartitionRequest{}
err = proto.Unmarshal(ddOp.Body, &ddReq)
assert.NoError(t, err)
assert.Equal(t, collMeta.ID, ddReq.CollectionID)
assert.Equal(t, collMeta.PartitionIDs[1], ddReq.PartitionID)
assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[0])
err = core.reSendDdMsg(core.ctx, true)
assert.NoError(t, err)
@ -1227,7 +1196,7 @@ func TestRootCoord_Base(t *testing.T) {
},
DbName: dbName,
CollectionName: collName,
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
}
rsp, err := core.ShowPartitions(ctx, req)
assert.NoError(t, err)
@ -1241,7 +1210,7 @@ func TestRootCoord_Base(t *testing.T) {
defer wg.Done()
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
partID := coll.PartitionIDs[1]
partID := coll.Partitions[1].PartitionID
dm.mu.Lock()
dm.segs = []typeutil.UniqueID{1000, 1001, 1002, 1003, 1004, 1005}
dm.mu.Unlock()
@ -1253,7 +1222,7 @@ func TestRootCoord_Base(t *testing.T) {
Timestamp: 170,
SourceID: 170,
},
CollectionID: coll.GetID(),
CollectionID: coll.CollectionID,
PartitionID: partID,
}
rsp, err := core.ShowSegments(ctx, req)
@ -1290,7 +1259,7 @@ func TestRootCoord_Base(t *testing.T) {
}
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
assert.Equal(t, 0, len(collMeta.FieldIndexes))
assert.Equal(t, 0, len(collMeta.FieldIDToIndexID))
rsp, err := core.CreateIndex(ctx, req)
assert.NoError(t, err)
@ -1307,8 +1276,8 @@ func TestRootCoord_Base(t *testing.T) {
"file0-100", "file1-100", "file2-100"})
collMeta, err = core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(collMeta.FieldIndexes))
idxMeta, err := core.MetaTable.GetIndexByID(collMeta.FieldIndexes[0].IndexID)
assert.Equal(t, 1, len(collMeta.FieldIDToIndexID))
idxMeta, err := core.MetaTable.GetIndexByID(collMeta.FieldIDToIndexID[0].Value)
assert.NoError(t, err)
assert.Equal(t, Params.CommonCfg.DefaultIndexName, idxMeta.IndexName)
@ -1331,7 +1300,7 @@ func TestRootCoord_Base(t *testing.T) {
Timestamp: 190,
SourceID: 190,
},
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
SegmentID: 1000,
}
rsp, err := core.DescribeSegment(ctx, req)
@ -1391,24 +1360,24 @@ func TestRootCoord_Base(t *testing.T) {
assert.NoError(t, err)
// Normal case.
done, err := core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""),
collName, coll.ID, []UniqueID{1000, 1001, 1002})
collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
assert.NoError(t, err)
assert.Equal(t, true, done)
// Case with an empty result.
done, err = core.CountCompleteIndex(ctx, collName, coll.ID, []UniqueID{})
done, err = core.CountCompleteIndex(ctx, collName, coll.CollectionID, []UniqueID{})
assert.NoError(t, err)
assert.Equal(t, true, done)
// Case where GetIndexStates failed with error.
_, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnError),
collName, coll.ID, []UniqueID{1000, 1001, 1002})
collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
assert.Error(t, err)
// Case where GetIndexStates failed with bad status.
_, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnUnsuccessfulStatus),
collName, coll.ID, []UniqueID{1000, 1001, 1002})
collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
assert.Error(t, err)
// Case where describing segment fails, which is not considered as an error.
_, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""),
collName, coll.ID, []UniqueID{9000, 9001, 9002})
collName, coll.CollectionID, []UniqueID{9000, 9001, 9002})
assert.NoError(t, err)
})
@ -1417,7 +1386,7 @@ func TestRootCoord_Base(t *testing.T) {
defer wg.Done()
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
partID := coll.PartitionIDs[1]
partID := coll.Partitions[1].PartitionID
flushMsg := datapb.SegmentFlushCompletedMsg{
Base: &commonpb.MsgBase{
@ -1425,7 +1394,7 @@ func TestRootCoord_Base(t *testing.T) {
},
Segment: &datapb.SegmentInfo{
ID: segID,
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
PartitionID: partID,
},
}
@ -1455,7 +1424,7 @@ func TestRootCoord_Base(t *testing.T) {
t.Run("flush segment from compaction", func(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
partID := coll.PartitionIDs[1]
partID := coll.Partitions[1].PartitionID
flushMsg := datapb.SegmentFlushCompletedMsg{
Base: &commonpb.MsgBase{
@ -1463,7 +1432,7 @@ func TestRootCoord_Base(t *testing.T) {
},
Segment: &datapb.SegmentInfo{
ID: segID + 1,
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
PartitionID: partID,
CompactionFrom: []int64{segID},
CreatedByCompaction: true,
@ -1508,7 +1477,7 @@ func TestRootCoord_Base(t *testing.T) {
}
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
core.MetaTable.collName2ID[collName] = coll.GetID()
core.MetaTable.collName2ID[collName] = coll.CollectionID
rsp, err := core.Import(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
@ -1564,7 +1533,7 @@ func TestRootCoord_Base(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeSegment,
},
CollectionID: coll.ID,
CollectionID: coll.CollectionID,
SegmentID: segmentID,
}
segDesc, err := core.DescribeSegment(ctx, describeSegmentRequest)
@ -1596,10 +1565,15 @@ func TestRootCoord_Base(t *testing.T) {
return tID, 0, nil
}
core.MetaTable.collName2ID["new"+collName] = 123
core.MetaTable.collID2Meta[123] = etcdpb.CollectionInfo{
ID: 123,
PartitionIDs: []int64{456},
PartitionNames: []string{"testPartition"}}
core.MetaTable.collID2Meta[123] = model.Collection{
CollectionID: 123,
Partitions: []*model.Partition{
{
PartitionID: 456,
PartitionName: "testPartition",
},
},
}
req := &milvuspb.ImportRequest{
CollectionName: "new" + collName,
PartitionName: partName,
@ -1741,7 +1715,7 @@ func TestRootCoord_Base(t *testing.T) {
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(collMeta.FieldIndexes))
assert.Equal(t, 1, len(collMeta.FieldIDToIndexID))
rsp, err := core.CreateIndex(ctx, req)
assert.NoError(t, err)
@ -1803,14 +1777,14 @@ func TestRootCoord_Base(t *testing.T) {
}
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
dropPartID := collMeta.PartitionIDs[1]
dropPartID := collMeta.Partitions[1].PartitionID
status, err := core.DropPartition(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
collMeta, err = core.MetaTable.GetCollectionByName(collName, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(collMeta.PartitionIDs))
partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0)
assert.Equal(t, 1, len(collMeta.Partitions))
partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0)
assert.NoError(t, err)
assert.Equal(t, Params.CommonCfg.DefaultPartitionName, partName)
@ -1818,28 +1792,11 @@ func TestRootCoord_Base(t *testing.T) {
assert.Equal(t, 1, len(msgs))
dmsg, ok := (msgs[0]).(*msgstream.DropPartitionMsg)
assert.True(t, ok)
assert.Equal(t, collMeta.ID, dmsg.CollectionID)
assert.Equal(t, collMeta.CollectionID, dmsg.CollectionID)
assert.Equal(t, dropPartID, dmsg.PartitionID)
assert.Equal(t, 2, len(pnm.GetCollIDs()))
assert.Equal(t, collMeta.ID, pnm.GetCollIDs()[1])
// check DD operation info
flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
assert.NoError(t, err)
assert.Equal(t, "true", flag)
ddOpStr, err := core.MetaTable.txn.Load(DDOperationPrefix)
assert.NoError(t, err)
var ddOp DdOperation
err = DecodeDdOperation(ddOpStr, &ddOp)
assert.NoError(t, err)
assert.Equal(t, DropPartitionDDType, ddOp.Type)
var ddReq = internalpb.DropPartitionRequest{}
err = proto.Unmarshal(ddOp.Body, &ddReq)
assert.NoError(t, err)
assert.Equal(t, collMeta.ID, ddReq.CollectionID)
assert.Equal(t, dropPartID, ddReq.PartitionID)
assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[1])
err = core.reSendDdMsg(core.ctx, true)
assert.NoError(t, err)
@ -1856,7 +1813,7 @@ func TestRootCoord_Base(t *testing.T) {
MsgType: commonpb.MsgType_RemoveQueryChannels,
SourceID: core.session.ServerID,
},
CollectionID: collMeta.ID,
CollectionID: collMeta.CollectionID,
}
status, err := core.ReleaseDQLMessageStream(core.ctx, req)
assert.NoError(t, err)
@ -1889,15 +1846,15 @@ func TestRootCoord_Base(t *testing.T) {
assert.Equal(t, 1, len(msgs))
dmsg, ok := (msgs[0]).(*msgstream.DropCollectionMsg)
assert.True(t, ok)
assert.Equal(t, collMeta.ID, dmsg.CollectionID)
assert.Equal(t, collMeta.CollectionID, dmsg.CollectionID)
collIDs := pnm.GetCollIDs()
assert.Equal(t, 3, len(collIDs))
assert.Equal(t, collMeta.ID, collIDs[2])
assert.Equal(t, collMeta.CollectionID, collIDs[2])
time.Sleep(100 * time.Millisecond)
qm.mutex.Lock()
assert.Equal(t, 1, len(qm.collID))
assert.Equal(t, collMeta.ID, qm.collID[0])
assert.Equal(t, collMeta.CollectionID, qm.collID[0])
qm.mutex.Unlock()
req = &milvuspb.DropCollectionRequest{
@ -1916,23 +1873,7 @@ func TestRootCoord_Base(t *testing.T) {
time.Sleep(100 * time.Millisecond)
collIDs = pnm.GetCollIDs()
assert.Equal(t, 3, len(collIDs))
assert.Equal(t, collMeta.ID, collIDs[2])
// check DD operation info
flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
assert.NoError(t, err)
assert.Equal(t, "true", flag)
ddOpStr, err := core.MetaTable.txn.Load(DDOperationPrefix)
assert.NoError(t, err)
var ddOp DdOperation
err = DecodeDdOperation(ddOpStr, &ddOp)
assert.NoError(t, err)
assert.Equal(t, DropCollectionDDType, ddOp.Type)
var ddReq = internalpb.DropCollectionRequest{}
err = proto.Unmarshal(ddOp.Body, &ddReq)
assert.NoError(t, err)
assert.Equal(t, collMeta.ID, ddReq.CollectionID)
assert.Equal(t, collMeta.CollectionID, collIDs[2])
err = core.reSendDdMsg(core.ctx, true)
assert.NoError(t, err)
@ -2320,7 +2261,7 @@ func TestRootCoord_Base(t *testing.T) {
p2 := sessionutil.Session{
ServerID: 101,
}
ctx2, cancel2 := context.WithTimeout(ctx, RequestTimeout)
ctx2, cancel2 := context.WithTimeout(ctx, kvmetestore.RequestTimeout)
defer cancel2()
s1, err := json.Marshal(&p1)
assert.NoError(t, err)
@ -2985,7 +2926,7 @@ func TestRootCoord2(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
assert.Equal(t, collName, rsp.Schema.Name)
assert.Equal(t, collMeta.ID, rsp.CollectionID)
assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.VirtualChannelNames)))
assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.PhysicalChannelNames)))
assert.Equal(t, common.DefaultShardsNum, rsp.ShardsNum)
@ -3083,7 +3024,7 @@ func TestCheckInit(t *testing.T) {
err = c.checkInit()
assert.Error(t, err)
c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error) {
c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error) {
return 0, nil
}
err = c.checkInit()
@ -3225,25 +3166,26 @@ func TestCheckFlushedSegments(t *testing.T) {
var segID int64 = 1001
var fieldID int64 = 101
var indexID int64 = 6001
core.MetaTable.segID2IndexMeta[segID] = make(map[int64]etcdpb.SegmentIndexInfo)
core.MetaTable.partID2SegID[partID] = make(map[int64]bool)
core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{ID: collID}
core.MetaTable.partID2IndexedSegID[partID] = make(map[int64]bool)
core.MetaTable.collID2Meta[collID] = model.Collection{CollectionID: collID}
// do nothing, since collection has 0 index
core.checkFlushedSegments(ctx)
// get field schema by id fail
core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{
ID: collID,
PartitionIDs: []int64{partID},
FieldIndexes: []*etcdpb.FieldIndexInfo{
core.MetaTable.collID2Meta[collID] = model.Collection{
CollectionID: collID,
Partitions: []*model.Partition{
{
FiledID: fieldID,
IndexID: indexID,
PartitionID: partID,
},
},
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{},
FieldIDToIndexID: []common.Int64Tuple{
{
Key: fieldID,
Value: indexID,
},
},
Fields: []*model.Field{},
}
core.checkFlushedSegments(ctx)
@ -3259,23 +3201,26 @@ func TestCheckFlushedSegments(t *testing.T) {
core.checkFlushedSegments(core.ctx)
// missing index info
core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{
ID: collID,
PartitionIDs: []int64{partID},
FieldIndexes: []*etcdpb.FieldIndexInfo{
core.MetaTable.collID2Meta[collID] = model.Collection{
CollectionID: collID,
Fields: []*model.Field{
{
FiledID: fieldID,
IndexID: indexID,
FieldID: fieldID,
},
},
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: fieldID,
},
FieldIDToIndexID: []common.Int64Tuple{
{
Key: fieldID,
Value: indexID,
},
},
Partitions: []*model.Partition{
{
PartitionID: partID,
},
},
}
core.checkFlushedSegments(ctx)
// existing segID, buildIndex failed
core.CallGetFlushedSegmentsService = func(_ context.Context, cid, pid int64) ([]int64, error) {
@ -3283,10 +3228,10 @@ func TestCheckFlushedSegments(t *testing.T) {
assert.Equal(t, partID, pid)
return []int64{segID}, nil
}
core.MetaTable.indexID2Meta[indexID] = etcdpb.IndexInfo{
core.MetaTable.indexID2Meta[indexID] = &model.Index{
IndexID: indexID,
}
core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idx *etcdpb.IndexInfo, numRows int64) (int64, error) {
core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *model.Field, idx *model.Index, numRows int64) (int64, error) {
assert.Equal(t, fieldID, field.FieldID)
assert.Equal(t, indexID, idx.IndexID)
return -1, errors.New("build index build")
@ -3295,7 +3240,7 @@ func TestCheckFlushedSegments(t *testing.T) {
core.checkFlushedSegments(ctx)
var indexBuildID int64 = 10001
core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idx *etcdpb.IndexInfo, numRows int64) (int64, error) {
core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *model.Field, idx *model.Index, numRows int64) (int64, error) {
return indexBuildID, nil
}
core.checkFlushedSegments(core.ctx)
@ -3371,7 +3316,7 @@ func TestRootCoord_CheckZeroShardsNum(t *testing.T) {
time.Sleep(100 * time.Millisecond)
modifyFunc := func(collInfo *etcdpb.CollectionInfo) {
modifyFunc := func(collInfo *model.Collection) {
collInfo.ShardsNum = 0
}
@ -3394,7 +3339,7 @@ func TestRootCoord_CheckZeroShardsNum(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
assert.Equal(t, collName, rsp.Schema.Name)
assert.Equal(t, collMeta.ID, rsp.CollectionID)
assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames)))
assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames)))
assert.Equal(t, shardsNum, rsp.ShardsNum)
@ -3451,24 +3396,23 @@ func TestCore_DescribeSegments(t *testing.T) {
// success.
c.MetaTable = &MetaTable{
segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
segID: {
indexID: {
CollectionID: collID,
PartitionID: partID,
SegmentID: segID,
FieldID: fieldID,
IndexID: indexID,
BuildID: buildID,
EnableIndex: true,
},
},
},
indexID2Meta: map[typeutil.UniqueID]etcdpb.IndexInfo{
segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID},
indexID2Meta: map[typeutil.UniqueID]*model.Index{
indexID: {
IndexName: indexName,
IndexID: indexID,
IndexParams: nil,
IndexName: indexName,
IndexID: indexID,
IndexParams: nil,
CollectionID: collID,
FieldID: fieldID,
SegmentIndexes: map[int64]model.SegmentIndex{
segID: {
Segment: model.Segment{
PartitionID: partID,
SegmentID: segID,
},
BuildID: buildID,
EnableIndex: true},
},
},
},
}
@ -3505,7 +3449,7 @@ func TestCore_DescribeSegments(t *testing.T) {
func TestCore_getCollectionName(t *testing.T) {
mt := &MetaTable{
ddLock: sync.RWMutex{},
collID2Meta: make(map[int64]etcdpb.CollectionInfo),
collID2Meta: make(map[int64]model.Collection),
}
core := &Core{
@ -3517,14 +3461,9 @@ func TestCore_getCollectionName(t *testing.T) {
assert.Empty(t, collName)
assert.Empty(t, partName)
ids := make([]int64, 0)
names := make([]string, 0)
mt.collID2Meta[1] = etcdpb.CollectionInfo{
Schema: &schemapb.CollectionSchema{
Name: "dummy",
},
PartitionIDs: ids,
PartitionNames: names,
mt.collID2Meta[1] = model.Collection{
Name: "dummy",
Partitions: make([]*model.Partition, 0),
}
collName, partName, err = core.getCollectionName(1, 2)
@ -3532,14 +3471,14 @@ func TestCore_getCollectionName(t *testing.T) {
assert.Equal(t, "dummy", collName)
assert.Empty(t, partName)
ids = append(ids, 2)
names = append(names, "p2")
mt.collID2Meta[1] = etcdpb.CollectionInfo{
Schema: &schemapb.CollectionSchema{
Name: "dummy",
mt.collID2Meta[1] = model.Collection{
Name: "dummy",
Partitions: []*model.Partition{
{
PartitionID: 2,
PartitionName: "p2",
},
},
PartitionIDs: ids,
PartitionNames: names,
}
collName, partName, err = core.getCollectionName(1, 2)
@ -3556,12 +3495,12 @@ func TestCore_GetIndexState(t *testing.T) {
)
mt := &MetaTable{
ddLock: sync.RWMutex{},
collID2Meta: map[typeutil.UniqueID]etcdpb.CollectionInfo{
collID2Meta: map[typeutil.UniqueID]model.Collection{
1: {
FieldIndexes: []*etcdpb.FieldIndexInfo{
FieldIDToIndexID: []common.Int64Tuple{
{
FiledID: 1,
IndexID: 1,
Key: 1,
Value: 1,
},
},
},
@ -3569,21 +3508,22 @@ func TestCore_GetIndexState(t *testing.T) {
collName2ID: map[string]typeutil.UniqueID{
collName: 2,
},
indexID2Meta: map[typeutil.UniqueID]etcdpb.IndexInfo{
indexID2Meta: map[typeutil.UniqueID]*model.Index{
1: {
IndexID: 1,
IndexName: indexName,
},
},
segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
3: {
1: {
SegmentID: 3,
BuildID: 1,
EnableIndex: false,
SegmentIndexes: map[int64]model.SegmentIndex{
3: {
Segment: model.Segment{
SegmentID: 3,
},
EnableIndex: false,
BuildID: 1,
},
},
},
},
segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{3: 1},
}
core := &Core{
@ -3622,12 +3562,12 @@ func TestCore_GetIndexState(t *testing.T) {
})
t.Run("CallGetIndexStatesService failed", func(t *testing.T) {
core.MetaTable.segID2IndexMeta[3] = map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
1: {
SegmentID: 3,
BuildID: 1,
EnableIndex: true,
core.MetaTable.indexID2Meta[1].SegmentIndexes[3] = model.SegmentIndex{
Segment: model.Segment{
SegmentID: 3,
},
EnableIndex: true,
BuildID: 1,
}
core.CallGetIndexStatesService = func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error) {
return nil, errors.New("error occurred")

View File

@ -24,6 +24,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
model "github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
@ -169,21 +170,6 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
}
}
collInfo := etcdpb.CollectionInfo{
ID: collID,
Schema: &schema,
PartitionIDs: []typeutil.UniqueID{partID},
PartitionNames: []string{Params.CommonCfg.DefaultPartitionName},
FieldIndexes: make([]*etcdpb.FieldIndexInfo, 0, 16),
VirtualChannelNames: vchanNames,
PhysicalChannelNames: chanNames,
ShardsNum: t.Req.ShardsNum,
PartitionCreatedTimestamps: []uint64{0},
ConsistencyLevel: t.Req.ConsistencyLevel,
}
idxInfo := make([]*etcdpb.IndexInfo, 0, 16)
// schema is modified (add RowIDField and TimestampField),
// so need Marshal again
schemaBytes, err := proto.Marshal(&schema)
@ -218,6 +204,27 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
return fmt.Errorf("encodeDdOperation fail, error = %w", err)
}
collInfo := model.Collection{
CollectionID: collID,
Name: schema.Name,
Description: schema.Description,
AutoID: schema.AutoID,
Fields: model.UnmarshalFieldModels(schema.Fields),
VirtualChannelNames: vchanNames,
PhysicalChannelNames: chanNames,
ShardsNum: t.Req.ShardsNum,
ConsistencyLevel: t.Req.ConsistencyLevel,
FieldIDToIndexID: make([]common.Int64Tuple, 0, 16),
CreateTime: ts,
Partitions: []*model.Partition{
{
PartitionID: partID,
PartitionName: Params.CommonCfg.DefaultPartitionName,
PartitionCreatedTimestamp: ts,
},
},
}
// use lambda function here to guarantee all resources to be released
createCollectionFn := func() error {
// lock for ddl operation
@ -246,7 +253,7 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
}
// update meta table after send dd operation
if err = t.core.MetaTable.AddCollection(&collInfo, ts, idxInfo, ddOpStr); err != nil {
if err = t.core.MetaTable.AddCollection(&collInfo, ts, ddOpStr); err != nil {
t.core.chanTimeTick.removeDmlChannels(chanNames...)
t.core.chanTimeTick.removeDeltaChannels(deltaChanNames...)
// it's ok just to leave create collection message sent, datanode and querynode does't process CreateCollection logic
@ -304,26 +311,26 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
DbName: t.Req.DbName,
CollectionName: t.Req.CollectionName,
DbID: 0, //not used
CollectionID: collMeta.ID,
CollectionID: collMeta.CollectionID,
}
reason := fmt.Sprintf("drop collection %d", collMeta.ID)
reason := fmt.Sprintf("drop collection %d", collMeta.CollectionID)
ts, err := t.core.TSOAllocator(1)
if err != nil {
return fmt.Errorf("TSO alloc fail, error = %w", err)
}
//notify query service to release collection
if err = t.core.CallReleaseCollectionService(t.core.ctx, ts, 0, collMeta.ID); err != nil {
if err = t.core.CallReleaseCollectionService(t.core.ctx, ts, 0, collMeta.CollectionID); err != nil {
log.Error("Failed to CallReleaseCollectionService", zap.Error(err))
return err
}
// drop all indices
for _, fieldIndex := range collMeta.FieldIndexes {
if err := t.core.CallDropIndexService(t.core.ctx, fieldIndex.IndexID); err != nil {
for _, tuple := range collMeta.FieldIDToIndexID {
if err := t.core.CallDropIndexService(t.core.ctx, tuple.Value); err != nil {
log.Error("DropCollection CallDropIndexService fail", zap.String("collName", t.Req.CollectionName),
zap.Int64("indexID", fieldIndex.IndexID), zap.Error(err))
zap.Int64("indexID", tuple.Value), zap.Error(err))
return err
}
}
@ -357,7 +364,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
}
// update meta table after send dd operation
if err = t.core.MetaTable.DeleteCollection(collMeta.ID, ts, ddOpStr); err != nil {
if err = t.core.MetaTable.DeleteCollection(collMeta.CollectionID, ts, ddOpStr); err != nil {
return err
}
@ -391,7 +398,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
}
// invalidate all the collection meta cache with the specified collectionID
err = t.core.ExpireMetaCache(ctx, nil, collMeta.ID, ts)
err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts)
if err != nil {
return err
}
@ -443,7 +450,7 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
if t.Type() != commonpb.MsgType_DescribeCollection {
return fmt.Errorf("describe collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())])
}
var collInfo *etcdpb.CollectionInfo
var collInfo *model.Collection
var err error
if t.Req.CollectionName != "" {
@ -458,8 +465,13 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
}
}
t.Rsp.Schema = proto.Clone(collInfo.Schema).(*schemapb.CollectionSchema)
t.Rsp.CollectionID = collInfo.ID
t.Rsp.Schema = &schemapb.CollectionSchema{
Name: collInfo.Name,
Description: collInfo.Description,
AutoID: collInfo.AutoID,
Fields: model.MarshalFieldModels(collInfo.Fields),
}
t.Rsp.CollectionID = collInfo.CollectionID
t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames
t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames
if collInfo.ShardsNum == 0 {
@ -471,8 +483,8 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
t.Rsp.CreatedTimestamp = collInfo.CreateTime
createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
t.Rsp.Aliases = t.core.MetaTable.ListAliases(collInfo.ID)
t.Rsp.StartPositions = collInfo.GetStartPositions()
t.Rsp.Aliases = t.core.MetaTable.ListAliases(collInfo.CollectionID)
t.Rsp.StartPositions = collInfo.StartPositions
t.Rsp.CollectionName = t.Rsp.Schema.Name
return nil
}
@ -500,7 +512,7 @@ func (t *ShowCollectionReqTask) Execute(ctx context.Context) error {
}
for name, meta := range coll {
t.Rsp.CollectionNames = append(t.Rsp.CollectionNames, name)
t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.ID)
t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.CollectionID)
t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, meta.CreateTime)
physical, _ := tsoutil.ParseHybridTs(meta.CreateTime)
t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
@ -539,7 +551,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
CollectionName: t.Req.CollectionName,
PartitionName: t.Req.PartitionName,
DbID: 0, // todo, not used
CollectionID: collMeta.ID,
CollectionID: collMeta.CollectionID,
PartitionID: partID,
}
@ -572,7 +584,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
}
// update meta table after send dd operation
if err = t.core.MetaTable.AddPartition(collMeta.ID, t.Req.PartitionName, partID, ts, ddOpStr); err != nil {
if err = t.core.MetaTable.AddPartition(collMeta.CollectionID, t.Req.PartitionName, partID, ts, ddOpStr); err != nil {
return err
}
@ -590,7 +602,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
}
// invalidate all the collection meta cache with the specified collectionID
err = t.core.ExpireMetaCache(ctx, nil, collMeta.ID, ts)
err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts)
if err != nil {
return err
}
@ -619,7 +631,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
if err != nil {
return err
}
partID, err := t.core.MetaTable.GetPartitionByName(collInfo.ID, t.Req.PartitionName, 0)
partID, err := t.core.MetaTable.GetPartitionByName(collInfo.CollectionID, t.Req.PartitionName, 0)
if err != nil {
return err
}
@ -630,7 +642,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
CollectionName: t.Req.CollectionName,
PartitionName: t.Req.PartitionName,
DbID: 0, //todo,not used
CollectionID: collInfo.ID,
CollectionID: collInfo.CollectionID,
PartitionID: partID,
}
@ -663,7 +675,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
}
// update meta table after send dd operation
if _, err = t.core.MetaTable.DeletePartition(collInfo.ID, t.Req.PartitionName, ts, ddOpStr); err != nil {
if _, err = t.core.MetaTable.DeletePartition(collInfo.CollectionID, t.Req.PartitionName, ts, ddOpStr); err != nil {
return err
}
@ -681,7 +693,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
}
// invalidate all the collection meta cache with the specified collectionID
err = t.core.ExpireMetaCache(ctx, nil, collInfo.ID, ts)
err = t.core.ExpireMetaCache(ctx, nil, collInfo.CollectionID, ts)
if err != nil {
return err
}
@ -718,7 +730,7 @@ func (t *HasPartitionReqTask) Execute(ctx context.Context) error {
if err != nil {
return err
}
t.HasPartition = t.core.MetaTable.HasPartition(coll.ID, t.Req.PartitionName, 0)
t.HasPartition = t.core.MetaTable.HasPartition(coll.CollectionID, t.Req.PartitionName, 0)
return nil
}
@ -739,7 +751,7 @@ func (t *ShowPartitionReqTask) Execute(ctx context.Context) error {
if t.Type() != commonpb.MsgType_ShowPartitions {
return fmt.Errorf("show partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())])
}
var coll *etcdpb.CollectionInfo
var coll *model.Collection
var err error
if t.Req.CollectionName == "" {
coll, err = t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, 0)
@ -749,12 +761,13 @@ func (t *ShowPartitionReqTask) Execute(ctx context.Context) error {
if err != nil {
return err
}
t.Rsp.PartitionIDs = coll.PartitionIDs
t.Rsp.PartitionNames = coll.PartitionNames
t.Rsp.CreatedTimestamps = coll.PartitionCreatedTimestamps
t.Rsp.CreatedUtcTimestamps = make([]uint64, 0, len(coll.PartitionCreatedTimestamps))
for _, ts := range coll.PartitionCreatedTimestamps {
physical, _ := tsoutil.ParseHybridTs(ts)
for _, part := range coll.Partitions {
t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, part.PartitionID)
t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, part.PartitionName)
t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, part.PartitionCreatedTimestamp)
physical, _ := tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp)
t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
}
@ -785,7 +798,7 @@ func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error {
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, -1)
if err != nil {
log.Debug("Get flushed segment from data coord failed", zap.String("collection_name", coll.Schema.Name), zap.Error(err))
log.Debug("Get flushed segment from data coord failed", zap.String("collection_name", coll.Name), zap.Error(err))
return err
}
@ -801,16 +814,16 @@ func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error {
return fmt.Errorf("segment id %d not belong to collection id %d", t.Req.SegmentID, t.Req.CollectionID)
}
//TODO, get filed_id and index_name from request
segIdxInfo, err := t.core.MetaTable.GetSegmentIndexInfoByID(t.Req.SegmentID, -1, "")
index, err := t.core.MetaTable.GetSegmentIndexInfoByID(t.Req.SegmentID, -1, "")
log.Debug("RootCoord DescribeSegmentReqTask, MetaTable.GetSegmentIndexInfoByID", zap.Any("SegmentID", t.Req.SegmentID),
zap.Any("segIdxInfo", segIdxInfo), zap.Error(err))
zap.Any("index", index), zap.Error(err))
if err != nil {
return err
}
t.Rsp.IndexID = segIdxInfo.IndexID
t.Rsp.BuildID = segIdxInfo.BuildID
t.Rsp.EnableIndex = segIdxInfo.EnableIndex
t.Rsp.FieldID = segIdxInfo.FieldID
t.Rsp.IndexID = index.IndexID
t.Rsp.BuildID = index.SegmentIndexes[t.Req.SegmentID].BuildID
t.Rsp.EnableIndex = index.SegmentIndexes[t.Req.SegmentID].EnableIndex
t.Rsp.FieldID = index.FieldID
return nil
}
@ -836,8 +849,8 @@ func (t *ShowSegmentReqTask) Execute(ctx context.Context) error {
return err
}
exist := false
for _, partID := range coll.PartitionIDs {
if partID == t.Req.PartitionID {
for _, partition := range coll.Partitions {
if partition.PartitionID == t.Req.PartitionID {
exist = true
break
}
@ -847,7 +860,7 @@ func (t *ShowSegmentReqTask) Execute(ctx context.Context) error {
}
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, t.Req.PartitionID)
if err != nil {
log.Debug("Get flushed segments from data coord failed", zap.String("collection name", coll.Schema.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err))
log.Debug("Get flushed segments from data coord failed", zap.String("collection name", coll.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err))
return err
}
@ -896,7 +909,7 @@ func (t *DescribeSegmentsReqTask) Execute(ctx context.Context) error {
t.Rsp.SegmentInfos[segID] = &rootcoordpb.SegmentInfos{
BaseInfo: &rootcoordpb.SegmentBaseInfo{
CollectionID: collectionID,
PartitionID: 0, // TODO: change this after MetaTable.partID2SegID been fixed.
PartitionID: 0, // TODO: change this after MetaTable.partID2IndexedSegID been fixed.
SegmentID: segID,
},
IndexInfos: nil,
@ -904,34 +917,33 @@ func (t *DescribeSegmentsReqTask) Execute(ctx context.Context) error {
}
}
segmentInfo, err := t.core.MetaTable.GetSegmentIndexInfos(segID)
index, err := t.core.MetaTable.GetSegmentIndexInfos(segID)
if err != nil {
continue
}
for indexID, indexInfo := range segmentInfo {
t.Rsp.SegmentInfos[segID].IndexInfos =
append(t.Rsp.SegmentInfos[segID].IndexInfos,
&etcdpb.SegmentIndexInfo{
CollectionID: indexInfo.CollectionID,
PartitionID: indexInfo.PartitionID,
SegmentID: indexInfo.SegmentID,
FieldID: indexInfo.FieldID,
IndexID: indexInfo.IndexID,
BuildID: indexInfo.BuildID,
EnableIndex: indexInfo.EnableIndex,
})
extraIndexInfo, err := t.core.MetaTable.GetIndexByID(indexID)
if err != nil {
log.Warn("index not found in meta table, maybe index has been deleted",
zap.Error(err),
zap.Int64("indexID", indexID),
zap.Int64("collection", collectionID),
zap.Int64("segment", segID))
continue
}
t.Rsp.SegmentInfos[segID].ExtraIndexInfos[indexID] = extraIndexInfo
segIdxMeta, ok := index.SegmentIndexes[segID]
if !ok {
log.Error("requested segment index not found",
zap.Int64("collection", collectionID),
zap.Int64("indexID", index.IndexID),
zap.Int64("segment", segID))
return fmt.Errorf("segment index not found, collection: %d, segment: %d", collectionID, segID)
}
t.Rsp.SegmentInfos[segID].IndexInfos = append(
t.Rsp.SegmentInfos[segID].IndexInfos,
&etcdpb.SegmentIndexInfo{
CollectionID: index.CollectionID,
PartitionID: segIdxMeta.Segment.PartitionID,
SegmentID: segIdxMeta.Segment.SegmentID,
FieldID: index.FieldID,
IndexID: index.IndexID,
BuildID: segIdxMeta.BuildID,
EnableIndex: segIdxMeta.EnableIndex,
})
t.Rsp.SegmentInfos[segID].ExtraIndexInfos[index.IndexID] = model.MarshalIndexModel(&index)
}
return nil
@ -962,11 +974,13 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
if err != nil {
return err
}
createTS, err := t.core.TSOAllocator(1)
if err != nil {
return err
}
idxInfo := &etcdpb.IndexInfo{
idxInfo := &model.Index{
IndexName: indexName,
IndexID: indexID,
IndexParams: t.Req.ExtraParams,
@ -982,46 +996,61 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
if err != nil {
return err
}
segID2PartID, segID2Binlog, err := t.core.getSegments(ctx, collMeta.ID)
segID2PartID, segID2Binlog, err := t.core.getSegments(ctx, collMeta.CollectionID)
flushedSegs := make([]typeutil.UniqueID, 0, len(segID2PartID))
for k := range segID2PartID {
flushedSegs = append(flushedSegs, k)
}
if err != nil {
log.Debug("Get flushed segments from data coord failed", zap.String("collection_name", collMeta.Schema.Name), zap.Error(err))
log.Debug("get flushed segments from data coord failed", zap.String("collection_name", collMeta.Name), zap.Error(err))
return err
}
alreadyExists, err := t.core.MetaTable.AddIndex(t.Req.CollectionName, t.Req.FieldName, idxInfo, flushedSegs)
if err != nil {
log.Debug("add index into metastore failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Int64("index_id", idxInfo.IndexID), zap.Error(err))
return err
}
// backward compatible with support create the same index
if alreadyExists {
return nil
}
segIDs, field, err := t.core.MetaTable.GetNotIndexedSegments(t.Req.CollectionName, t.Req.FieldName, idxInfo, flushedSegs)
if err != nil {
log.Debug("RootCoord CreateIndexReqTask metaTable.GetNotIndexedSegments", zap.Error(err))
log.Debug("get not indexed segments failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Error(err))
return err
}
collectionID := collMeta.ID
cnt := 0
for _, segID := range segIDs {
info := etcdpb.SegmentIndexInfo{
CollectionID: collectionID,
PartitionID: segID2PartID[segID],
SegmentID: segID,
FieldID: field.FieldID,
IndexID: idxInfo.IndexID,
EnableIndex: false,
CreateTime: createTS,
segmentIndex := model.SegmentIndex{
Segment: model.Segment{
SegmentID: segID,
PartitionID: segID2PartID[segID],
},
EnableIndex: false,
CreateTime: createTS,
}
info.BuildID, err = t.core.BuildIndex(ctx, segID, segID2Binlog[segID].GetNumOfRows(), segID2Binlog[segID].GetFieldBinlogs(), &field, idxInfo, false)
segmentIndex.BuildID, err = t.core.BuildIndex(ctx, segID, segID2Binlog[segID].GetNumOfRows(), segID2Binlog[segID].GetFieldBinlogs(), &field, idxInfo, false)
if err != nil {
return err
}
if info.BuildID != 0 {
info.EnableIndex = true
if segmentIndex.BuildID != 0 {
segmentIndex.EnableIndex = true
}
if err := t.core.MetaTable.AddIndex(&info); err != nil {
log.Debug("Add index into meta table failed", zap.Int64("collection_id", collMeta.ID), zap.Int64("index_id", info.IndexID), zap.Int64("build_id", info.BuildID), zap.Error(err))
index := &model.Index{
CollectionID: collMeta.CollectionID,
FieldID: field.FieldID,
IndexID: idxInfo.IndexID,
SegmentIndexes: map[int64]model.SegmentIndex{segID: segmentIndex},
}
if err := t.core.MetaTable.AlterIndex(index); err != nil {
log.Error("alter index into meta table failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Int64("index_id", index.IndexID), zap.Int64("build_id", segmentIndex.BuildID), zap.Error(err))
return err
}
cnt++
}
return nil

View File

@ -6,18 +6,15 @@ import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
func TestDescribeSegmentReqTask_Type(t *testing.T) {
@ -72,48 +69,53 @@ func TestDescribeSegmentsReqTask_Execute(t *testing.T) {
return []typeutil.UniqueID{segID}, nil
}
c.MetaTable = &MetaTable{
segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{},
segID2IndexID: make(map[typeutil.UniqueID]typeutil.UniqueID, 1),
}
assert.NoError(t, tsk.Execute(context.Background()))
// index not found in meta. no return error
// index not found in meta
c.MetaTable = &MetaTable{
segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
segID: {
indexID: {
CollectionID: collID,
PartitionID: partID,
SegmentID: segID,
FieldID: fieldID,
IndexID: indexID,
BuildID: buildID,
EnableIndex: true,
segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID},
indexID2Meta: map[typeutil.UniqueID]*model.Index{
indexID: {
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
SegmentIndexes: map[int64]model.SegmentIndex{
segID + 1: {
Segment: model.Segment{
SegmentID: segID,
PartitionID: partID,
},
BuildID: buildID,
EnableIndex: true,
},
},
},
},
}
assert.NoError(t, tsk.Execute(context.Background()))
assert.Error(t, tsk.Execute(context.Background()))
// success.
c.MetaTable = &MetaTable{
segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
segID: {
indexID: {
CollectionID: collID,
PartitionID: partID,
SegmentID: segID,
FieldID: fieldID,
IndexID: indexID,
BuildID: buildID,
EnableIndex: true,
},
},
},
indexID2Meta: map[typeutil.UniqueID]etcdpb.IndexInfo{
segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID},
indexID2Meta: map[typeutil.UniqueID]*model.Index{
indexID: {
IndexName: indexName,
IndexID: indexID,
IndexParams: nil,
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IndexParams: nil,
SegmentIndexes: map[int64]model.SegmentIndex{
segID: {
Segment: model.Segment{
SegmentID: segID,
PartitionID: partID,
},
BuildID: buildID,
EnableIndex: true,
},
},
},
},
}

View File

@ -20,11 +20,13 @@ import (
"encoding/json"
"fmt"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -50,8 +52,8 @@ func EqualKeyPairArray(p1 []*commonpb.KeyValuePair, p2 []*commonpb.KeyValuePair)
}
// GetFieldSchemaByID return field schema by id
func GetFieldSchemaByID(coll *etcdpb.CollectionInfo, fieldID typeutil.UniqueID) (*schemapb.FieldSchema, error) {
for _, f := range coll.Schema.Fields {
func GetFieldSchemaByID(coll *model.Collection, fieldID typeutil.UniqueID) (*model.Field, error) {
for _, f := range coll.Fields {
if f.FieldID == fieldID {
return f, nil
}
@ -60,12 +62,12 @@ func GetFieldSchemaByID(coll *etcdpb.CollectionInfo, fieldID typeutil.UniqueID)
}
// GetFieldSchemaByIndexID return field schema by it's index id
func GetFieldSchemaByIndexID(coll *etcdpb.CollectionInfo, idxID typeutil.UniqueID) (*schemapb.FieldSchema, error) {
func GetFieldSchemaByIndexID(coll *model.Collection, idxID typeutil.UniqueID) (*model.Field, error) {
var fieldID typeutil.UniqueID
exist := false
for _, f := range coll.FieldIndexes {
if f.IndexID == idxID {
fieldID = f.FiledID
for _, t := range coll.FieldIDToIndexID {
if t.Value == idxID {
fieldID = t.Key
exist = true
break
}
@ -98,16 +100,6 @@ func DecodeDdOperation(str string, ddOp *DdOperation) error {
return json.Unmarshal([]byte(str), ddOp)
}
// SegmentIndexInfoEqual reports whether two SegmentIndexInfos describe the
// same segment-index binding: identical collection, partition, segment,
// field and index IDs, plus the same EnableIndex flag. Note that BuildID is
// deliberately excluded from the comparison.
func SegmentIndexInfoEqual(info1 *etcdpb.SegmentIndexInfo, info2 *etcdpb.SegmentIndexInfo) bool {
	// Bail out on the first mismatching identifier.
	if info1.CollectionID != info2.CollectionID {
		return false
	}
	if info1.PartitionID != info2.PartitionID {
		return false
	}
	if info1.SegmentID != info2.SegmentID {
		return false
	}
	if info1.FieldID != info2.FieldID {
		return false
	}
	if info1.IndexID != info2.IndexID {
		return false
	}
	return info1.EnableIndex == info2.EnableIndex
}
// EncodeMsgPositions serialize []*MsgPosition into string
func EncodeMsgPositions(msgPositions []*msgstream.MsgPosition) (string, error) {
if len(msgPositions) == 0 {
@ -127,3 +119,19 @@ func DecodeMsgPositions(str string, msgPositions *[]*msgstream.MsgPosition) erro
}
return json.Unmarshal([]byte(str), msgPositions)
}
// Int64TupleSliceToMap converts a slice of Int64Tuple into a map whose keys
// are the elements' positions in the input slice. The map is pre-sized to
// the slice length to avoid rehashing while filling.
func Int64TupleSliceToMap(s []common.Int64Tuple) map[int]common.Int64Tuple {
	ret := make(map[int]common.Int64Tuple, len(s))
	for idx := range s {
		ret[idx] = s[idx]
	}
	return ret
}
// Int64TupleMapToSlice collects the values of the given map into a slice,
// discarding the integer keys. Because Go map iteration order is
// unspecified, the ordering of the returned slice is not deterministic.
func Int64TupleMapToSlice(s map[int]common.Int64Tuple) []common.Int64Tuple {
	// Pre-allocate capacity so every append is a plain store, no regrowth.
	ret := make([]common.Int64Tuple, 0, len(s))
	for _, tuple := range s {
		ret = append(ret, tuple)
	}
	return ret
}

View File

@ -19,10 +19,12 @@ package rootcoord
import (
"testing"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
)
@ -60,12 +62,10 @@ func Test_EqualKeyPairArray(t *testing.T) {
}
func Test_GetFieldSchemaByID(t *testing.T) {
coll := &etcdpb.CollectionInfo{
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: 1,
},
coll := &model.Collection{
Fields: []*model.Field{
{
FieldID: 1,
},
},
}
@ -76,18 +76,16 @@ func Test_GetFieldSchemaByID(t *testing.T) {
}
func Test_GetFieldSchemaByIndexID(t *testing.T) {
coll := &etcdpb.CollectionInfo{
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: 1,
},
coll := &model.Collection{
Fields: []*model.Field{
{
FieldID: 1,
},
},
FieldIndexes: []*etcdpb.FieldIndexInfo{
FieldIDToIndexID: []common.Int64Tuple{
{
FiledID: 1,
IndexID: 2,
Key: 1,
Value: 2,
},
},
}