mirror of https://github.com/milvus-io/milvus.git

remove msgstream of segment info (#6280)

Signed-off-by: yefu.chen <yefu.chen@zilliz.com>
Branch: pull/6286/head
parent 8259404576
commit 111a24a49f

(Two file diffs in this commit are suppressed because they are too large.)
@@ -321,9 +321,3 @@ func (c *GrpcClient) SegmentFlushCompleted(ctx context.Context, in *datapb.Segme
    })
    return ret.(*commonpb.Status), err
}
func (c *GrpcClient) AddNewSegment(ctx context.Context, in *datapb.SegmentMsg) (*commonpb.Status, error) {
    ret, err := c.recall(func() (interface{}, error) {
        return c.grpcClient.AddNewSegment(ctx, in)
    })
    return ret.(*commonpb.Status), err
}
|
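The removed AddNewSegment client method follows the same shape as the surviving ones: every RPC is wrapped in recall, a retry helper. A minimal, self-contained sketch of that pattern, assuming a fixed retry count (the real helper's policy lives elsewhere in the client):

package main

import "fmt"

// recallSketch retries an RPC-shaped closure a few times and returns the
// last result. The retry count here is an assumption for illustration;
// the real GrpcClient.recall policy is defined elsewhere in milvus.
func recallSketch(caller func() (interface{}, error)) (interface{}, error) {
    const retries = 3 // assumed
    var (
        ret interface{}
        err error
    )
    for i := 0; i < retries; i++ {
        if ret, err = caller(); err == nil {
            return ret, nil
        }
    }
    return ret, err
}

func main() {
    n := 0
    out, err := recallSketch(func() (interface{}, error) {
        n++
        if n < 2 {
            return nil, fmt.Errorf("transient failure %d", n)
        }
        return "ok", nil
    })
    fmt.Println(out, err) // "ok" <nil> after one retry
}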
@@ -353,6 +353,3 @@ func (s *Server) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.Releas
func (s *Server) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error) {
    return s.rootCoord.SegmentFlushCompleted(ctx, in)
}
func (s *Server) AddNewSegment(ctx context.Context, in *datapb.SegmentMsg) (*commonpb.Status, error) {
    return s.rootCoord.AddNewSegment(ctx, in)
}
@@ -130,7 +130,6 @@ func TestGrpcService(t *testing.T) {
    rootcoord.Params.MsgChannelSubName = fmt.Sprintf("msgChannel%d", randVal)
    rootcoord.Params.TimeTickChannel = fmt.Sprintf("timeTick%d", randVal)
    rootcoord.Params.StatisticsChannel = fmt.Sprintf("stateChannel%d", randVal)
    rootcoord.Params.DataCoordSegmentChannel = fmt.Sprintf("segmentChannel%d", randVal)

    rootcoord.Params.MaxPartitionNum = 64
    rootcoord.Params.DefaultPartitionName = "_default"

@@ -166,11 +165,6 @@ func TestGrpcService(t *testing.T) {
    err = core.Init()
    assert.Nil(t, err)

    FlushedSegmentChan := make(chan *msgstream.MsgPack, 8)
    core.DataNodeFlushedSegmentChan = FlushedSegmentChan
    SegmentInfoChan := make(chan *msgstream.MsgPack, 8)
    core.DataCoordSegmentChan = SegmentInfoChan

    timeTickArray := make([]typeutil.Timestamp, 0, 16)
    timeTickLock := sync.Mutex{}
    core.SendTimeTick = func(ts typeutil.Timestamp) error {

@@ -214,6 +208,15 @@ func TestGrpcService(t *testing.T) {
    core.CallGetNumRowsService = func(ctx context.Context, segID typeutil.UniqueID, isFromFlushedChan bool) (int64, error) {
        return rootcoord.Params.MinSegmentSizeToEnableIndex, nil
    }
    segs := []typeutil.UniqueID{}
    segLock := sync.Mutex{}
    core.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) {
        segLock.Lock()
        defer segLock.Unlock()
        ret := []typeutil.UniqueID{}
        ret = append(ret, segs...)
        return ret, nil
    }

    var binlogLock sync.Mutex
    binlogPathArray := make([]string, 0, 16)

@@ -502,9 +505,9 @@ func TestGrpcService(t *testing.T) {
        collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
        assert.Nil(t, err)
        assert.Equal(t, 2, len(collMeta.PartitionIDs))
        partMeta, err := core.MetaTable.GetPartitionByID(1, collMeta.PartitionIDs[1], 0)
        partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0)
        assert.Nil(t, err)
        assert.Equal(t, partName, partMeta.PartitionName)
        assert.Equal(t, partName, partName2)
        assert.Equal(t, 1, len(collectionMetaCache))
    })

@@ -551,28 +554,12 @@ func TestGrpcService(t *testing.T) {
        coll, err := core.MetaTable.GetCollectionByName(collName, 0)
        assert.Nil(t, err)
        partID := coll.PartitionIDs[1]
        part, err := core.MetaTable.GetPartitionByID(1, partID, 0)
        _, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0)
        assert.Nil(t, err)
        assert.Zero(t, len(part.SegmentIDs))
        seg := &datapb.SegmentInfo{
            ID:           1000,
            CollectionID: coll.ID,
            PartitionID:  part.PartitionID,
        }
        segInfoMsgPack := GenSegInfoMsgPack(seg)
        SegmentInfoChan <- segInfoMsgPack
        time.Sleep(time.Millisecond * 100)
        part, err = core.MetaTable.GetPartitionByID(1, partID, 0)
        assert.Nil(t, err)
        assert.Equal(t, 1, len(part.SegmentIDs))

        // send msg twice, partition still contains 1 segment
        segInfoMsgPack1 := GenSegInfoMsgPack(seg)
        SegmentInfoChan <- segInfoMsgPack1
        time.Sleep(time.Millisecond * 100)
        part1, err := core.MetaTable.GetPartitionByID(1, partID, 0)
        assert.Nil(t, err)
        assert.Equal(t, 1, len(part1.SegmentIDs))
        segLock.Lock()
        segs = []typeutil.UniqueID{1000}
        segLock.Unlock()

        req := &milvuspb.ShowSegmentsRequest{
            Base: &commonpb.MsgBase{

@@ -674,33 +661,30 @@ func TestGrpcService(t *testing.T) {
        coll, err := core.MetaTable.GetCollectionByName(collName, 0)
        assert.Nil(t, err)
        partID := coll.PartitionIDs[1]
        part, err := core.MetaTable.GetPartitionByID(1, partID, 0)
        assert.Nil(t, err)
        assert.Equal(t, 1, len(part.SegmentIDs))
        seg := &datapb.SegmentInfo{
            ID:           segID,
            CollectionID: coll.ID,
            PartitionID:  part.PartitionID,
        }
        segInfoMsgPack := GenSegInfoMsgPack(seg)
        SegmentInfoChan <- segInfoMsgPack
        time.Sleep(time.Millisecond * 100)
        part, err = core.MetaTable.GetPartitionByID(1, partID, 0)
        assert.Nil(t, err)
        assert.Equal(t, 2, len(part.SegmentIDs))
        flushedSegMsgPack := GenFlushedSegMsgPack(segID)
        FlushedSegmentChan <- flushedSegMsgPack
        time.Sleep(time.Millisecond * 100)
        segIdxInfo, err := core.MetaTable.GetSegmentIndexInfoByID(segID, -1, "")
        _, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0)
        assert.Nil(t, err)

        // send msg twice, segIdxInfo should not change
        flushedSegMsgPack1 := GenFlushedSegMsgPack(segID)
        FlushedSegmentChan <- flushedSegMsgPack1
        time.Sleep(time.Millisecond * 100)
        segIdxInfo1, err := core.MetaTable.GetSegmentIndexInfoByID(segID, -1, "")
        segLock.Lock()
        segs = append(segs, segID)
        segLock.Unlock()

        flushReq := &datapb.SegmentFlushCompletedMsg{
            Base: &commonpb.MsgBase{
                MsgType: commonpb.MsgType_SegmentFlushDone,
            },
            Segment: &datapb.SegmentInfo{
                ID:           segID,
                CollectionID: coll.ID,
                PartitionID:  partID,
            },
        }
        flushRsp, err := cli.SegmentFlushCompleted(ctx, flushReq)
        assert.Nil(t, err)
        assert.Equal(t, segIdxInfo, segIdxInfo1)
        assert.Equal(t, flushRsp.ErrorCode, commonpb.ErrorCode_Success)

        flushRsp, err = cli.SegmentFlushCompleted(ctx, flushReq)
        assert.Nil(t, err)
        assert.Equal(t, flushRsp.ErrorCode, commonpb.ErrorCode_Success)

        req := &milvuspb.DescribeIndexRequest{
            Base: &commonpb.MsgBase{

@@ -766,9 +750,9 @@ func TestGrpcService(t *testing.T) {
        collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
        assert.Nil(t, err)
        assert.Equal(t, 1, len(collMeta.PartitionIDs))
        partMeta, err := core.MetaTable.GetPartitionByID(1, collMeta.PartitionIDs[0], 0)
        partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0)
        assert.Nil(t, err)
        assert.Equal(t, rootcoord.Params.DefaultPartitionName, partMeta.PartitionName)
        assert.Equal(t, rootcoord.Params.DefaultPartitionName, partName)
        assert.Equal(t, 2, len(collectionMetaCache))
    })
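The double-send assertions above (the same SegmentInfo message twice, the same flush request twice) pin down idempotency: a replayed message must not duplicate state. A minimal sketch of the guard such a handler needs, with hypothetical names rather than the rootcoord API:

package main

import "fmt"

// partitionState records a segment ID at most once, so replaying the same
// message is a no-op. Names here are illustrative only.
type partitionState struct {
    segmentIDs []int64
    seen       map[int64]bool
}

func (p *partitionState) addSegmentOnce(segID int64) {
    if p.seen[segID] {
        return // duplicate message: state unchanged
    }
    p.seen[segID] = true
    p.segmentIDs = append(p.segmentIDs, segID)
}

func main() {
    p := &partitionState{seen: map[int64]bool{}}
    p.addSegmentOnce(1000)
    p.addSegmentOnce(1000)        // second delivery of the same msg
    fmt.Println(len(p.segmentIDs)) // 1, matching the test's expectation
}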
@@ -18,12 +18,6 @@ message ProxyMeta {
  repeated string result_channelIDs = 3;
}

message PartitionInfo {
  string partition_name = 1;
  int64 partitionID = 2;
  repeated int64 segmentIDs = 3;
}

message IndexInfo {
  string index_name = 1;
  int64 indexID = 2;

@@ -40,9 +34,10 @@ message CollectionInfo {
  schema.CollectionSchema schema = 2;
  uint64 create_time = 3;
  repeated int64 partitionIDs = 4;
  repeated FieldIndexInfo field_indexes = 5;
  repeated string virtual_channel_names = 6;
  repeated string physical_channel_names = 7;
  repeated string partitonNames = 5;
  repeated FieldIndexInfo field_indexes = 6;
  repeated string virtual_channel_names = 7;
  repeated string physical_channel_names = 8;
}

message SegmentIndexInfo {

@@ -61,30 +56,3 @@ message CollectionMeta {
  repeated string partition_tags = 5;
  repeated int64 partitionIDs = 6;
}

message FieldBinlogFiles {
  int64 fieldID = 1;
  repeated string binlog_files = 2;
}

message SegmentMeta {
  int64 segmentID = 1;
  int64 collectionID = 2;
  string partition_tag = 3;
  int32 channel_start = 4;
  int32 channel_end = 5;
  uint64 open_time = 6;
  uint64 close_time = 7;
  int64 num_rows = 8;
  int64 mem_size = 9;
  repeated FieldBinlogFiles binlog_file_paths = 10;
}

message FieldIndexMeta {
  int64 segmentID = 1;
  int64 fieldID = 2;
  int64 indexID = 3;
  repeated common.KeyValuePair index_params = 4;
  common.IndexState state = 5;
  repeated string index_file_paths = 6;
}
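In the new CollectionInfo layout, a partition is identified by position-paired entries in partitionIDs and partitonNames (the identifier's spelling comes from the source). A hedged sketch of the lookup this pairing implies, with a local stand-in type rather than the generated struct:

package main

import (
    "errors"
    "fmt"
)

// collectionInfo mirrors just the two parallel fields from the new
// CollectionInfo message; the real generated struct has more fields.
type collectionInfo struct {
    PartitionIDs  []int64
    PartitonNames []string // spelling as in the proto definition
}

// partitionNameByID walks the paired slices; keeping both slices the same
// length is the invariant that AddCollection maintains below.
func partitionNameByID(c *collectionInfo, partID int64) (string, error) {
    for i, id := range c.PartitionIDs {
        if id == partID && i < len(c.PartitonNames) {
            return c.PartitonNames[i], nil
        }
    }
    return "", errors.New("partition id not found")
}

func main() {
    c := &collectionInfo{PartitionIDs: []int64{10}, PartitonNames: []string{"_default"}}
    name, err := partitionNameByID(c, 10)
    fmt.Println(name, err) // _default <nil>
}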
@@ -140,61 +140,6 @@ func (m *ProxyMeta) GetResultChannelIDs() []string {
    return nil
}

type PartitionInfo struct {
    PartitionName        string   `protobuf:"bytes,1,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
    PartitionID          int64    `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
    SegmentIDs           []int64  `protobuf:"varint,3,rep,packed,name=segmentIDs,proto3" json:"segmentIDs,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *PartitionInfo) Reset()         { *m = PartitionInfo{} }
func (m *PartitionInfo) String() string { return proto.CompactTextString(m) }
func (*PartitionInfo) ProtoMessage()    {}
func (*PartitionInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{2}
}

func (m *PartitionInfo) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_PartitionInfo.Unmarshal(m, b)
}
func (m *PartitionInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_PartitionInfo.Marshal(b, m, deterministic)
}
func (m *PartitionInfo) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PartitionInfo.Merge(m, src)
}
func (m *PartitionInfo) XXX_Size() int {
    return xxx_messageInfo_PartitionInfo.Size(m)
}
func (m *PartitionInfo) XXX_DiscardUnknown() {
    xxx_messageInfo_PartitionInfo.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionInfo proto.InternalMessageInfo

func (m *PartitionInfo) GetPartitionName() string {
    if m != nil {
        return m.PartitionName
    }
    return ""
}

func (m *PartitionInfo) GetPartitionID() int64 {
    if m != nil {
        return m.PartitionID
    }
    return 0
}

func (m *PartitionInfo) GetSegmentIDs() []int64 {
    if m != nil {
        return m.SegmentIDs
    }
    return nil
}

type IndexInfo struct {
    IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
    IndexID   int64  `protobuf:"varint,2,opt,name=indexID,proto3" json:"indexID,omitempty"`

@@ -208,7 +153,7 @@ func (m *IndexInfo) Reset() { *m = IndexInfo{} }
func (m *IndexInfo) String() string { return proto.CompactTextString(m) }
func (*IndexInfo) ProtoMessage()    {}
func (*IndexInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{3}
    return fileDescriptor_975d306d62b73e88, []int{2}
}

func (m *IndexInfo) XXX_Unmarshal(b []byte) error {

@@ -262,7 +207,7 @@ func (m *FieldIndexInfo) Reset() { *m = FieldIndexInfo{} }
func (m *FieldIndexInfo) String() string { return proto.CompactTextString(m) }
func (*FieldIndexInfo) ProtoMessage()    {}
func (*FieldIndexInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{4}
    return fileDescriptor_975d306d62b73e88, []int{3}
}

func (m *FieldIndexInfo) XXX_Unmarshal(b []byte) error {
@@ -302,9 +247,10 @@ type CollectionInfo struct {
    Schema               *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
    CreateTime           uint64                     `protobuf:"varint,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
    PartitionIDs         []int64                    `protobuf:"varint,4,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
    FieldIndexes         []*FieldIndexInfo          `protobuf:"bytes,5,rep,name=field_indexes,json=fieldIndexes,proto3" json:"field_indexes,omitempty"`
    VirtualChannelNames  []string                   `protobuf:"bytes,6,rep,name=virtual_channel_names,json=virtualChannelNames,proto3" json:"virtual_channel_names,omitempty"`
    PhysicalChannelNames []string                   `protobuf:"bytes,7,rep,name=physical_channel_names,json=physicalChannelNames,proto3" json:"physical_channel_names,omitempty"`
    PartitonNames        []string                   `protobuf:"bytes,5,rep,name=partitonNames,proto3" json:"partitonNames,omitempty"`
    FieldIndexes         []*FieldIndexInfo          `protobuf:"bytes,6,rep,name=field_indexes,json=fieldIndexes,proto3" json:"field_indexes,omitempty"`
    VirtualChannelNames  []string                   `protobuf:"bytes,7,rep,name=virtual_channel_names,json=virtualChannelNames,proto3" json:"virtual_channel_names,omitempty"`
    PhysicalChannelNames []string                   `protobuf:"bytes,8,rep,name=physical_channel_names,json=physicalChannelNames,proto3" json:"physical_channel_names,omitempty"`
    XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
    XXX_unrecognized     []byte                     `json:"-"`
    XXX_sizecache        int32                      `json:"-"`

@@ -314,7 +260,7 @@ func (m *CollectionInfo) Reset() { *m = CollectionInfo{} }
func (m *CollectionInfo) String() string { return proto.CompactTextString(m) }
func (*CollectionInfo) ProtoMessage()    {}
func (*CollectionInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{5}
    return fileDescriptor_975d306d62b73e88, []int{4}
}

func (m *CollectionInfo) XXX_Unmarshal(b []byte) error {

@@ -363,6 +309,13 @@ func (m *CollectionInfo) GetPartitionIDs() []int64 {
    return nil
}

func (m *CollectionInfo) GetPartitonNames() []string {
    if m != nil {
        return m.PartitonNames
    }
    return nil
}

func (m *CollectionInfo) GetFieldIndexes() []*FieldIndexInfo {
    if m != nil {
        return m.FieldIndexes

@@ -399,7 +352,7 @@ func (m *SegmentIndexInfo) Reset() { *m = SegmentIndexInfo{} }
func (m *SegmentIndexInfo) String() string { return proto.CompactTextString(m) }
func (*SegmentIndexInfo) ProtoMessage()    {}
func (*SegmentIndexInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{6}
    return fileDescriptor_975d306d62b73e88, []int{5}
}

func (m *SegmentIndexInfo) XXX_Unmarshal(b []byte) error {

@@ -471,7 +424,7 @@ func (m *CollectionMeta) Reset() { *m = CollectionMeta{} }
func (m *CollectionMeta) String() string { return proto.CompactTextString(m) }
func (*CollectionMeta) ProtoMessage()    {}
func (*CollectionMeta) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{7}
    return fileDescriptor_975d306d62b73e88, []int{6}
}

func (m *CollectionMeta) XXX_Unmarshal(b []byte) error {
@@ -534,316 +487,59 @@ func (m *CollectionMeta) GetPartitionIDs() []int64 {
    return nil
}

type FieldBinlogFiles struct {
    FieldID              int64    `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
    BinlogFiles          []string `protobuf:"bytes,2,rep,name=binlog_files,json=binlogFiles,proto3" json:"binlog_files,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *FieldBinlogFiles) Reset()         { *m = FieldBinlogFiles{} }
func (m *FieldBinlogFiles) String() string { return proto.CompactTextString(m) }
func (*FieldBinlogFiles) ProtoMessage()    {}
func (*FieldBinlogFiles) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{8}
}

func (m *FieldBinlogFiles) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_FieldBinlogFiles.Unmarshal(m, b)
}
func (m *FieldBinlogFiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_FieldBinlogFiles.Marshal(b, m, deterministic)
}
func (m *FieldBinlogFiles) XXX_Merge(src proto.Message) {
    xxx_messageInfo_FieldBinlogFiles.Merge(m, src)
}
func (m *FieldBinlogFiles) XXX_Size() int {
    return xxx_messageInfo_FieldBinlogFiles.Size(m)
}
func (m *FieldBinlogFiles) XXX_DiscardUnknown() {
    xxx_messageInfo_FieldBinlogFiles.DiscardUnknown(m)
}

var xxx_messageInfo_FieldBinlogFiles proto.InternalMessageInfo

func (m *FieldBinlogFiles) GetFieldID() int64 {
    if m != nil {
        return m.FieldID
    }
    return 0
}

func (m *FieldBinlogFiles) GetBinlogFiles() []string {
    if m != nil {
        return m.BinlogFiles
    }
    return nil
}

type SegmentMeta struct {
    SegmentID            int64               `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
    CollectionID         int64               `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
    PartitionTag         string              `protobuf:"bytes,3,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`
    ChannelStart         int32               `protobuf:"varint,4,opt,name=channel_start,json=channelStart,proto3" json:"channel_start,omitempty"`
    ChannelEnd           int32               `protobuf:"varint,5,opt,name=channel_end,json=channelEnd,proto3" json:"channel_end,omitempty"`
    OpenTime             uint64              `protobuf:"varint,6,opt,name=open_time,json=openTime,proto3" json:"open_time,omitempty"`
    CloseTime            uint64              `protobuf:"varint,7,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"`
    NumRows              int64               `protobuf:"varint,8,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"`
    MemSize              int64               `protobuf:"varint,9,opt,name=mem_size,json=memSize,proto3" json:"mem_size,omitempty"`
    BinlogFilePaths      []*FieldBinlogFiles `protobuf:"bytes,10,rep,name=binlog_file_paths,json=binlogFilePaths,proto3" json:"binlog_file_paths,omitempty"`
    XXX_NoUnkeyedLiteral struct{}            `json:"-"`
    XXX_unrecognized     []byte              `json:"-"`
    XXX_sizecache        int32               `json:"-"`
}

func (m *SegmentMeta) Reset()         { *m = SegmentMeta{} }
func (m *SegmentMeta) String() string { return proto.CompactTextString(m) }
func (*SegmentMeta) ProtoMessage()    {}
func (*SegmentMeta) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{9}
}

func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_SegmentMeta.Unmarshal(m, b)
}
func (m *SegmentMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_SegmentMeta.Marshal(b, m, deterministic)
}
func (m *SegmentMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_SegmentMeta.Merge(m, src)
}
func (m *SegmentMeta) XXX_Size() int {
    return xxx_messageInfo_SegmentMeta.Size(m)
}
func (m *SegmentMeta) XXX_DiscardUnknown() {
    xxx_messageInfo_SegmentMeta.DiscardUnknown(m)
}

var xxx_messageInfo_SegmentMeta proto.InternalMessageInfo

func (m *SegmentMeta) GetSegmentID() int64 {
    if m != nil {
        return m.SegmentID
    }
    return 0
}

func (m *SegmentMeta) GetCollectionID() int64 {
    if m != nil {
        return m.CollectionID
    }
    return 0
}

func (m *SegmentMeta) GetPartitionTag() string {
    if m != nil {
        return m.PartitionTag
    }
    return ""
}

func (m *SegmentMeta) GetChannelStart() int32 {
    if m != nil {
        return m.ChannelStart
    }
    return 0
}

func (m *SegmentMeta) GetChannelEnd() int32 {
    if m != nil {
        return m.ChannelEnd
    }
    return 0
}

func (m *SegmentMeta) GetOpenTime() uint64 {
    if m != nil {
        return m.OpenTime
    }
    return 0
}

func (m *SegmentMeta) GetCloseTime() uint64 {
    if m != nil {
        return m.CloseTime
    }
    return 0
}

func (m *SegmentMeta) GetNumRows() int64 {
    if m != nil {
        return m.NumRows
    }
    return 0
}

func (m *SegmentMeta) GetMemSize() int64 {
    if m != nil {
        return m.MemSize
    }
    return 0
}

func (m *SegmentMeta) GetBinlogFilePaths() []*FieldBinlogFiles {
    if m != nil {
        return m.BinlogFilePaths
    }
    return nil
}

type FieldIndexMeta struct {
    SegmentID            int64                    `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
    FieldID              int64                    `protobuf:"varint,2,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
    IndexID              int64                    `protobuf:"varint,3,opt,name=indexID,proto3" json:"indexID,omitempty"`
    IndexParams          []*commonpb.KeyValuePair `protobuf:"bytes,4,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
    State                commonpb.IndexState      `protobuf:"varint,5,opt,name=state,proto3,enum=milvus.proto.common.IndexState" json:"state,omitempty"`
    IndexFilePaths       []string                 `protobuf:"bytes,6,rep,name=index_file_paths,json=indexFilePaths,proto3" json:"index_file_paths,omitempty"`
    XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
    XXX_unrecognized     []byte                   `json:"-"`
    XXX_sizecache        int32                    `json:"-"`
}

func (m *FieldIndexMeta) Reset()         { *m = FieldIndexMeta{} }
func (m *FieldIndexMeta) String() string { return proto.CompactTextString(m) }
func (*FieldIndexMeta) ProtoMessage()    {}
func (*FieldIndexMeta) Descriptor() ([]byte, []int) {
    return fileDescriptor_975d306d62b73e88, []int{10}
}

func (m *FieldIndexMeta) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_FieldIndexMeta.Unmarshal(m, b)
}
func (m *FieldIndexMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_FieldIndexMeta.Marshal(b, m, deterministic)
}
func (m *FieldIndexMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_FieldIndexMeta.Merge(m, src)
}
func (m *FieldIndexMeta) XXX_Size() int {
    return xxx_messageInfo_FieldIndexMeta.Size(m)
}
func (m *FieldIndexMeta) XXX_DiscardUnknown() {
    xxx_messageInfo_FieldIndexMeta.DiscardUnknown(m)
}

var xxx_messageInfo_FieldIndexMeta proto.InternalMessageInfo

func (m *FieldIndexMeta) GetSegmentID() int64 {
    if m != nil {
        return m.SegmentID
    }
    return 0
}

func (m *FieldIndexMeta) GetFieldID() int64 {
    if m != nil {
        return m.FieldID
    }
    return 0
}

func (m *FieldIndexMeta) GetIndexID() int64 {
    if m != nil {
        return m.IndexID
    }
    return 0
}

func (m *FieldIndexMeta) GetIndexParams() []*commonpb.KeyValuePair {
    if m != nil {
        return m.IndexParams
    }
    return nil
}

func (m *FieldIndexMeta) GetState() commonpb.IndexState {
    if m != nil {
        return m.State
    }
    return commonpb.IndexState_IndexStateNone
}

func (m *FieldIndexMeta) GetIndexFilePaths() []string {
    if m != nil {
        return m.IndexFilePaths
    }
    return nil
}

func init() {
    proto.RegisterType((*TenantMeta)(nil), "milvus.proto.etcd.TenantMeta")
    proto.RegisterType((*ProxyMeta)(nil), "milvus.proto.etcd.ProxyMeta")
    proto.RegisterType((*PartitionInfo)(nil), "milvus.proto.etcd.PartitionInfo")
    proto.RegisterType((*IndexInfo)(nil), "milvus.proto.etcd.IndexInfo")
    proto.RegisterType((*FieldIndexInfo)(nil), "milvus.proto.etcd.FieldIndexInfo")
    proto.RegisterType((*CollectionInfo)(nil), "milvus.proto.etcd.CollectionInfo")
    proto.RegisterType((*SegmentIndexInfo)(nil), "milvus.proto.etcd.SegmentIndexInfo")
    proto.RegisterType((*CollectionMeta)(nil), "milvus.proto.etcd.CollectionMeta")
    proto.RegisterType((*FieldBinlogFiles)(nil), "milvus.proto.etcd.FieldBinlogFiles")
    proto.RegisterType((*SegmentMeta)(nil), "milvus.proto.etcd.SegmentMeta")
    proto.RegisterType((*FieldIndexMeta)(nil), "milvus.proto.etcd.FieldIndexMeta")
}

func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) }

var fileDescriptor_975d306d62b73e88 = []byte{
    // 899 bytes of a gzipped FileDescriptorProto (old descriptor; raw byte literals omitted)
    // 642 bytes of a gzipped FileDescriptorProto (new descriptor; raw byte literals omitted)
}
@@ -98,8 +98,6 @@ service RootCoord {
  rpc UpdateChannelTimeTick(internal.ChannelTimeTickMsg) returns (common.Status) {}
  rpc ReleaseDQLMessageStream(proxy.ReleaseDQLMessageStreamRequest) returns (common.Status) {}
  rpc SegmentFlushCompleted(data.SegmentFlushCompletedMsg) returns (common.Status) {}
  rpc AddNewSegment(data.SegmentMsg) returns (common.Status) {}

}

message AllocTimestampRequest {
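Per the service hunk above (-8,+6), the commit drops the AddNewSegment RPC; combined with the test changes, segment state is no longer pushed into rootcoord over a message stream but pulled from datacoord on demand through the CallGetFlushedSegmentsService hook. A hedged sketch of that pull style, with stand-in types rather than the real gRPC plumbing:

package main

import (
    "context"
    "fmt"
)

// getFlushedSegments stands in for the CallGetFlushedSegmentsService hook
// wired up in the tests; the real call goes to datacoord over gRPC.
type getFlushedSegments func(ctx context.Context, collID, partID int64) ([]int64, error)

// showSegments illustrates the pull model: segment IDs are fetched when a
// request needs them instead of being mirrored from a message stream.
func showSegments(ctx context.Context, get getFlushedSegments, collID, partID int64) ([]int64, error) {
    segs, err := get(ctx, collID, partID)
    if err != nil {
        return nil, fmt.Errorf("get flushed segments: %w", err)
    }
    return segs, nil
}

func main() {
    fake := func(ctx context.Context, collID, partID int64) ([]int64, error) {
        return []int64{1000}, nil // what the test's stub returns after a flush
    }
    segs, _ := showSegments(context.Background(), fake, 1, 10)
    fmt.Println(segs) // [1000]
}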
@@ -243,56 +243,55 @@ func init() {
func init() { proto.RegisterFile("root_coord.proto", fileDescriptor_4513485a144f6b06) }

var fileDescriptor_4513485a144f6b06 = []byte{
    // 783 bytes of a gzipped FileDescriptorProto (old descriptor; raw byte literals omitted)
    // 765 bytes of a gzipped FileDescriptorProto (new descriptor; raw byte literals omitted)
}

// Reference imports to suppress errors if they are not otherwise used.
@@ -375,7 +374,6 @@ type RootCoordClient interface {
    UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg, opts ...grpc.CallOption) (*commonpb.Status, error)
    ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
    SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg, opts ...grpc.CallOption) (*commonpb.Status, error)
    AddNewSegment(ctx context.Context, in *datapb.SegmentMsg, opts ...grpc.CallOption) (*commonpb.Status, error)
}

type rootCoordClient struct {
@@ -584,15 +582,6 @@ func (c *rootCoordClient) SegmentFlushCompleted(ctx context.Context, in *datapb.
    return out, nil
}

func (c *rootCoordClient) AddNewSegment(ctx context.Context, in *datapb.SegmentMsg, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.rootcoord.RootCoord/AddNewSegment", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

// RootCoordServer is the server API for RootCoord service.
type RootCoordServer interface {
    GetComponentStates(context.Context, *internalpb.GetComponentStatesRequest) (*internalpb.ComponentStates, error)
@@ -663,7 +652,6 @@ type RootCoordServer interface {
    UpdateChannelTimeTick(context.Context, *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error)
    ReleaseDQLMessageStream(context.Context, *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error)
    SegmentFlushCompleted(context.Context, *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error)
    AddNewSegment(context.Context, *datapb.SegmentMsg) (*commonpb.Status, error)
}

// UnimplementedRootCoordServer can be embedded to have forward compatible implementations.
@@ -736,9 +724,6 @@ func (*UnimplementedRootCoordServer) ReleaseDQLMessageStream(ctx context.Context
func (*UnimplementedRootCoordServer) SegmentFlushCompleted(ctx context.Context, req *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method SegmentFlushCompleted not implemented")
}
func (*UnimplementedRootCoordServer) AddNewSegment(ctx context.Context, req *datapb.SegmentMsg) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method AddNewSegment not implemented")
}

func RegisterRootCoordServer(s *grpc.Server, srv RootCoordServer) {
    s.RegisterService(&_RootCoord_serviceDesc, srv)
@@ -1140,24 +1125,6 @@ func _RootCoord_SegmentFlushCompleted_Handler(srv interface{}, ctx context.Conte
    return interceptor(ctx, in, info, handler)
}

func _RootCoord_AddNewSegment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(datapb.SegmentMsg)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(RootCoordServer).AddNewSegment(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.rootcoord.RootCoord/AddNewSegment",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(RootCoordServer).AddNewSegment(ctx, req.(*datapb.SegmentMsg))
    }
    return interceptor(ctx, in, info, handler)
}

var _RootCoord_serviceDesc = grpc.ServiceDesc{
    ServiceName: "milvus.proto.rootcoord.RootCoord",
    HandlerType: (*RootCoordServer)(nil),
@@ -1250,10 +1217,6 @@ var _RootCoord_serviceDesc = grpc.ServiceDesc{
            MethodName: "SegmentFlushCompleted",
            Handler:    _RootCoord_SegmentFlushCompleted_Handler,
        },
        {
            MethodName: "AddNewSegment",
            Handler:    _RootCoord_AddNewSegment_Handler,
        },
    },
    Streams:  []grpc.StreamDesc{},
    Metadata: "root_coord.proto",
@@ -24,7 +24,6 @@ import (
    "github.com/milvus-io/milvus/internal/kv"
    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/proto/commonpb"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
    "github.com/milvus-io/milvus/internal/proto/schemapb"
    "github.com/milvus-io/milvus/internal/util/typeutil"
@@ -35,17 +34,11 @@
    TenantMetaPrefix       = ComponentPrefix + "/tenant"
    ProxyMetaPrefix        = ComponentPrefix + "/proxy"
    CollectionMetaPrefix   = ComponentPrefix + "/collection"
    PartitionMetaPrefix    = ComponentPrefix + "/partition"
    SegmentIndexMetaPrefix = ComponentPrefix + "/segment-index"
    IndexMetaPrefix        = ComponentPrefix + "/index"

    TimestampPrefix = ComponentPrefix + "/timestamp"

    SegInfoMsgStartPosPrefix    = ComponentPrefix + "/seg-info-msg-start-position"
    SegInfoMsgEndPosPrefix      = ComponentPrefix + "/seg-info-msg-end-position"
    FlushedSegMsgStartPosPrefix = ComponentPrefix + "/flushed-seg-msg-start-position"
    FlushedSegMsgEndPosPrefix   = ComponentPrefix + "/flushed-seg-msg-end-position"

    DDOperationPrefix = ComponentPrefix + "/dd-operation"
    DDMsgSendPrefix   = ComponentPrefix + "/dd-msg-send"

@@ -56,18 +49,13 @@
)

type metaTable struct {
    client             kv.SnapShotKV                                                    // client of a reliable kv service, i.e. etcd client
    tenantID2Meta      map[typeutil.UniqueID]pb.TenantMeta                              // tenant id to tenant meta
    proxyID2Meta       map[typeutil.UniqueID]pb.ProxyMeta                               // proxy id to proxy meta
    collID2Meta        map[typeutil.UniqueID]pb.CollectionInfo                          // collection_id -> meta
    collName2ID        map[string]typeutil.UniqueID                                     // collection name to collection id
    partitionID2Meta   map[typeutil.UniqueID]pb.PartitionInfo                           // collection_id/partition_id -> meta
    segID2IndexMeta    map[typeutil.UniqueID]*map[typeutil.UniqueID]pb.SegmentIndexInfo // collection_id/index_id/partition_id/segment_id -> meta
    indexID2Meta       map[typeutil.UniqueID]pb.IndexInfo                               // collection_id/index_id -> meta
    segID2CollID       map[typeutil.UniqueID]typeutil.UniqueID                          // segment id -> collection id
    segID2PartitionID  map[typeutil.UniqueID]typeutil.UniqueID                          // segment id -> partition id
    flushedSegID       map[typeutil.UniqueID]bool                                       // flushed segment id
    partitionID2CollID map[typeutil.UniqueID]typeutil.UniqueID                          // partition id -> collection id
    client          kv.SnapShotKV                                                    // client of a reliable kv service, i.e. etcd client
    tenantID2Meta   map[typeutil.UniqueID]pb.TenantMeta                              // tenant id to tenant meta
    proxyID2Meta    map[typeutil.UniqueID]pb.ProxyMeta                               // proxy id to proxy meta
    collID2Meta     map[typeutil.UniqueID]pb.CollectionInfo                          // collection_id -> meta
    collName2ID     map[string]typeutil.UniqueID                                     // collection name to collection id
    segID2IndexMeta map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo // collection_id/index_id/partition_id/segment_id -> meta
    indexID2Meta    map[typeutil.UniqueID]pb.IndexInfo                               // collection_id/index_id -> meta

    tenantLock sync.RWMutex
    proxyLock  sync.RWMutex
@@ -94,13 +82,8 @@ func (mt *metaTable) reloadFromKV() error {
    mt.proxyID2Meta = make(map[typeutil.UniqueID]pb.ProxyMeta)
    mt.collID2Meta = make(map[typeutil.UniqueID]pb.CollectionInfo)
    mt.collName2ID = make(map[string]typeutil.UniqueID)
    mt.partitionID2Meta = make(map[typeutil.UniqueID]pb.PartitionInfo)
    mt.segID2IndexMeta = make(map[typeutil.UniqueID]*map[typeutil.UniqueID]pb.SegmentIndexInfo)
    mt.segID2IndexMeta = make(map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo)
    mt.indexID2Meta = make(map[typeutil.UniqueID]pb.IndexInfo)
    mt.partitionID2CollID = make(map[typeutil.UniqueID]typeutil.UniqueID)
    mt.segID2CollID = make(map[typeutil.UniqueID]typeutil.UniqueID)
    mt.segID2PartitionID = make(map[typeutil.UniqueID]typeutil.UniqueID)
    mt.flushedSegID = make(map[typeutil.UniqueID]bool)

    _, values, err := mt.client.LoadWithPrefix(TenantMetaPrefix, 0)
    if err != nil {
@@ -143,32 +126,6 @@ func (mt *metaTable) reloadFromKV() error {
        }
        mt.collID2Meta[collInfo.ID] = collInfo
        mt.collName2ID[collInfo.Schema.Name] = collInfo.ID
        for _, partID := range collInfo.PartitionIDs {
            mt.partitionID2CollID[partID] = collInfo.ID
        }
    }

    _, values, err = mt.client.LoadWithPrefix(PartitionMetaPrefix, 0)
    if err != nil {
        return err
    }
    for _, value := range values {
        partitionInfo := pb.PartitionInfo{}
        err = proto.UnmarshalText(value, &partitionInfo)
        if err != nil {
            return fmt.Errorf("RootCoord UnmarshalText pb.PartitionInfo err:%w", err)
        }
        collID, ok := mt.partitionID2CollID[partitionInfo.PartitionID]
        if !ok {
            log.Warn("partition does not belong to any collection", zap.Int64("partition id", partitionInfo.PartitionID))
            continue
        }
        mt.partitionID2Meta[partitionInfo.PartitionID] = partitionInfo
        for _, segID := range partitionInfo.SegmentIDs {
            mt.segID2CollID[segID] = collID
            mt.segID2PartitionID[segID] = partitionInfo.PartitionID
            mt.flushedSegID[segID] = true
        }
    }

    _, values, err = mt.client.LoadWithPrefix(SegmentIndexMetaPrefix, 0)
@ -183,11 +140,11 @@ func (mt *metaTable) reloadFromKV() error {
|
|||
}
|
||||
idx, ok := mt.segID2IndexMeta[segmentIndexInfo.SegmentID]
|
||||
if ok {
|
||||
(*idx)[segmentIndexInfo.IndexID] = segmentIndexInfo
|
||||
idx[segmentIndexInfo.IndexID] = segmentIndexInfo
|
||||
} else {
|
||||
meta := make(map[typeutil.UniqueID]pb.SegmentIndexInfo)
|
||||
meta[segmentIndexInfo.IndexID] = segmentIndexInfo
|
||||
mt.segID2IndexMeta[segmentIndexInfo.SegmentID] = &meta
|
||||
mt.segID2IndexMeta[segmentIndexInfo.SegmentID] = meta
|
||||
}
|
||||
}
|
||||
|
||||
|
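Note: the pointer indirection dropped above (map[...]*map[...] becomes map[...]map[...]) changes nothing semantically, because Go map values are themselves references. A minimal standalone sketch, not part of this patch, showing that a nested map mutates through the outer map without storing a pointer:

package main

import "fmt"

func main() {
	outer := make(map[int64]map[int64]string)
	outer[1] = map[int64]string{10: "a"}

	inner := outer[1] // inner aliases the same map header as outer[1]
	inner[11] = "b"   // the write is visible through outer

	fmt.Println(outer[1][11]) // prints "b"
}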
@@ -251,14 +208,11 @@ func (mt *metaTable) AddProxy(po *pb.ProxyMeta) (typeutil.Timestamp, error) {
	return ts, nil
}

func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionInfo, idx []*pb.IndexInfo, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) {
func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, partID typeutil.UniqueID, partName string, idx []*pb.IndexInfo, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	if len(part.SegmentIDs) != 0 {
		return 0, errors.New("segment should be empty when creating collection")
	}
	if len(coll.PartitionIDs) != 0 {
	if len(coll.PartitionIDs) != 0 || len(coll.PartitonNames) != 0 {
		return 0, errors.New("partitions should be empty when creating collection")
	}
	if _, ok := mt.collName2ID[coll.Schema.Name]; ok {

@@ -267,24 +221,18 @@ func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionIn
	if len(coll.FieldIndexes) != len(idx) {
		return 0, fmt.Errorf("incorrect index id when creating collection")
	}
	if _, ok := mt.partitionID2Meta[part.PartitionID]; ok {
		return 0, fmt.Errorf("partition id = %d exist", part.PartitionID)
	}

	coll.PartitionIDs = append(coll.PartitionIDs, part.PartitionID)
	coll.PartitionIDs = append(coll.PartitionIDs, partID)
	coll.PartitonNames = append(coll.PartitonNames, partName)
	mt.collID2Meta[coll.ID] = *coll
	mt.collName2ID[coll.Schema.Name] = coll.ID
	mt.partitionID2Meta[part.PartitionID] = *part
	mt.partitionID2CollID[part.PartitionID] = coll.ID
	for _, i := range idx {
		mt.indexID2Meta[i.IndexID] = *i
	}

	k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.ID)
	v1 := proto.MarshalTextString(coll)
	k2 := fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, coll.ID, part.PartitionID)
	v2 := proto.MarshalTextString(part)
	meta := map[string]string{k1: v1, k2: v2}
	meta := map[string]string{k1: v1}

	for _, i := range idx {
		k := fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, coll.ID, i.IndexID)

@@ -303,7 +251,7 @@ func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionIn
	return ts, nil
}

func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) {
func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, segIDs []typeutil.UniqueID, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

@@ -314,24 +262,13 @@ func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, ddOpStr func(ts

	delete(mt.collID2Meta, collID)
	delete(mt.collName2ID, collMeta.Schema.Name)
	for _, partID := range collMeta.PartitionIDs {
		partMeta, ok := mt.partitionID2Meta[partID]
	for _, segID := range segIDs {
		_, ok := mt.segID2IndexMeta[segID]
		if !ok {
			log.Warn("partition id not exist", zap.Int64("partition id", partID))
			log.Warn("segment id not exist", zap.Int64("segment id", segID))
			continue
		}
		delete(mt.partitionID2Meta, partID)
		for _, segID := range partMeta.SegmentIDs {
			delete(mt.segID2CollID, segID)
			delete(mt.segID2PartitionID, segID)
			delete(mt.flushedSegID, segID)
			_, ok := mt.segID2IndexMeta[segID]
			if !ok {
				log.Warn("segment id not exist", zap.Int64("segment id", segID))
				continue
			}
			delete(mt.segID2IndexMeta, segID)
		}
		delete(mt.segID2IndexMeta, segID)
	}
	for _, idxInfo := range collMeta.FieldIndexes {
		_, ok := mt.indexID2Meta[idxInfo.IndexID]

@@ -344,7 +281,6 @@ func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, ddOpStr func(ts

	delMetakeys := []string{
		fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID),
		fmt.Sprintf("%s/%d", PartitionMetaPrefix, collID),
		fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collID),
		fmt.Sprintf("%s/%d", IndexMetaPrefix, collID),
	}

@@ -432,22 +368,6 @@ func (mt *metaTable) GetCollectionByName(collectionName string, ts typeutil.Time
	return nil, fmt.Errorf("can't find collection: %s, at timestamp = %d", collectionName, ts)
}

func (mt *metaTable) GetCollectionBySegmentID(segID typeutil.UniqueID) (*pb.CollectionInfo, error) {
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()

	vid, ok := mt.segID2CollID[segID]
	if !ok {
		return nil, fmt.Errorf("segment id %d not belong to any collection", segID)
	}
	col, ok := mt.collID2Meta[vid]
	if !ok {
		return nil, fmt.Errorf("can't find collection id: %d", vid)
	}
	colCopy := proto.Clone(&col)
	return colCopy.(*pb.CollectionInfo), nil
}

func (mt *metaTable) ListCollections(ts typeutil.Timestamp) (map[string]typeutil.UniqueID, error) {
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()

@@ -511,34 +431,27 @@ func (mt *metaTable) AddPartition(collID typeutil.UniqueID, partitionName string
	if int64(len(coll.PartitionIDs)) >= Params.MaxPartitionNum {
		return 0, fmt.Errorf("maximum partition's number should be limit to %d", Params.MaxPartitionNum)
	}
	for _, t := range coll.PartitionIDs {
		part, ok := mt.partitionID2Meta[t]
		if !ok {
			log.Warn("partition id not exist", zap.Int64("partition id", t))
			continue
		}
		if part.PartitionName == partitionName {
			return 0, fmt.Errorf("partition name = %s already exists", partitionName)
		}
		if part.PartitionID == partitionID {

	if len(coll.PartitionIDs) != len(coll.PartitonNames) {
		return 0, fmt.Errorf("len(coll.PartitionIDs)=%d, len(coll.PartitonNames)=%d", len(coll.PartitionIDs), len(coll.PartitonNames))
	}

	for idx := range coll.PartitionIDs {
		if coll.PartitionIDs[idx] == partitionID {
			return 0, fmt.Errorf("partition id = %d already exists", partitionID)
		}
	}
	partMeta := pb.PartitionInfo{
		PartitionName: partitionName,
		PartitionID:   partitionID,
		SegmentIDs:    make([]typeutil.UniqueID, 0, 16),
		if coll.PartitonNames[idx] == partitionName {
			return 0, fmt.Errorf("partition name = %s already exists", partitionName)
		}

	}
	coll.PartitionIDs = append(coll.PartitionIDs, partitionID)
	mt.partitionID2Meta[partitionID] = partMeta
	coll.PartitonNames = append(coll.PartitonNames, partitionName)
	mt.collID2Meta[collID] = coll
	mt.partitionID2CollID[partitionID] = collID

	k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
	v1 := proto.MarshalTextString(&coll)
	k2 := fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, collID, partitionID)
	v2 := proto.MarshalTextString(&partMeta)
	meta := map[string]string{k1: v1, k2: v2}
	meta := map[string]string{k1: v1}

	// save ddOpStr into etcd
	addition := mt.getAdditionKV(ddOpStr, meta)

@@ -551,51 +464,71 @@ func (mt *metaTable) AddPartition(collID typeutil.UniqueID, partitionName string
	return ts, nil
}

func (mt *metaTable) getPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (pb.PartitionInfo, error) {
func (mt *metaTable) GetPartitionNameByID(collID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) (string, error) {
	if ts == 0 {
		mt.ddLock.RLock()
		defer mt.ddLock.RUnlock()
		collMeta, ok := mt.collID2Meta[collID]
		if !ok {
			return pb.PartitionInfo{}, fmt.Errorf("can't find collection id = %d", collID)
			return "", fmt.Errorf("can't find collection id = %d", collID)
		}
		for _, id := range collMeta.PartitionIDs {
			partMeta, ok := mt.partitionID2Meta[id]
			if ok && partMeta.PartitionName == partitionName {
				return partMeta, nil
		for idx := range collMeta.PartitionIDs {
			if collMeta.PartitionIDs[idx] == partitionID {
				return collMeta.PartitonNames[idx], nil
			}
		}
		return pb.PartitionInfo{}, fmt.Errorf("partition %s does not exist", partitionName)
		return "", fmt.Errorf("partition %d does not exist", partitionID)
	}
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
	collVal, err := mt.client.Load(collKey, ts)
	if err != nil {
		return pb.PartitionInfo{}, err
		return "", err
	}
	collMeta := pb.CollectionMeta{}
	collMeta := pb.CollectionInfo{}
	err = proto.UnmarshalText(collVal, &collMeta)
	if err != nil {
		return pb.PartitionInfo{}, err
		return "", err
	}
	for _, id := range collMeta.PartitionIDs {
		partKey := fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, collID, id)
		partVal, err := mt.client.Load(partKey, ts)
		if err != nil {
			log.Debug("load partition meta failed", zap.String("collection name", collMeta.Schema.Name), zap.Int64("partition id", id))
			continue
		}
		partMeta := pb.PartitionInfo{}
		err = proto.UnmarshalText(partVal, &partMeta)
		if err != nil {
			log.Debug("unmarshal partition meta failed", zap.Error(err))
			continue
		}
		if partMeta.PartitionName == partitionName {
			return partMeta, nil
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitionIDs[idx] == partitionID {
			return collMeta.PartitonNames[idx], nil
		}
	}
	return pb.PartitionInfo{}, fmt.Errorf("partition %s does not exist", partitionName)
	return "", fmt.Errorf("partition %d does not exist", partitionID)
}

func (mt *metaTable) GetPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (pb.PartitionInfo, error) {
func (mt *metaTable) getPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
	if ts == 0 {
		collMeta, ok := mt.collID2Meta[collID]
		if !ok {
			return 0, fmt.Errorf("can't find collection id = %d", collID)
		}
		for idx := range collMeta.PartitionIDs {
			if collMeta.PartitonNames[idx] == partitionName {
				return collMeta.PartitionIDs[idx], nil
			}
		}
		return 0, fmt.Errorf("partition %s does not exist", partitionName)
	}
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
	collVal, err := mt.client.Load(collKey, ts)
	if err != nil {
		return 0, err
	}
	collMeta := pb.CollectionInfo{}
	err = proto.UnmarshalText(collVal, &collMeta)
	if err != nil {
		return 0, err
	}
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitonNames[idx] == partitionName {
			return collMeta.PartitionIDs[idx], nil
		}
	}
	return 0, fmt.Errorf("partition %s does not exist", partitionName)
}

func (mt *metaTable) GetPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()
	return mt.getPartitionByName(collID, partitionName, ts)
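Note: pb.PartitionInfo no longer carries partition metadata here; PartitionIDs and PartitonNames are parallel slices inside pb.CollectionInfo, and both lookups above resolve a partition by scanning for the matching index. A minimal standalone sketch of that pattern (hypothetical helper, not part of this patch):

// partitionNameByID resolves a partition name from parallel id/name slices.
func partitionNameByID(ids []int64, names []string, partID int64) (string, bool) {
	for i := range ids {
		if ids[i] == partID {
			return names[i], true
		}
	}
	return "", false
}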
@@ -609,7 +542,7 @@ func (mt *metaTable) HasPartition(collID typeutil.UniqueID, partitionName string
}

//return timestamp, partitionid, error
func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName string, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, typeutil.UniqueID, error) {
func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName string, segIDs []typeutil.UniqueID, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, typeutil.UniqueID, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

@@ -626,30 +559,25 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
	exist := false

	pd := make([]typeutil.UniqueID, 0, len(collMeta.PartitionIDs))
	var partMeta pb.PartitionInfo
	for _, t := range collMeta.PartitionIDs {
		pm, ok := mt.partitionID2Meta[t]
		if ok {
			if pm.PartitionName != partitionName {
				pd = append(pd, pm.PartitionID)
			} else {
				partMeta = pm
				exist = true
			}
	pn := make([]string, 0, len(collMeta.PartitonNames))
	var partID typeutil.UniqueID
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitonNames[idx] == partitionName {
			partID = collMeta.PartitionIDs[idx]
			exist = true
		} else {
			pd = append(pd, collMeta.PartitionIDs[idx])
			pn = append(pn, collMeta.PartitonNames[idx])
		}
	}
	if !exist {
		return 0, 0, fmt.Errorf("partition %s does not exist", partitionName)
	}
	delete(mt.partitionID2Meta, partMeta.PartitionID)
	collMeta.PartitionIDs = pd
	collMeta.PartitonNames = pn
	mt.collID2Meta[collID] = collMeta

	for _, segID := range partMeta.SegmentIDs {
		delete(mt.segID2CollID, segID)
		delete(mt.segID2PartitionID, segID)
		delete(mt.flushedSegID, segID)

	for _, segID := range segIDs {
		_, ok := mt.segID2IndexMeta[segID]
		if !ok {
			log.Warn("segment has no index meta", zap.Int64("segment id", segID))

@@ -658,11 +586,9 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
		delete(mt.segID2IndexMeta, segID)
	}
	meta := map[string]string{path.Join(CollectionMetaPrefix, strconv.FormatInt(collID, 10)): proto.MarshalTextString(&collMeta)}
	delMetaKeys := []string{
		fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, collMeta.ID, partMeta.PartitionID),
	}
	delMetaKeys := []string{}
	for _, idxInfo := range collMeta.FieldIndexes {
		k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partMeta.PartitionID)
		k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partID)
		delMetaKeys = append(delMetaKeys, k)
	}

@@ -674,152 +600,50 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
		_ = mt.reloadFromKV()
		return 0, 0, err
	}
	return ts, partMeta.PartitionID, nil
	return ts, partID, nil
}

func (mt *metaTable) GetPartitionByID(collID typeutil.UniqueID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) (pb.PartitionInfo, error) {
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()
	if ts == 0 {
		partMeta, ok := mt.partitionID2Meta[partitionID]
		if !ok {
			return pb.PartitionInfo{}, fmt.Errorf("partition id = %d not exist", partitionID)
		}
		return partMeta, nil
	}
	partKey := fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, collID, partitionID)
	partVal, err := mt.client.Load(partKey, ts)
	if err != nil {
		return pb.PartitionInfo{}, err
	}
	partInfo := pb.PartitionInfo{}
	err = proto.UnmarshalText(partVal, &partInfo)
	if err != nil {
		return pb.PartitionInfo{}, err
	}
	return partInfo, nil

}

func (mt *metaTable) AddSegment(segInfos []*datapb.SegmentInfo, msgStartPos string, msgEndPos string) (typeutil.Timestamp, error) {
func (mt *metaTable) AddIndex(segIdxInfo *pb.SegmentIndexInfo, collID, partID typeutil.UniqueID) (typeutil.Timestamp, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	meta := make(map[string]string)
	for _, segInfo := range segInfos {
		collMeta, ok := mt.collID2Meta[segInfo.CollectionID]
		if !ok {
			return 0, fmt.Errorf("can't find collection id = %d", segInfo.CollectionID)
	collMeta, ok := mt.collID2Meta[collID]
	if !ok {
		return 0, fmt.Errorf("collection id = %d not found", collID)
	}
	exist := false
	for _, fidx := range collMeta.FieldIndexes {
		if fidx.IndexID == segIdxInfo.IndexID {
			exist = true
			break
		}
		partMeta, ok := mt.partitionID2Meta[segInfo.PartitionID]
		if !ok {
			return 0, fmt.Errorf("can't find partition id = %d", segInfo.PartitionID)
		}
		exist := false
		for _, partID := range collMeta.PartitionIDs {
			if partID == segInfo.PartitionID {
				exist = true
				break
			}
		}
		if !exist {
			return 0, fmt.Errorf("partition id = %d, not belong to collection id = %d", segInfo.PartitionID, segInfo.CollectionID)
		}
		exist = false
		for _, segID := range partMeta.SegmentIDs {
			if segID == segInfo.ID {
				exist = true
			}
		}
		if exist {
			return 0, fmt.Errorf("segment id = %d exist", segInfo.ID)
		}
		partMeta.SegmentIDs = append(partMeta.SegmentIDs, segInfo.ID)
		mt.partitionID2Meta[segInfo.PartitionID] = partMeta
		mt.segID2CollID[segInfo.ID] = segInfo.CollectionID
		mt.segID2PartitionID[segInfo.ID] = segInfo.PartitionID

		k := fmt.Sprintf("%s/%d/%d", PartitionMetaPrefix, segInfo.CollectionID, segInfo.PartitionID)
		v := proto.MarshalTextString(&partMeta)
		meta[k] = v
	}
	if !exist {
		return 0, fmt.Errorf("index id = %d not found", segIdxInfo.IndexID)
	}

	// AddSegment is invoked from DataCoord
	if msgStartPos != "" && msgEndPos != "" {
		meta[SegInfoMsgStartPosPrefix] = msgStartPos
		meta[SegInfoMsgEndPosPrefix] = msgEndPos
	}

	ts, err := mt.client.MultiSave(meta, nil)
	if err != nil {
		_ = mt.reloadFromKV()
		return 0, err
	}
	return ts, nil
}

func (mt *metaTable) AddIndex(segIdxInfos []*pb.SegmentIndexInfo, msgStartPos string, msgEndPos string) (typeutil.Timestamp, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	meta := make(map[string]string)

	for _, segIdxInfo := range segIdxInfos {
		collID, ok := mt.segID2CollID[segIdxInfo.SegmentID]
		if !ok {
			return 0, fmt.Errorf("segment id = %d not belong to any collection", segIdxInfo.SegmentID)
		}
		collMeta, ok := mt.collID2Meta[collID]
		if !ok {
			return 0, fmt.Errorf("collection id = %d not found", collID)
		}
		partID, ok := mt.segID2PartitionID[segIdxInfo.SegmentID]
		if !ok {
			return 0, fmt.Errorf("segment id = %d not belong to any partition", segIdxInfo.SegmentID)
		}
		exist := false
		for _, fidx := range collMeta.FieldIndexes {
			if fidx.IndexID == segIdxInfo.IndexID {
				exist = true
				break
			}
		}
		if !exist {
			return 0, fmt.Errorf("index id = %d not found", segIdxInfo.IndexID)
		}

		segIdxMap, ok := mt.segID2IndexMeta[segIdxInfo.SegmentID]
		if !ok {
			idxMap := map[typeutil.UniqueID]pb.SegmentIndexInfo{segIdxInfo.IndexID: *segIdxInfo}
			mt.segID2IndexMeta[segIdxInfo.SegmentID] = &idxMap
		} else {
			tmpInfo, ok := (*segIdxMap)[segIdxInfo.IndexID]
			if ok {
				if SegmentIndexInfoEqual(segIdxInfo, &tmpInfo) {
	segIdxMap, ok := mt.segID2IndexMeta[segIdxInfo.SegmentID]
	if !ok {
		idxMap := map[typeutil.UniqueID]pb.SegmentIndexInfo{segIdxInfo.IndexID: *segIdxInfo}
		mt.segID2IndexMeta[segIdxInfo.SegmentID] = idxMap
	} else {
		tmpInfo, ok := segIdxMap[segIdxInfo.IndexID]
		if ok {
			if SegmentIndexInfoEqual(segIdxInfo, &tmpInfo) {
				if segIdxInfo.BuildID == tmpInfo.BuildID {
					log.Debug("Identical SegmentIndexInfo already exist", zap.Int64("IndexID", segIdxInfo.IndexID))
					continue
					return 0, nil
				}
				return 0, fmt.Errorf("index id = %d exist", segIdxInfo.IndexID)
			}
		}

		if _, ok := mt.flushedSegID[segIdxInfo.SegmentID]; !ok {
			mt.flushedSegID[segIdxInfo.SegmentID] = true
		}

		(*(mt.segID2IndexMeta[segIdxInfo.SegmentID]))[segIdxInfo.IndexID] = *segIdxInfo
		k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, collID, segIdxInfo.IndexID, partID, segIdxInfo.SegmentID)
		v := proto.MarshalTextString(segIdxInfo)
		meta[k] = v
	}

	// AddIndex is invoked from DataNode flush operation
	if msgStartPos != "" && msgEndPos != "" {
		meta[FlushedSegMsgStartPosPrefix] = msgStartPos
		meta[FlushedSegMsgEndPosPrefix] = msgEndPos
	}
	mt.segID2IndexMeta[segIdxInfo.SegmentID][segIdxInfo.IndexID] = *segIdxInfo
	k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, collID, segIdxInfo.IndexID, partID, segIdxInfo.SegmentID)
	v := proto.MarshalTextString(segIdxInfo)

	ts, err := mt.client.MultiSave(meta, nil)
	ts, err := mt.client.Save(k, v)
	if err != nil {
		_ = mt.reloadFromKV()
		return 0, err
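Note: AddIndex now persists a single pb.SegmentIndexInfo per call via client.Save instead of batching through MultiSave, and the caller supplies collID/partID directly since the segID2CollID and segID2PartitionID maps are gone. A hypothetical caller, assuming a populated metaTable mt and valid IDs (a sketch, not code from this patch):

segIdx := &pb.SegmentIndexInfo{
	SegmentID: segID,
	FieldID:   fieldID,
	IndexID:   indexID,
	BuildID:   buildID,
}
if _, err := mt.AddIndex(segIdx, collID, partID); err != nil {
	// a different BuildID for an already-recorded index id is rejected
	log.Warn("add index failed", zap.Error(err))
}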
@@ -829,7 +653,7 @@ func (mt *metaTable) AddIndex(segIdxInfos []*pb.SegmentIndexInfo, msgStartPos st
}

//return timestamp, index id, is dropped, error
func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.Timestamp, typeutil.UniqueID, bool, error) {
func (mt *metaTable) DropIndex(collName, fieldName, indexName string, segIDs []typeutil.UniqueID) (typeutil.Timestamp, typeutil.UniqueID, bool, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

@@ -876,19 +700,12 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.

	delete(mt.indexID2Meta, dropIdxID)

	for _, partID := range collMeta.PartitionIDs {
		partMeta, ok := mt.partitionID2Meta[partID]
		if !ok {
			log.Warn("partition not exist", zap.Int64("partition id", partID))
			continue
		}
		for _, segID := range partMeta.SegmentIDs {
			segInfo, ok := mt.segID2IndexMeta[segID]
	for _, segID := range segIDs {
		segInfo, ok := mt.segID2IndexMeta[segID]
		if ok {
			_, ok := segInfo[dropIdxID]
			if ok {
				_, ok := (*segInfo)[dropIdxID]
				if ok {
					delete(*segInfo, dropIdxID)
				}
				delete(segInfo, dropIdxID)
			}
		}
	}

@@ -910,11 +727,6 @@ func (mt *metaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, filedID in
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()

	_, ok := mt.flushedSegID[segID]
	if !ok {
		return pb.SegmentIndexInfo{}, fmt.Errorf("segment id %d hasn't flushed, there is no index meta", segID)
	}

	segIdxMap, ok := mt.segID2IndexMeta[segID]
	if !ok {
		return pb.SegmentIndexInfo{

@@ -925,19 +737,19 @@ func (mt *metaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, filedID in
			EnableIndex: false,
		}, nil
	}
	if len(*segIdxMap) == 0 {
	if len(segIdxMap) == 0 {
		return pb.SegmentIndexInfo{}, fmt.Errorf("segment id %d not has any index", segID)
	}

	if filedID == -1 && idxName == "" { // return default index
		for _, seg := range *segIdxMap {
		for _, seg := range segIdxMap {
			info, ok := mt.indexID2Meta[seg.IndexID]
			if ok && info.IndexName == Params.DefaultIndexName {
				return seg, nil
			}
		}
	} else {
		for idxID, seg := range *segIdxMap {
		for idxID, seg := range segIdxMap {
			idxMeta, ok := mt.indexID2Meta[idxID]
			if ok {
				if idxMeta.IndexName != idxName {

@@ -991,7 +803,7 @@ func (mt *metaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema
		return false
	}
	exist := false
	for idxID, meta := range *segIdx {
	for idxID, meta := range segIdx {
		if meta.FieldID != fieldSchema.FieldID {
			continue
		}

@@ -1008,7 +820,7 @@ func (mt *metaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema
}

// return segment ids, type params, error
func (mt *metaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *pb.IndexInfo) ([]typeutil.UniqueID, schemapb.FieldSchema, error) {
func (mt *metaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *pb.IndexInfo, segIDs []typeutil.UniqueID) ([]typeutil.UniqueID, schemapb.FieldSchema, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

@@ -1108,14 +920,9 @@ func (mt *metaTable) GetNotIndexedSegments(collName string, fieldName string, id
	}

	rstID := make([]typeutil.UniqueID, 0, 16)
	for _, partID := range collMeta.PartitionIDs {
		partMeta, ok := mt.partitionID2Meta[partID]
		if ok {
			for _, segID := range partMeta.SegmentIDs {
				if exist := mt.unlockIsSegmentIndexed(segID, &fieldSchema, idxInfo.IndexParams); !exist {
					rstID = append(rstID, segID)
				}
			}
	for _, segID := range segIDs {
		if exist := mt.unlockIsSegmentIndexed(segID, &fieldSchema, idxInfo.IndexParams); !exist {
			rstID = append(rstID, segID)
		}
	}
	return rstID, fieldSchema, nil

@@ -1157,15 +964,3 @@ func (mt *metaTable) GetIndexByID(indexID typeutil.UniqueID) (*pb.IndexInfo, err
	}
	return &indexInfo, nil
}

func (mt *metaTable) AddFlushedSegment(segID typeutil.UniqueID) error {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	_, ok := mt.flushedSegID[segID]
	if ok {
		return fmt.Errorf("segment id = %d exist", segID)
	}
	mt.flushedSegID[segID] = true
	return nil
}
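Note: with flushedSegID and AddFlushedSegment removed, metaTable no longer tracks flushed segments itself; DropIndex, DeletePartition, DeleteCollection and GetNotIndexedSegments instead take the affected segment ids as an explicit segIDs argument, which the caller is expected to fetch from the flushed-segments service. A hypothetical call site (getFlushedSegments is an assumed helper, not part of this patch):

segIDs, err := getFlushedSegments(ctx, collID, partID) // assumed helper returning []typeutil.UniqueID
if err != nil {
	return err
}
_, dropIdxID, found, err := mt.DropIndex(collName, fieldName, indexName, segIDs)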
@@ -8,7 +8,6 @@
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package rootcoord

import (

@@ -20,7 +19,6 @@ import (
	"github.com/golang/protobuf/proto"
	"github.com/milvus-io/milvus/internal/kv"
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/util/typeutil"

@@ -97,15 +95,6 @@ func Test_MockKV(t *testing.T) {
	_, err = NewMetaTable(k1)
	assert.NotNil(t, err)

	prefix[PartitionMetaPrefix] = []string{"partition-meta"}
	_, err = NewMetaTable(k1)
	assert.NotNil(t, err)
	assert.EqualError(t, err, "RootCoord UnmarshalText pb.PartitionInfo err:line 1.0: unknown field name \"partition-meta\" in milvus.proto.etcd.PartitionInfo")

	prefix[PartitionMetaPrefix] = []string{proto.MarshalTextString(&pb.PartitionInfo{})}
	_, err = NewMetaTable(k1)
	assert.NotNil(t, err)

	prefix[SegmentIndexMetaPrefix] = []string{"segment-index-meta"}
	_, err = NewMetaTable(k1)
	assert.NotNil(t, err)
@@ -147,9 +136,11 @@ func Test_MockKV(t *testing.T) {
func TestMetaTable(t *testing.T) {
	const (
		collID        = typeutil.UniqueID(1)
		collName      = "testColl"
		collIDInvalid = typeutil.UniqueID(2)
		partIDDefault = typeutil.UniqueID(10)
		partID        = typeutil.UniqueID(20)
		partName      = "testPart"
		partIDInvalid = typeutil.UniqueID(21)
		segID         = typeutil.UniqueID(100)
		segID2        = typeutil.UniqueID(101)

@@ -222,18 +213,9 @@ func TestMetaTable(t *testing.T) {
				IndexID: indexID,
			},
		},
		CreateTime:   0,
		PartitionIDs: nil,
	}
	partInfoDefault := &pb.PartitionInfo{
		PartitionName: "_default",
		PartitionID:   partIDDefault,
		SegmentIDs:    nil,
	}
	partInfo := &pb.PartitionInfo{
		PartitionName: "testPart",
		PartitionID:   partID,
		SegmentIDs:    nil,
		CreateTime:    0,
		PartitionIDs:  nil,
		PartitonNames: nil,
	}
	idxInfo := []*pb.IndexInfo{
		{
@@ -257,20 +239,15 @@ func TestMetaTable(t *testing.T) {
	}

	t.Run("add collection", func(t *testing.T) {
		partInfoDefault.SegmentIDs = []int64{segID}
		_, err = mt.AddCollection(collInfo, partInfoDefault, idxInfo, ddOp)
		assert.NotNil(t, err)
		partInfoDefault.SegmentIDs = []int64{}

		collInfo.PartitionIDs = []int64{segID}
		_, err = mt.AddCollection(collInfo, partInfoDefault, idxInfo, ddOp)
		_, err = mt.AddCollection(collInfo, partIDDefault, Params.DefaultPartitionName, idxInfo, ddOp)
		assert.NotNil(t, err)
		collInfo.PartitionIDs = []int64{}

		_, err = mt.AddCollection(collInfo, partInfoDefault, nil, ddOp)
		_, err = mt.AddCollection(collInfo, partIDDefault, Params.DefaultPartitionName, nil, ddOp)
		assert.NotNil(t, err)

		_, err = mt.AddCollection(collInfo, partInfoDefault, idxInfo, ddOp)
		_, err = mt.AddCollection(collInfo, partIDDefault, Params.DefaultPartitionName, idxInfo, ddOp)
		assert.Nil(t, err)

		collMeta, err := mt.GetCollectionByName("testColl", 0)

@@ -290,7 +267,7 @@ func TestMetaTable(t *testing.T) {
	})

	t.Run("add partition", func(t *testing.T) {
		_, err := mt.AddPartition(collID, partInfo.PartitionName, partInfo.PartitionID, ddOp)
		_, err := mt.AddPartition(collID, partName, partID, ddOp)
		assert.Nil(t, err)

		// check DD operation flag

@@ -299,33 +276,6 @@ func TestMetaTable(t *testing.T) {
		assert.Equal(t, "false", flag)
	})

	t.Run("add segment", func(t *testing.T) {
		segInfo := &datapb.SegmentInfo{
			ID:           segID,
			CollectionID: collID,
			PartitionID:  partID,
		}
		_, err := mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.Nil(t, err)

		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.NotNil(t, err)

		segInfo.ID = segID2
		segInfo.CollectionID = collIDInvalid
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.NotNil(t, err)

		segInfo.CollectionID = collID
		segInfo.PartitionID = partIDInvalid
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.NotNil(t, err)

		segInfo.PartitionID = partID
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.Nil(t, err)
	})

	t.Run("add segment index", func(t *testing.T) {
		segIdxInfo := pb.SegmentIndexInfo{
			SegmentID: segID,

@@ -333,15 +283,15 @@ func TestMetaTable(t *testing.T) {
			IndexID:   indexID,
			BuildID:   buildID,
		}
		_, err := mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err := mt.AddIndex(&segIdxInfo, collID, partID)
		assert.Nil(t, err)

		// it's legal to add index twice
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.Nil(t, err)

		segIdxInfo.BuildID = 202
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("index id = %d exist", segIdxInfo.IndexID))
	})
@@ -374,9 +324,9 @@ func TestMetaTable(t *testing.T) {
			IndexParams: params,
		}

		_, _, err := mt.GetNotIndexedSegments("collTest", "field110", idxInfo)
		_, _, err := mt.GetNotIndexedSegments("collTest", "field110", idxInfo, nil)
		assert.NotNil(t, err)
		seg, field, err := mt.GetNotIndexedSegments("testColl", "field110", idxInfo)
		seg, field, err := mt.GetNotIndexedSegments("testColl", "field110", idxInfo, []typeutil.UniqueID{segID, segID2})
		assert.Nil(t, err)
		assert.Equal(t, 1, len(seg))
		assert.Equal(t, segID2, seg[0])

@@ -392,7 +342,7 @@ func TestMetaTable(t *testing.T) {
		idxInfo.IndexID = 2001
		idxInfo.IndexName = "field110-1"

		seg, field, err = mt.GetNotIndexedSegments("testColl", "field110", idxInfo)
		seg, field, err = mt.GetNotIndexedSegments("testColl", "field110", idxInfo, []typeutil.UniqueID{segID, segID2})
		assert.Nil(t, err)
		assert.Equal(t, 2, len(seg))
		assert.Equal(t, segID, seg[0])

@@ -440,12 +390,12 @@ func TestMetaTable(t *testing.T) {
	})

	t.Run("drop index", func(t *testing.T) {
		_, idx, ok, err := mt.DropIndex("testColl", "field110", "field110")
		_, idx, ok, err := mt.DropIndex("testColl", "field110", "field110", nil)
		assert.Nil(t, err)
		assert.True(t, ok)
		assert.Equal(t, indexID, idx)

		_, _, ok, err = mt.DropIndex("testColl", "field110", "field110-error")
		_, _, ok, err = mt.DropIndex("testColl", "field110", "field110-error", nil)
		assert.Nil(t, err)
		assert.False(t, ok)

@@ -463,7 +413,7 @@ func TestMetaTable(t *testing.T) {
	})

	t.Run("drop partition", func(t *testing.T) {
		_, id, err := mt.DeletePartition(collID, partInfo.PartitionName, nil)
		_, id, err := mt.DeletePartition(collID, partName, nil, nil)
		assert.Nil(t, err)
		assert.Equal(t, partID, id)

@@ -474,9 +424,9 @@ func TestMetaTable(t *testing.T) {
	})

	t.Run("drop collection", func(t *testing.T) {
		_, err = mt.DeleteCollection(collIDInvalid, nil)
		_, err = mt.DeleteCollection(collIDInvalid, nil, nil)
		assert.NotNil(t, err)
		_, err = mt.DeleteCollection(collID, nil)
		_, err = mt.DeleteCollection(collID, nil, nil)
		assert.Nil(t, err)

		// check DD operation flag
@@ -497,7 +447,8 @@ func TestMetaTable(t *testing.T) {
			return 0, fmt.Errorf("multi save error")
		}
		collInfo.PartitionIDs = nil
		_, err := mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err := mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "multi save error")
	})

@@ -510,11 +461,11 @@ func TestMetaTable(t *testing.T) {
			return 0, fmt.Errorf("milti save and remove with prefix error")
		}
		collInfo.PartitionIDs = nil
		_, err := mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err := mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)
		mt.partitionID2Meta = make(map[typeutil.UniqueID]pb.PartitionInfo)
		mt.indexID2Meta = make(map[int64]pb.IndexInfo)
		_, err = mt.DeleteCollection(collInfo.ID, nil)
		_, err = mt.DeleteCollection(collInfo.ID, nil, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "milti save and remove with prefix error")
	})

@@ -525,15 +476,8 @@ func TestMetaTable(t *testing.T) {
		}

		collInfo.PartitionIDs = nil
		_, err := mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		assert.Nil(t, err)

		segInfo := &datapb.SegmentInfo{
			ID:           100,
			CollectionID: collID,
			PartitionID:  partID,
		}
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		collInfo.PartitonNames = nil
		_, err := mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		mt.collID2Meta = make(map[int64]pb.CollectionInfo)

@@ -541,14 +485,6 @@ func TestMetaTable(t *testing.T) {
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("can't find collection: %s", collInfo.Schema.Name))

		_, err = mt.GetCollectionBySegmentID(segInfo.ID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("can't find collection id: %d", collInfo.ID))

		mt.segID2CollID = make(map[int64]int64)
		_, err = mt.GetCollectionBySegmentID(segInfo.ID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("segment id %d not belong to any collection", segInfo.ID))
	})

	t.Run("add partition failed", func(t *testing.T) {
@@ -562,7 +498,8 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		_, err = mt.AddPartition(2, "no-part", 22, nil)

@@ -576,9 +513,9 @@ func TestMetaTable(t *testing.T) {
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("maximum partition's number should be limit to %d", Params.MaxPartitionNum))

		coll.PartitionIDs = []int64{partInfo.PartitionID}
		coll.PartitionIDs = []int64{partID}
		coll.PartitonNames = []string{partName}
		mt.collID2Meta[coll.ID] = coll
		mt.partitionID2Meta = make(map[int64]pb.PartitionInfo)
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("multi save error")
		}

@@ -590,15 +527,16 @@ func TestMetaTable(t *testing.T) {
			return 0, nil
		}
		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)
		_, err = mt.AddPartition(coll.ID, partInfo.PartitionName, 22, nil)
		_, err = mt.AddPartition(coll.ID, partName, 22, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("partition name = %s already exists", partInfo.PartitionName))
		assert.EqualError(t, err, fmt.Sprintf("partition name = %s already exists", partName))

		_, err = mt.AddPartition(coll.ID, "no-part", partInfo.PartitionID, nil)
		_, err = mt.AddPartition(coll.ID, "no-part", partID, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("partition id = %d already exists", partInfo.PartitionID))
		assert.EqualError(t, err, fmt.Sprintf("partition id = %d already exists", partID))
	})

	t.Run("has partition failed", func(t *testing.T) {

@@ -612,14 +550,14 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		mt.partitionID2Meta = make(map[int64]pb.PartitionInfo)
		assert.False(t, mt.HasPartition(collInfo.ID, partInfo.PartitionName, 0))
		assert.False(t, mt.HasPartition(collInfo.ID, "no-partName", 0))

		mt.collID2Meta = make(map[int64]pb.CollectionInfo)
		assert.False(t, mt.HasPartition(collInfo.ID, partInfo.PartitionName, 0))
		assert.False(t, mt.HasPartition(collInfo.ID, partName, 0))
	})

	t.Run("delete partition failed", func(t *testing.T) {
@@ -633,78 +571,29 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		_, _, err = mt.DeletePartition(collInfo.ID, Params.DefaultPartitionName, nil)
		_, _, err = mt.DeletePartition(collInfo.ID, Params.DefaultPartitionName, nil, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "default partition cannot be deleted")

		_, _, err = mt.DeletePartition(collInfo.ID, "abc", nil)
		_, _, err = mt.DeletePartition(collInfo.ID, "abc", nil, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "partition abc does not exist")

		pm := mt.partitionID2Meta[partInfo.PartitionID]
		pm.SegmentIDs = []int64{11, 12, 13}
		mt.partitionID2Meta[pm.PartitionID] = pm
		mockKV.multiSaveAndRemoveWithPrefix = func(saves map[string]string, removals []string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("multi save and remove with prefix error")
		}
		_, _, err = mt.DeletePartition(collInfo.ID, pm.PartitionName, nil)
		_, _, err = mt.DeletePartition(collInfo.ID, partName, nil, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "multi save and remove with prefix error")

		mt.collID2Meta = make(map[int64]pb.CollectionInfo)
		_, _, err = mt.DeletePartition(collInfo.ID, "abc", nil)
		_, _, err = mt.DeletePartition(collInfo.ID, "abc", nil, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("can't find collection id = %d", collInfo.ID))

		_, err = mt.GetPartitionByID(1, 11, 0)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "partition id = 11 not exist")
	})

	t.Run("add segment failed", func(t *testing.T) {
		mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
			return nil, nil, nil
		}
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, nil
		}
		err := mt.reloadFromKV()
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		assert.Nil(t, err)

		noPart := pb.PartitionInfo{
			PartitionName: "no-part",
			PartitionID:   partInfo.PartitionID + 1,
			SegmentIDs:    nil,
		}
		mt.partitionID2Meta[noPart.PartitionID] = noPart

		segInfo := &datapb.SegmentInfo{
			ID:           100,
			CollectionID: collInfo.ID,
			PartitionID:  noPart.PartitionID,
		}
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("partition id = %d, not belong to collection id = %d", segInfo.PartitionID, segInfo.CollectionID))

		segInfo = &datapb.SegmentInfo{
			ID:           11,
			CollectionID: collInfo.ID,
			PartitionID:  partInfo.PartitionID,
		}
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("save error")
		}
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.NotNil(t, err)
		assert.EqualError(t, err, "save error")
	})

	t.Run("add index failed", func(t *testing.T) {
@@ -721,15 +610,8 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		assert.Nil(t, err)

		segInfo := &datapb.SegmentInfo{
			ID:           100,
			CollectionID: collID,
			PartitionID:  partID,
		}
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		segIdxInfo := pb.SegmentIndexInfo{

@@ -738,39 +620,28 @@ func TestMetaTable(t *testing.T) {
			IndexID:   indexID2,
			BuildID:   buildID,
		}
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("index id = %d not found", segIdxInfo.IndexID))

		mt.segID2PartitionID = make(map[int64]int64)
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("segment id = %d not belong to any partition", segIdxInfo.SegmentID))

		mt.collID2Meta = make(map[int64]pb.CollectionInfo)
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("collection id = %d not found", collInfo.ID))

		mt.segID2CollID = make(map[int64]int64)
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("segment id = %d not belong to any collection", segIdxInfo.SegmentID))

		err = mt.reloadFromKV()
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		assert.Nil(t, err)
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		segIdxInfo.IndexID = indexID
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
		mockKV.save = func(key, value string) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("save error")
		}
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "save error")
	})
@@ -789,19 +660,20 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		_, _, _, err = mt.DropIndex("abc", "abc", "abc")
		_, _, _, err = mt.DropIndex("abc", "abc", "abc", nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "collection name = abc not exist")

		mt.collName2ID["abc"] = 2
		_, _, _, err = mt.DropIndex("abc", "abc", "abc")
		_, _, _, err = mt.DropIndex("abc", "abc", "abc", nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "collection name = abc not has meta")

		_, _, _, err = mt.DropIndex(collInfo.Schema.Name, "abc", "abc")
		_, _, _, err = mt.DropIndex(collInfo.Schema.Name, "abc", "abc", nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed abc", collInfo.Schema.Name))

@@ -818,7 +690,7 @@ func TestMetaTable(t *testing.T) {
		}
		mt.collID2Meta[coll.ID] = coll
		mt.indexID2Meta = make(map[int64]pb.IndexInfo)
		_, idxID, isDroped, err := mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName)
		_, idxID, isDroped, err := mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName, nil)
		assert.Zero(t, idxID)
		assert.False(t, isDroped)
		assert.Nil(t, err)

@@ -826,13 +698,13 @@ func TestMetaTable(t *testing.T) {
		err = mt.reloadFromKV()
		assert.Nil(t, err)
		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)
		mt.partitionID2Meta = make(map[int64]pb.PartitionInfo)
		mockKV.multiSaveAndRemoveWithPrefix = func(saves map[string]string, removals []string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("multi save and remove with prefix error")
		}
		_, _, _, err = mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName)
		_, _, _, err = mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "multi save and remove with prefix error")
	})
@@ -851,35 +723,23 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		_, err = mt.GetSegmentIndexInfoByID(segID2, fieldID, "abc")
		assert.NotNil(t, err)
		assert.EqualError(t, err, "segment id 101 hasn't flushed, there is no index meta")

		err = mt.AddFlushedSegment(segID2)
		assert.Nil(t, err)
		seg, err := mt.GetSegmentIndexInfoByID(segID2, fieldID, "abc")
		assert.Nil(t, err)
		assert.Equal(t, segID2, seg.SegmentID)
		assert.Equal(t, fieldID, seg.FieldID)
		assert.Equal(t, false, seg.EnableIndex)

		segInfo := &datapb.SegmentInfo{
			ID:           100,
			CollectionID: collID,
			PartitionID:  partID,
		}
		_, err = mt.AddSegment([]*datapb.SegmentInfo{segInfo}, "", "")
		assert.Nil(t, err)
		segIdxInfo := pb.SegmentIndexInfo{
			SegmentID: segID,
			FieldID:   fieldID,
			IndexID:   indexID,
			BuildID:   buildID,
		}
		_, err = mt.AddIndex([]*pb.SegmentIndexInfo{&segIdxInfo}, "", "")
		_, err = mt.AddIndex(&segIdxInfo, collID, partID)
		assert.Nil(t, err)
		idx, err := mt.GetSegmentIndexInfoByID(segIdxInfo.SegmentID, segIdxInfo.FieldID, idxInfo[0].IndexName)
		assert.Nil(t, err)
@@ -908,7 +768,8 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		mt.collID2Meta = make(map[int64]pb.CollectionInfo)

@@ -935,8 +796,6 @@ func TestMetaTable(t *testing.T) {
		}
		idxMeta := make(map[int64]pb.SegmentIndexInfo)
		idxMeta[idx.IndexID] = *idx
		mt.segID2IndexMeta = make(map[int64]*map[int64]pb.SegmentIndexInfo)
		mt.segID2IndexMeta[idx.SegmentID] = &idxMeta

		field := schemapb.FieldSchema{
			FieldID: 31,

@@ -965,7 +824,7 @@ func TestMetaTable(t *testing.T) {
		}

		mt.collName2ID["abc"] = 123
		_, _, err = mt.GetNotIndexedSegments("abc", "no-field", idx)
		_, _, err = mt.GetNotIndexedSegments("abc", "no-field", idx, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "collection abc not found")

@@ -979,16 +838,17 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)

		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, "no-field", idx)
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, "no-field", idx, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed no-field", collInfo.Schema.Name))

		bakMeta := mt.indexID2Meta
		mt.indexID2Meta = make(map[int64]pb.IndexInfo)
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx)
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("index id = %d not found", idxInfo[0].IndexID))
		mt.indexID2Meta = bakMeta

@@ -996,7 +856,7 @@ func TestMetaTable(t *testing.T) {
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("multi save error")
		}
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx)
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "multi save error")

@@ -1004,7 +864,8 @@ func TestMetaTable(t *testing.T) {
			return 0, nil
		}
		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)
		coll := mt.collID2Meta[collInfo.ID]
		coll.FieldIndexes = append(coll.FieldIndexes, &pb.FieldIndexInfo{FiledID: coll.FieldIndexes[0].FiledID, IndexID: coll.FieldIndexes[0].IndexID + 1})

@@ -1025,7 +886,7 @@ func TestMetaTable(t *testing.T) {
		mockKV.multiSave = func(kvs map[string]string, addition func(ts typeutil.Timestamp) (string, string, error)) (typeutil.Timestamp, error) {
			return 0, fmt.Errorf("multi save error")
		}
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx)
		_, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idx, nil)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "multi save error")
	})
@@ -1052,7 +913,8 @@ func TestMetaTable(t *testing.T) {
		assert.Nil(t, err)

		collInfo.PartitionIDs = nil
		_, err = mt.AddCollection(collInfo, partInfo, idxInfo, nil)
		collInfo.PartitonNames = nil
		_, err = mt.AddCollection(collInfo, partID, partName, idxInfo, nil)
		assert.Nil(t, err)
		mt.indexID2Meta = make(map[int64]pb.IndexInfo)
		_, _, err = mt.GetIndexByName(collInfo.Schema.Name, idxInfo[0].IndexName)

@@ -1063,19 +925,6 @@ func TestMetaTable(t *testing.T) {
		assert.NotNil(t, err)
		assert.EqualError(t, err, fmt.Sprintf("cannot find index, id = %d", idxInfo[0].IndexID))
	})

	t.Run("add flused segment failed", func(t *testing.T) {
		mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
			return nil, nil, nil
		}
		err := mt.reloadFromKV()
		assert.Nil(t, err)
		err = mt.AddFlushedSegment(222)
		assert.Nil(t, err)
		err = mt.AddFlushedSegment(222)
		assert.NotNil(t, err)
		assert.EqualError(t, err, "segment id = 222 exist")
	})
}
func TestMetaWithTimestamp(t *testing.T) {
|
||||
|
@ -1108,21 +957,15 @@ func TestMetaWithTimestamp(t *testing.T) {
},
}

partInfo := &pb.PartitionInfo{
	PartitionName: "p1",
	PartitionID:   11,
	SegmentIDs:    nil,
}
t1, err := mt.AddCollection(collInfo, partInfo, nil, nil)
t1, err := mt.AddCollection(collInfo, 11, "p1", nil, nil)
assert.Nil(t, err)

collInfo.ID = 2
collInfo.PartitionIDs = nil
collInfo.PartitonNames = nil
collInfo.Schema.Name = "t2"
partInfo.PartitionID = 12
partInfo.PartitionName = "p2"

t2, err := mt.AddCollection(collInfo, partInfo, nil, nil)
t2, err := mt.AddCollection(collInfo, 12, "p2", nil, nil)
assert.Nil(t, err)

assert.True(t, mt.HasCollection(1, 0))
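AddCollection loses its *pb.PartitionInfo parameter here: the default partition travels as a bare ID and name, and the collection meta keeps partitions as parallel slices. A self-contained sketch of that shape (toy struct whose field names only echo the diff, not the real etcdpb types):

package main

import "fmt"

// collection keeps partition IDs and names as parallel slices, which is
// what lets AddCollection accept scalars instead of a PartitionInfo struct.
type collection struct {
	ID             int64
	PartitionIDs   []int64
	PartitionNames []string
}

// addCollection registers the collection's first (default) partition.
func addCollection(c *collection, partID int64, partName string) {
	c.PartitionIDs = append(c.PartitionIDs, partID)
	c.PartitionNames = append(c.PartitionNames, partName)
}

func main() {
	c := &collection{ID: 1}
	addCollection(c, 11, "p1")
	fmt.Println(c.PartitionIDs, c.PartitionNames) // [11] [p1]
}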
@ -1218,51 +1061,25 @@ func TestMetaWithTimestamp(t *testing.T) {
assert.Nil(t, err)
p2, err := mt.GetPartitionByName(2, "p2", 0)
assert.Nil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)
assert.Equal(t, int64(12), p2.PartitionID)
assert.Equal(t, int64(11), p1)
assert.Equal(t, int64(12), p2)
assert.Nil(t, err)

p1, err = mt.GetPartitionByName(1, "p1", t2)
assert.Nil(t, err)
p2, err = mt.GetPartitionByName(2, "p2", t2)
assert.Nil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)
assert.Equal(t, int64(12), p2.PartitionID)
assert.Equal(t, int64(11), p1)
assert.Equal(t, int64(12), p2)

p1, err = mt.GetPartitionByName(1, "p1", t1)
assert.Nil(t, err)
p2, err = mt.GetPartitionByName(2, "p2", t1)
_, err = mt.GetPartitionByName(2, "p2", t1)
assert.NotNil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)
assert.Equal(t, int64(11), p1)

p1, err = mt.GetPartitionByName(1, "p1", tsoStart)
_, err = mt.GetPartitionByName(1, "p1", tsoStart)
assert.NotNil(t, err)
p2, err = mt.GetPartitionByName(2, "p2", tsoStart)
assert.NotNil(t, err)

p1, err = mt.GetPartitionByID(1, 11, 0)
assert.Nil(t, err)
p2, err = mt.GetPartitionByID(2, 12, 0)
assert.Nil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)
assert.Equal(t, int64(12), p2.PartitionID)
assert.Nil(t, err)

p1, err = mt.GetPartitionByID(1, 11, t2)
assert.Nil(t, err)
p2, err = mt.GetPartitionByID(2, 12, t2)
assert.Nil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)
assert.Equal(t, int64(12), p2.PartitionID)

p1, err = mt.GetPartitionByID(1, 11, t1)
assert.Nil(t, err)
p2, err = mt.GetPartitionByID(2, 12, t1)
assert.NotNil(t, err)
assert.Equal(t, int64(11), p1.PartitionID)

p1, err = mt.GetPartitionByID(1, 11, tsoStart)
assert.NotNil(t, err)
p2, err = mt.GetPartitionByID(2, 12, tsoStart)
_, err = mt.GetPartitionByName(2, "p2", tsoStart)
assert.NotNil(t, err)
}

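The rewritten assertions also pin down the timestamp semantics that survive the refactor: a read at ts sees only partitions that existed at or before ts, with ts = 0 meaning latest. A toy versioned map with that read rule (self-contained; the real MetaTable snapshots through etcd, not an in-memory slice):

package main

import (
	"fmt"
	"sort"
)

// snapshotMap records (ts, name->id) versions; Get answers from the newest
// version not after ts, which is the read behavior the test exercises.
type snapshotMap struct {
	ts   []uint64
	vals []map[string]int64
}

func (s *snapshotMap) Put(ts uint64, m map[string]int64) {
	s.ts = append(s.ts, ts) // assumes ascending timestamps
	s.vals = append(s.vals, m)
}

func (s *snapshotMap) Get(name string, ts uint64) (int64, bool) {
	if ts == 0 {
		ts = ^uint64(0) // ts==0 means "latest"
	}
	i := sort.Search(len(s.ts), func(i int) bool { return s.ts[i] > ts })
	for j := i - 1; j >= 0; j-- {
		if id, ok := s.vals[j][name]; ok {
			return id, true
		}
	}
	return 0, false
}

func main() {
	var s snapshotMap
	s.Put(10, map[string]int64{"p1": 11})
	s.Put(20, map[string]int64{"p1": 11, "p2": 12})
	fmt.Println(s.Get("p2", 15)) // 0 false: p2 not visible yet
	fmt.Println(s.Get("p2", 20)) // 12 true
}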
@ -29,15 +29,14 @@ type ParamTable struct {
Address string
Port    int

PulsarAddress string
RocksmqPath string
EtcdEndpoints []string
MetaRootPath string
KvRootPath string
MsgChannelSubName string
TimeTickChannel string
StatisticsChannel string
DataCoordSegmentChannel string // data service create segment, or data node flush segment
PulsarAddress string
RocksmqPath string
EtcdEndpoints []string
MetaRootPath string
KvRootPath string
MsgChannelSubName string
TimeTickChannel string
StatisticsChannel string

MaxPartitionNum int64
DefaultPartitionName string
@ -70,7 +69,6 @@ func (p *ParamTable) Init() {
p.initMsgChannelSubName()
p.initTimeTickChannel()
p.initStatisticsChannelName()
p.initSegmentInfoChannelName()

p.initMaxPartitionNum()
p.initMinSegmentSizeToEnableIndex()
@ -157,14 +155,6 @@ func (p *ParamTable) initStatisticsChannelName() {
p.StatisticsChannel = channel
}

func (p *ParamTable) initSegmentInfoChannelName() {
	channel, err := p.Load("msgChannel.chanNamePrefix.dataCoordSegmentInfo")
	if err != nil {
		panic(err)
	}
	p.DataCoordSegmentChannel = channel
}

func (p *ParamTable) initMaxPartitionNum() {
	p.MaxPartitionNum = p.ParseInt64("rootcoord.maxPartitionNum")
}

@ -101,15 +101,10 @@ type Core struct {
//setMsgStreams, send drop partition into dd channel
SendDdDropPartitionReq func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error

// if rootcoord create segment, datacoord will put segment msg into this channel
DataCoordSegmentChan <-chan *ms.MsgPack

// if segment flush completed, data node would put segment msg into this channel
DataNodeFlushedSegmentChan <-chan *ms.MsgPack

//get binlog file path from data service,
CallGetBinlogFilePathsService func(ctx context.Context, segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error)
CallGetNumRowsService func(ctx context.Context, segID typeutil.UniqueID, isFromFlushedChan bool) (int64, error)
CallGetFlushedSegmentsService func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error)

//call index builder's client to build index, return build id
CallBuildIndexService func(ctx context.Context, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo) (typeutil.UniqueID, error)
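This hunk is the heart of the change to Core: the two msgstream channels go away and a synchronous callback into DataCoord takes their place, validated by checkInit like the other Call* hooks. A minimal stand-in showing that wiring order (generic Go; milvus types replaced by int64):

package main

import (
	"context"
	"errors"
	"fmt"
)

// coordinator models the new Core shape: instead of draining a msgstream
// channel, it holds a synchronous callback into DataCoord.
type coordinator struct {
	getFlushedSegments func(ctx context.Context, collID, partID int64) ([]int64, error)
}

// checkInit refuses to start until the callback has been installed.
func (c *coordinator) checkInit() error {
	if c.getFlushedSegments == nil {
		return errors.New("getFlushedSegments is nil")
	}
	return nil
}

func main() {
	c := &coordinator{}
	fmt.Println(c.checkInit()) // error until the callback is wired

	c.getFlushedSegments = func(ctx context.Context, collID, partID int64) ([]int64, error) {
		return []int64{1000}, nil // stand-in for a DataCoord RPC
	}
	fmt.Println(c.checkInit()) // nil
	segs, _ := c.getFlushedSegments(context.Background(), 1, -1)
	fmt.Println(segs)
}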
@ -214,6 +209,9 @@ func (c *Core) checkInit() error {
if c.CallDropIndexService == nil {
	return fmt.Errorf("CallDropIndexService is nil")
}
if c.CallGetFlushedSegmentsService == nil {
	return fmt.Errorf("CallGetFlushedSegments is nil")
}
if c.NewProxyClient == nil {
	return fmt.Errorf("NewProxyClient is nil")
}
@ -223,12 +221,6 @@ func (c *Core) checkInit() error {
if c.CallReleasePartitionService == nil {
	return fmt.Errorf("CallReleasePartitionService is nil")
}
if c.DataCoordSegmentChan == nil {
	return fmt.Errorf("DataCoordSegmentChan is nil")
}
if c.DataNodeFlushedSegmentChan == nil {
	return fmt.Errorf("DataNodeFlushedSegmentChan is nil")
}

return nil
}
@ -248,148 +240,6 @@ func (c *Core) startTimeTickLoop() {
}
}

// datacoord send segment info msg to rootcoord when create segment
func (c *Core) startDataCoordSegmentLoop() {
	for {
		select {
		case <-c.ctx.Done():
			log.Debug("close data service segment loop")
			return
		case segMsg, ok := <-c.DataCoordSegmentChan:
			if !ok {
				log.Debug("data service segment channel is closed, exit loop")
				return
			}
			var segInfos []*datapb.SegmentInfo
			for _, msg := range segMsg.Msgs {
				if msg.Type() != commonpb.MsgType_SegmentInfo {
					continue
				}
				segInfoMsg, ok := msg.(*ms.SegmentInfoMsg)
				if !ok {
					log.Debug("input msg is not SegmentInfoMsg")
					continue
				}
				if segInfoMsg.Segment != nil {
					segInfos = append(segInfos, segInfoMsg.Segment)
					log.Debug("open segment", zap.Int64("segmentID", segInfoMsg.Segment.ID))
				}
			}
			if len(segInfos) > 0 {
				startPosStr, err := EncodeMsgPositions(segMsg.StartPositions)
				if err != nil {
					log.Error("encode msg start positions fail", zap.String("err", err.Error()))
					continue
				}
				endPosStr, err := EncodeMsgPositions(segMsg.EndPositions)
				if err != nil {
					log.Error("encode msg end positions fail", zap.String("err", err.Error()))
					continue
				}

				if _, err := c.MetaTable.AddSegment(segInfos, startPosStr, endPosStr); err != nil {
					//what if rootcoord add segment failed, but datacoord success?
					log.Debug("add segment info meta table failed ", zap.String("error", err.Error()))
					continue
				}
			}
		}
	}
}

// data node will put msg in this channel when flush segment
func (c *Core) startDataNodeFlushedSegmentLoop() {
	for {
		select {
		case <-c.ctx.Done():
			log.Debug("close segment flush completed loop")
			return
		case segMsg, ok := <-c.DataNodeFlushedSegmentChan:
			if !ok {
				log.Debug("data node segment flush completed chan has closed, exit loop")
				return
			}

			startPosStr, err := EncodeMsgPositions(segMsg.StartPositions)
			if err != nil {
				log.Error("encode msg start positions fail", zap.String("err", err.Error()))
				continue
			}
			endPosStr, err := EncodeMsgPositions(segMsg.EndPositions)
			if err != nil {
				log.Error("encode msg end positions fail", zap.String("err", err.Error()))
				continue
			}

			var segIdxInfos []*etcdpb.SegmentIndexInfo
			for _, msg := range segMsg.Msgs {
				// check msg type
				if msg.Type() != commonpb.MsgType_SegmentFlushDone {
					continue
				}
				flushMsg, ok := msg.(*ms.FlushCompletedMsg)
				if !ok {
					log.Debug("input msg is not FlushCompletedMsg")
					continue
				}
				segID := flushMsg.Segment.GetID()
				log.Debug("flush segment", zap.Int64("id", segID))

				coll, err := c.MetaTable.GetCollectionBySegmentID(segID)
				if err != nil {
					log.Warn("GetCollectionBySegmentID error", zap.Error(err))
					continue
				}
				err = c.MetaTable.AddFlushedSegment(segID)
				if err != nil {
					log.Warn("AddFlushedSegment error", zap.Error(err))
					continue
				}

				if len(coll.FieldIndexes) == 0 {
					log.Debug("no index params on collection", zap.String("collection_name", coll.Schema.Name))
				}

				for _, f := range coll.FieldIndexes {
					fieldSch, err := GetFieldSchemaByID(coll, f.FiledID)
					if err != nil {
						log.Warn("field schema not found", zap.Int64("field id", f.FiledID))
						continue
					}

					idxInfo, err := c.MetaTable.GetIndexByID(f.IndexID)
					if err != nil {
						log.Warn("index not found", zap.Int64("index id", f.IndexID))
						continue
					}

					info := etcdpb.SegmentIndexInfo{
						SegmentID:   segID,
						FieldID:     fieldSch.FieldID,
						IndexID:     idxInfo.IndexID,
						EnableIndex: false,
					}
					info.BuildID, err = c.BuildIndex(c.ctx, segID, fieldSch, idxInfo, true)
					if err == nil && info.BuildID != 0 {
						info.EnableIndex = true
					} else {
						log.Error("build index fail", zap.Int64("buildid", info.BuildID), zap.Error(err))
					}

					segIdxInfos = append(segIdxInfos, &info)
				}
			}

			if len(segIdxInfos) > 0 {
				_, err = c.MetaTable.AddIndex(segIdxInfos, startPosStr, endPosStr)
				if err != nil {
					log.Error("AddIndex fail", zap.String("err", err.Error()))
				}
			}
		}
	}
}
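Both deleted loops share one skeleton — block on the context or the channel, bail out when either closes, otherwise decode the pack and write meta — and that is exactly the push pipeline this PR retires in favor of the SegmentFlushCompleted RPC and the flushed-segments pull. The skeleton on its own, as a self-contained sketch with strings standing in for MsgPacks:

package main

import (
	"context"
	"fmt"
	"time"
)

// drain models the removed loops: a goroutine consuming packs from a
// channel until the context is cancelled or the channel closes.
func drain(ctx context.Context, ch <-chan string) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("close loop")
			return
		case msg, ok := <-ch:
			if !ok {
				fmt.Println("channel closed, exit loop")
				return
			}
			fmt.Println("handle", msg)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan string, 2)
	ch <- "segment-info"
	go drain(ctx, ch)
	time.Sleep(50 * time.Millisecond)
	cancel()
	time.Sleep(50 * time.Millisecond)
}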

func (c *Core) tsLoop() {
	tsoTicker := time.NewTicker(tso.UpdateTimestampStep)
	defer tsoTicker.Stop()
@ -435,6 +285,26 @@ func (c *Core) watchProxyLoop() {

}

func (c *Core) getSegments(ctx context.Context, collID typeutil.UniqueID) (map[typeutil.UniqueID]typeutil.UniqueID, error) {
	collMeta, err := c.MetaTable.GetCollectionByID(collID, 0)
	if err != nil {
		return nil, err
	}
	segID2PartID := map[typeutil.UniqueID]typeutil.UniqueID{}
	for _, partID := range collMeta.PartitionIDs {
		if seg, err := c.CallGetFlushedSegmentsService(ctx, collID, partID); err == nil {
			for _, s := range seg {
				segID2PartID[s] = partID
			}
		} else {
			log.Debug("get flushed segments from data coord failed", zap.Int64("collection_id", collID), zap.Int64("partition_id", partID), zap.Error(err))
			return nil, err
		}
	}

	return segID2PartID, nil
}
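The new getSegments helper flattens the per-partition RPC answers into one segID-to-partID map, and CreateIndexReqTask further down uses both halves of it: the key set becomes the flushed list handed to GetNotIndexedSegments, the value lookup supplies the partition recorded with each index row. In miniature (IDs invented for illustration):

package main

import "fmt"

func main() {
	// segID -> partID, as getSegments returns it (values assumed).
	seg2part := map[int64]int64{1000: 11, 1001: 11, 2000: 12}

	// Key set: the flushed segments passed to GetNotIndexedSegments.
	flushed := make([]int64, 0, len(seg2part))
	for id := range seg2part {
		flushed = append(flushed, id)
	}

	// Value lookup: the partition recorded with each new index row.
	for _, id := range flushed {
		fmt.Printf("segment %d -> partition %d\n", id, seg2part[id])
	}
}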

func (c *Core) setDdMsgSendFlag(b bool) error {
	flag, err := c.MetaTable.client.Load(DDMsgSendPrefix, 0)
	if err != nil {
@ -454,33 +324,6 @@ func (c *Core) setDdMsgSendFlag(b bool) error {
return err
}

func (c *Core) startMsgStreamAndSeek(chanName string, subName string, key string) (*ms.MsgStream, error) {
	stream, err := c.msFactory.NewMsgStream(c.ctx)
	if err != nil {
		return nil, err
	}
	stream.AsConsumer([]string{chanName}, subName)
	log.Debug("AsConsumer: " + chanName + ":" + subName)

	msgPosStr, err := c.MetaTable.client.Load(key, 0)
	if err == nil {
		msgPositions := make([]*ms.MsgPosition, 0)
		if err := DecodeMsgPositions(msgPosStr, &msgPositions); err != nil {
			return nil, fmt.Errorf("decode msg positions fail, err %s", err.Error())
		}
		if len(msgPositions) > 0 {
			log.Debug("msgstream seek to position", zap.String("chanName", chanName), zap.String("SubName", subName))
			if err := stream.Seek(msgPositions); err != nil {
				return nil, fmt.Errorf("msg stream seek fail, err %s", err.Error())
			}
			log.Debug("msg stream: " + chanName + ":" + subName + " seek to stored position")
		}
	}
	stream.Start()
	log.Debug("Start Consumer", zap.String("chanName", chanName), zap.String("SubName", subName))
	return &stream, nil
}
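startMsgStreamAndSeek is the consumer-side checkpointing that becomes unnecessary once these streams are removed: load the stored positions, decode, seek, then start. A reduced stand-in for the load-and-resume step — JSON here is only a placeholder for the real position encoding, which this sketch does not reproduce:

package main

import (
	"encoding/json"
	"fmt"
)

type position struct {
	Channel string `json:"channel"`
	Offset  int64  `json:"offset"`
}

// decodePositions mirrors the DecodeMsgPositions step: a stored string is
// turned back into positions the consumer can seek to before starting.
func decodePositions(s string) ([]position, error) {
	var ps []position
	if err := json.Unmarshal([]byte(s), &ps); err != nil {
		return nil, fmt.Errorf("decode msg positions fail, err %s", err.Error())
	}
	return ps, nil
}

func main() {
	stored := `[{"channel":"seg-ch","offset":42}]`
	ps, err := decodePositions(stored)
	if err != nil {
		panic(err)
	}
	for _, p := range ps {
		fmt.Printf("seek %s to offset %d\n", p.Channel, p.Offset) // then Start()
	}
}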

func (c *Core) setMsgStreams() error {
	if Params.PulsarAddress == "" {
		return fmt.Errorf("PulsarAddress is empty")
@ -606,28 +449,6 @@ func (c *Core) setMsgStreams() error {
return c.dmlChannels.BroadcastAll(channelNames, &msgPack)
}

if Params.DataCoordSegmentChannel == "" {
	return fmt.Errorf("DataCoordSegmentChannel is empty")
}

// data service will put msg into this channel when create segment
dsChanName := Params.DataCoordSegmentChannel
dsSubName := Params.MsgChannelSubName + "ds"
dsStream, err := c.startMsgStreamAndSeek(dsChanName, dsSubName, SegInfoMsgEndPosPrefix)
if err != nil {
	return err
}
c.DataCoordSegmentChan = (*dsStream).Chan()

// data node will put msg into this channel when flush segment
dnChanName := Params.DataCoordSegmentChannel
dnSubName := Params.MsgChannelSubName + "dn"
dnStream, err := c.startMsgStreamAndSeek(dnChanName, dnSubName, FlushedSegMsgEndPosPrefix)
if err != nil {
	return err
}
c.DataNodeFlushedSegmentChan = (*dnStream).Chan()

return nil
}
@ -747,6 +568,42 @@ func (c *Core) SetDataCoord(ctx context.Context, s types.DataCoord) error {
retErr = nil
return
}

c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) (retSegIDs []typeutil.UniqueID, retErr error) {
	defer func() {
		if err := recover(); err != nil {
			retSegIDs = []typeutil.UniqueID{}
			retErr = fmt.Errorf("get flushed segments from data coord panic, msg = %v", err)
			return
		}
	}()
	<-initCh
	req := &datapb.GetFlushedSegmentsRequest{
		Base: &commonpb.MsgBase{
			MsgType:   0, //TODO,msg type
			MsgID:     0,
			Timestamp: 0,
			SourceID:  c.session.ServerID,
		},
		CollectionID: collID,
		PartitionID:  partID,
	}
	rsp, err := s.GetFlushedSegments(ctx, req)
	if err != nil {
		retSegIDs = []typeutil.UniqueID{}
		retErr = err
		return
	}
	if rsp.Status.ErrorCode != commonpb.ErrorCode_Success {
		retSegIDs = []typeutil.UniqueID{}
		retErr = fmt.Errorf("get flushed segments from data coord failed, reason = %s", rsp.Status.Reason)
		return
	}
	retSegIDs = rsp.Segments
	retErr = nil
	return
}

return nil
}
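CallGetFlushedSegmentsService is installed with the same defensive template as the neighboring Call* hooks: wait on initCh, issue the RPC, and let a deferred recover turn any client panic into an ordinary error through the named returns. That trick in isolation, as a self-contained sketch:

package main

import "fmt"

// call converts a panic inside f into an ordinary error by assigning to
// named return values from the deferred recover handler.
func call(f func() ([]int64, error)) (ids []int64, err error) {
	defer func() {
		if r := recover(); r != nil {
			ids = nil
			err = fmt.Errorf("call panic, msg = %v", r)
		}
	}()
	return f()
}

func main() {
	_, err := call(func() ([]int64, error) { panic("connection lost") })
	fmt.Println(err) // call panic, msg = connection lost

	ids, err := call(func() ([]int64, error) { return []int64{7}, nil })
	fmt.Println(ids, err) // [7] <nil>
}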
@ -1168,8 +1025,6 @@ func (c *Core) Start() error {
return
}
go c.startTimeTickLoop()
go c.startDataCoordSegmentLoop()
go c.startDataNodeFlushedSegmentLoop()
go c.tsLoop()
go c.sessionLoop()
go c.chanTimeTick.StartWatch()
@ -1925,28 +1780,19 @@ func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlus
segID := in.Segment.GetID()
log.Debug("flush segment", zap.Int64("id", segID))

coll, err := c.MetaTable.GetCollectionBySegmentID(segID)
coll, err := c.MetaTable.GetCollectionByID(in.Segment.CollectionID, 0)
if err != nil {
log.Warn("GetCollectionBySegmentID error", zap.Error(err))
log.Warn("GetCollectionByID error", zap.Error(err))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("GetCollectionBySegmentID error = %v", err),
}, nil
}
err = c.MetaTable.AddFlushedSegment(segID)
if err != nil {
log.Warn("AddFlushedSegment error", zap.Error(err))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("AddFlushedSegment error = %v", err),
}, nil
}

if len(coll.FieldIndexes) == 0 {
log.Debug("no index params on collection", zap.String("collection_name", coll.Schema.Name))
}

var segIdxInfos []*etcdpb.SegmentIndexInfo
for _, f := range coll.FieldIndexes {
fieldSch, err := GetFieldSchemaByID(coll, f.FiledID)
if err != nil {
@ -1971,47 +1817,14 @@ func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlus
info.EnableIndex = true
} else {
log.Error("build index fail", zap.Int64("buildid", info.BuildID), zap.Error(err))
continue
}

segIdxInfos = append(segIdxInfos, &info)
}
if len(segIdxInfos) > 0 {
_, err = c.MetaTable.AddIndex(segIdxInfos, "", "")
_, err = c.MetaTable.AddIndex(&info, in.Segment.CollectionID, in.Segment.PartitionID)
if err != nil {
log.Error("AddIndex fail", zap.String("err", err.Error()))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("AddIndex error = %v", err),
}, nil
}
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
}

func (c *Core) AddNewSegment(ctx context.Context, in *datapb.SegmentMsg) (*commonpb.Status, error) {
	code := c.stateCode.Load().(internalpb.StateCode)
	if code != internalpb.StateCode_Healthy {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    fmt.Sprintf("state code = %s", internalpb.StateCode_name[int32(code)]),
		}, nil
	}
	if in.Base.MsgType != commonpb.MsgType_SegmentInfo {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    fmt.Sprintf("AddNewSegment with incorrect msgtype = %s", commonpb.MsgType_name[int32(in.Base.MsgType)]),
		}, nil
	}
	if _, err := c.MetaTable.AddSegment([]*datapb.SegmentInfo{in.Segment}, "", ""); err != nil {
		log.Debug("add segment info meta table failed ", zap.String("error", err.Error()))
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    fmt.Sprintf("add segment info meta table failed, error = %v", err),
		}, nil
	}
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
		Reason:    "",
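Worth noting the error convention the rewritten handler keeps: application failures come back as a commonpb.Status carrying UnexpectedError and a Reason, while the Go error stays nil so gRPC still delivers the payload. Reduced to its skeleton (local types here, not the generated protobuf ones):

package main

import "fmt"

type status struct {
	ErrorCode string
	Reason    string
}

// handle returns (status, nil) on application failure, reserving a non-nil
// error for transport-level problems only.
func handle(healthy bool) (*status, error) {
	if !healthy {
		return &status{ErrorCode: "UnexpectedError", Reason: "state code = Abnormal"}, nil
	}
	return &status{ErrorCode: "Success"}, nil
}

func main() {
	st, err := handle(false)
	fmt.Println(st.ErrorCode, st.Reason, err) // UnexpectedError state code = Abnormal <nil>
}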
@ -26,12 +26,14 @@ import (
"github.com/milvus-io/milvus/internal/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"

"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/querypb"

"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/types"
@ -66,6 +68,8 @@ func (p *proxyMock) GetCollArray() []string {
type dataMock struct {
	types.DataCoord
	randVal int
	mu      sync.Mutex
	segs    []typeutil.UniqueID
}

func (d *dataMock) Init() error {
@ -114,13 +118,18 @@ func (d *dataMock) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInf
}, nil
}

func (d *dataMock) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
func (d *dataMock) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedSegmentsRequest) (*datapb.GetFlushedSegmentsResponse, error) {
d.mu.Lock()
defer d.mu.Unlock()

rsp := &datapb.GetFlushedSegmentsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Value: fmt.Sprintf("segment-info-channel-%d", d.randVal),
}, nil
}
rsp.Segments = append(rsp.Segments, d.segs...)
return rsp, nil
}

type queryMock struct {
@ -308,7 +317,6 @@ func TestRootCoord(t *testing.T) {
Params.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.MetaRootPath)
Params.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.KvRootPath)
Params.MsgChannelSubName = fmt.Sprintf("subname-%d", randVal)
Params.DataCoordSegmentChannel = fmt.Sprintf("data-service-segment-%d", randVal)

err = core.Register()
assert.Nil(t, err)
@ -369,32 +377,12 @@ func TestRootCoord(t *testing.T) {
err = tmpFactory.SetParams(m)
assert.Nil(t, err)

dataCoordSegmentStream, _ := tmpFactory.NewMsgStream(ctx)
dataCoordSegmentStream.AsProducer([]string{Params.DataCoordSegmentChannel})

timeTickStream, _ := tmpFactory.NewMsgStream(ctx)
timeTickStream.AsConsumer([]string{Params.TimeTickChannel}, Params.MsgChannelSubName)
timeTickStream.Start()

dmlStream, _ := tmpFactory.NewMsgStream(ctx)

// test dataCoordSegmentStream seek
dataNodeSubName := Params.MsgChannelSubName + "dn"
flushedSegStream, _ := tmpFactory.NewMsgStream(ctx)
flushedSegStream.AsConsumer([]string{Params.DataCoordSegmentChannel}, dataNodeSubName)
flushedSegStream.Start()
msgPackTmp := GenFlushedSegMsgPack(9999)
err = dataCoordSegmentStream.Produce(msgPackTmp)
assert.Nil(t, err)

flushedSegMsgPack := flushedSegStream.Consume()
flushedSegStream.Close()

flushedSegPosStr, _ := EncodeMsgPositions(flushedSegMsgPack.EndPositions)

_, err = etcdCli.Put(ctx, path.Join(Params.MetaRootPath, FlushedSegMsgEndPosPrefix), flushedSegPosStr)
assert.Nil(t, err)

err = core.Init()
assert.Nil(t, err)
@ -677,16 +665,16 @@ func TestRootCoord(t *testing.T) {
collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Equal(t, 2, len(collMeta.PartitionIDs))
partMeta, err := core.MetaTable.GetPartitionByID(1, collMeta.PartitionIDs[1], 0)
partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0)
assert.Nil(t, err)
assert.Equal(t, partName, partMeta.PartitionName)
assert.Equal(t, partName, partNameIdx1)

msgs := getNotTtMsg(ctx, 1, dmlStream.Chan())
assert.Equal(t, 1, len(msgs))
partMsg, ok := (msgs[0]).(*msgstream.CreatePartitionMsg)
assert.True(t, ok)
assert.Equal(t, collMeta.ID, partMsg.CollectionID)
assert.Equal(t, partMeta.PartitionID, partMsg.PartitionID)
assert.Equal(t, collMeta.PartitionIDs[1], partMsg.PartitionID)

assert.Equal(t, 1, len(pnm.GetCollArray()))
assert.Equal(t, collName, pnm.GetCollArray()[0])
@ -706,7 +694,7 @@ func TestRootCoord(t *testing.T) {
err = proto.UnmarshalText(ddOp.Body, &ddReq)
assert.Nil(t, err)
assert.Equal(t, collMeta.ID, ddReq.CollectionID)
assert.Equal(t, partMeta.PartitionID, ddReq.PartitionID)
assert.Equal(t, collMeta.PartitionIDs[1], ddReq.PartitionID)
})

t.Run("has partition", func(t *testing.T) {
@ -752,23 +740,9 @@ func TestRootCoord(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
partID := coll.PartitionIDs[1]
part, err := core.MetaTable.GetPartitionByID(1, partID, 0)
assert.Nil(t, err)
assert.Zero(t, len(part.SegmentIDs))

seg := &datapb.SegmentInfo{
ID: 1000,
CollectionID: coll.ID,
PartitionID: part.PartitionID,
}
segInfoMsgPack := GenSegInfoMsgPack(seg)
err = dataCoordSegmentStream.Broadcast(segInfoMsgPack)
assert.Nil(t, err)
time.Sleep(time.Second)

part, err = core.MetaTable.GetPartitionByID(1, partID, 0)
assert.Nil(t, err)
assert.Equal(t, 1, len(part.SegmentIDs))
dm.mu.Lock()
dm.segs = []typeutil.UniqueID{1000}
dm.mu.Unlock()

req := &milvuspb.ShowSegmentsRequest{
Base: &commonpb.MsgBase{
@ -893,28 +867,20 @@ func TestRootCoord(t *testing.T) {
coll, err := core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
partID := coll.PartitionIDs[1]
part, err := core.MetaTable.GetPartitionByID(1, partID, 0)
assert.Nil(t, err)
assert.Equal(t, 1, len(part.SegmentIDs))

seg := &datapb.SegmentInfo{
ID: segID,
CollectionID: coll.ID,
PartitionID: part.PartitionID,
flushMsg := datapb.SegmentFlushCompletedMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_SegmentFlushDone,
},
Segment: &datapb.SegmentInfo{
ID: segID,
CollectionID: coll.ID,
PartitionID: partID,
},
}
segInfoMsgPack := GenSegInfoMsgPack(seg)
err = dataCoordSegmentStream.Broadcast(segInfoMsgPack)
st, err := core.SegmentFlushCompleted(ctx, &flushMsg)
assert.Nil(t, err)
time.Sleep(time.Second)

part, err = core.MetaTable.GetPartitionByID(1, partID, 0)
assert.Nil(t, err)
assert.Equal(t, 2, len(part.SegmentIDs))

flushedSegMsgPack := GenFlushedSegMsgPack(segID)
err = dataCoordSegmentStream.Broadcast(flushedSegMsgPack)
assert.Nil(t, err)
time.Sleep(time.Second)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_Success)

req := &milvuspb.DescribeIndexRequest{
Base: &commonpb.MsgBase{
@ -1031,9 +997,9 @@ func TestRootCoord(t *testing.T) {
collMeta, err = core.MetaTable.GetCollectionByName(collName, 0)
assert.Nil(t, err)
assert.Equal(t, 1, len(collMeta.PartitionIDs))
partMeta, err := core.MetaTable.GetPartitionByID(1, collMeta.PartitionIDs[0], 0)
partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0)
assert.Nil(t, err)
assert.Equal(t, Params.DefaultPartitionName, partMeta.PartitionName)
assert.Equal(t, Params.DefaultPartitionName, partName)

msgs := getNotTtMsg(ctx, 1, dmlStream.Chan())
assert.Equal(t, 1, len(msgs))
@ -1834,9 +1800,6 @@ func TestRootCoord2(t *testing.T) {
err = msFactory.SetParams(m)
assert.Nil(t, err)

dataCoordSegmentStream, _ := msFactory.NewMsgStream(ctx)
dataCoordSegmentStream.AsProducer([]string{Params.DataCoordSegmentChannel})

timeTickStream, _ := msFactory.NewMsgStream(ctx)
timeTickStream.AsConsumer([]string{Params.TimeTickChannel}, Params.MsgChannelSubName)
timeTickStream.Start()
@ -1972,6 +1935,12 @@ func TestCheckInit(t *testing.T) {
err = c.checkInit()
assert.NotNil(t, err)

c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) {
	return nil, nil
}
err = c.checkInit()
assert.NotNil(t, err)

c.CallBuildIndexService = func(ctx context.Context, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo) (typeutil.UniqueID, error) {
	return 0, nil
}
@ -2000,13 +1969,5 @@ func TestCheckInit(t *testing.T) {
return nil
}
err = c.checkInit()
assert.NotNil(t, err)

c.DataCoordSegmentChan = make(chan *msgstream.MsgPack)
err = c.checkInit()
assert.NotNil(t, err)

c.DataNodeFlushedSegmentChan = make(chan *msgstream.MsgPack)
err = c.checkInit()
assert.Nil(t, err)
}

@ -146,12 +146,6 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
PhysicalChannelNames: chanNames,
}

// every collection has _default partition
partInfo := etcdpb.PartitionInfo{
	PartitionName: Params.DefaultPartitionName,
	PartitionID:   partID,
	SegmentIDs:    make([]typeutil.UniqueID, 0, 16),
}
idxInfo := make([]*etcdpb.IndexInfo, 0, 16)
/////////////////////// ignore index param from create_collection /////////////////////////
//for _, field := range schema.Fields {
@ -206,7 +200,7 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
PartitionName: Params.DefaultPartitionName,
DbID: 0, //TODO, not used
CollectionID: collInfo.ID,
PartitionID: partInfo.PartitionID,
PartitionID: partID,
}

// build DdOperation and save it into etcd, when ddmsg send fail,
@ -217,7 +211,7 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
return EncodeDdOperation(&ddCollReq, &ddPartReq, CreateCollectionDDType)
}

ts, err := t.core.MetaTable.AddCollection(&collInfo, &partInfo, idxInfo, ddOp)
ts, err := t.core.MetaTable.AddCollection(&collInfo, partID, Params.DefaultPartitionName, idxInfo, ddOp)
if err != nil {
	return err
}
@ -266,6 +260,10 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
DbID: 0, //not used
CollectionID: collMeta.ID,
}
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, collMeta.ID, -1)
if err != nil {
	log.Debug("Get flushed segment from data coords failed", zap.String("collection_name", t.Req.CollectionName), zap.Error(err))
}

// build DdOperation and save it into etcd, when ddmsg send fail,
// system can restore ddmsg from etcd and re-send
@ -274,7 +272,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
return EncodeDdOperation(&ddReq, nil, DropCollectionDDType)
}

ts, err := t.core.MetaTable.DeleteCollection(collMeta.ID, ddOp)
ts, err := t.core.MetaTable.DeleteCollection(collMeta.ID, segIDs, ddOp)
if err != nil {
	return err
}
@ -490,10 +488,14 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
if err != nil {
	return err
}
partInfo, err := t.core.MetaTable.GetPartitionByName(collInfo.ID, t.Req.PartitionName, 0)
partID, err := t.core.MetaTable.GetPartitionByName(collInfo.ID, t.Req.PartitionName, 0)
if err != nil {
	return err
}
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, collInfo.ID, partID)
if err != nil {
	log.Debug("get flushed segments from data coord failed", zap.String("collection_name", t.Req.CollectionName), zap.String("partition_name", t.Req.PartitionName))
}

ddReq := internalpb.DropPartitionRequest{
Base: t.Req.Base,
@ -502,7 +504,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
PartitionName: t.Req.PartitionName,
DbID: 0, //todo,not used
CollectionID: collInfo.ID,
PartitionID: partInfo.PartitionID,
PartitionID: partID,
}

// build DdOperation and save it into etcd, when ddmsg send fail,
@ -512,7 +514,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
return EncodeDdOperation(&ddReq, nil, DropPartitionDDType)
}

ts, _, err := t.core.MetaTable.DeletePartition(collInfo.ID, t.Req.PartitionName, ddOp)
ts, _, err := t.core.MetaTable.DeletePartition(collInfo.ID, t.Req.PartitionName, segIDs, ddOp)
if err != nil {
	return err
}
@ -539,7 +541,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {

//notify query service to release partition
go func() {
	if err = t.core.CallReleasePartitionService(t.core.ctx, ts, 0, collInfo.ID, []typeutil.UniqueID{partInfo.PartitionID}); err != nil {
	if err = t.core.CallReleasePartitionService(t.core.ctx, ts, 0, collInfo.ID, []typeutil.UniqueID{partID}); err != nil {
		log.Warn("CallReleaseCollectionService failed", zap.String("error", err.Error()))
	}
}()
@ -594,14 +596,9 @@ func (t *ShowPartitionReqTask) Execute(ctx context.Context) error {
if err != nil {
	return err
}
for _, partID := range coll.PartitionIDs {
	partMeta, err := t.core.MetaTable.GetPartitionByID(coll.ID, partID, 0)
	if err != nil {
		return err
	}
	t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, partMeta.PartitionID)
	t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, partMeta.PartitionName)
}
t.Rsp.PartitionIDs = coll.PartitionIDs
t.Rsp.PartitionNames = coll.PartitonNames

return nil
}

@ -624,21 +621,19 @@ func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error {
return err
}
exist := false
for _, partID := range coll.PartitionIDs {
if exist {
break
}
partMeta, err := t.core.MetaTable.GetPartitionByID(coll.ID, partID, 0)
if err != nil {
return err
}
for _, e := range partMeta.SegmentIDs {
if e == t.Req.SegmentID {
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, -1)
if err != nil {
log.Debug("get flushed segment from data coord failed", zap.String("collection_name", coll.Schema.Name), zap.Error(err))
exist = true
} else {
for _, id := range segIDs {
if id == t.Req.SegmentID {
exist = true
break
}
}
}

if !exist {
return fmt.Errorf("segment id %d not belong to collection id %d", t.Req.SegmentID, t.Req.CollectionID)
}
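Worth noting in the new DescribeSegment path: when the flushed-segments call fails, exist is forced to true, so a DataCoord outage degrades to accepting the segment rather than wrongly rejecting it. The fail-open membership check in miniature (a self-contained sketch of the behavior in the hunk above):

package main

import (
	"errors"
	"fmt"
)

// contains is fail-open: on lookup error it reports membership as true,
// preferring a possible false positive over rejecting a valid segment.
func contains(ids []int64, want int64, err error) bool {
	if err != nil {
		return true // cannot verify; do not reject
	}
	for _, id := range ids {
		if id == want {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(contains([]int64{1, 2}, 3, nil))                // false
	fmt.Println(contains(nil, 3, errors.New("datacoord down"))) // true
}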
@ -683,11 +678,13 @@ func (t *ShowSegmentReqTask) Execute(ctx context.Context) error {
if !exist {
	return fmt.Errorf("partition id = %d not belong to collection id = %d", t.Req.PartitionID, t.Req.CollectionID)
}
partMeta, err := t.core.MetaTable.GetPartitionByID(coll.ID, t.Req.PartitionID, 0)
segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, t.Req.PartitionID)
if err != nil {
	log.Debug("get flushed segments from data coord failed", zap.String("collection name", coll.Schema.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err))
	return err
}
t.Rsp.SegmentIDs = append(t.Rsp.SegmentIDs, partMeta.SegmentIDs...)

t.Rsp.SegmentIDs = append(t.Rsp.SegmentIDs, segIDs...)
return nil
}

@ -715,16 +712,29 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
IndexID: indexID,
IndexParams: t.Req.ExtraParams,
}
segIDs, field, err := t.core.MetaTable.GetNotIndexedSegments(t.Req.CollectionName, t.Req.FieldName, idxInfo)
log.Debug("RootCoord CreateIndexReqTask metaTable.GetNotIndexedSegments", zap.Error(err))
collMeta, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0)
if err != nil {
	return err
}
segID2PartID, err := t.core.getSegments(ctx, collMeta.ID)
flushedSegs := make([]typeutil.UniqueID, 0, len(segID2PartID))
for k := range segID2PartID {
	flushedSegs = append(flushedSegs, k)
}
if err != nil {
	log.Debug("get flushed segments from data coord failed", zap.String("collection_name", collMeta.Schema.Name), zap.Error(err))
	return err
}

segIDs, field, err := t.core.MetaTable.GetNotIndexedSegments(t.Req.CollectionName, t.Req.FieldName, idxInfo, flushedSegs)
if err != nil {
	log.Debug("RootCoord CreateIndexReqTask metaTable.GetNotIndexedSegments", zap.Error(err))
	return err
}
if field.DataType != schemapb.DataType_FloatVector && field.DataType != schemapb.DataType_BinaryVector {
	return fmt.Errorf("field name = %s, data type = %s", t.Req.FieldName, schemapb.DataType_name[int32(field.DataType)])
}

var segIdxInfos []*etcdpb.SegmentIndexInfo
for _, segID := range segIDs {
info := etcdpb.SegmentIndexInfo{
SegmentID: segID,
@ -739,12 +749,13 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
if info.BuildID != 0 {
	info.EnableIndex = true
}
segIdxInfos = append(segIdxInfos, &info)
partID := segID2PartID[segID]
if _, err := t.core.MetaTable.AddIndex(&info, collMeta.ID, partID); err != nil {
	log.Debug("Add index into meta table failed", zap.Int64("collection_id", collMeta.ID), zap.Int64("index_id", info.IndexID), zap.Int64("build_id", info.BuildID), zap.Error(err))
}
}

_, err = t.core.MetaTable.AddIndex(segIdxInfos, "", "")
log.Debug("RootCoord CreateIndexReq", zap.Any("segIdxInfos", segIdxInfos), zap.Error(err))
return err
return nil
}

type DescribeIndexReqTask struct {
@ -795,7 +806,7 @@ func (t *DropIndexReqTask) Execute(ctx context.Context) error {
if t.Type() != commonpb.MsgType_DropIndex {
	return fmt.Errorf("drop index, msg type = %s", commonpb.MsgType_name[int32(t.Type())])
}
_, info, err := t.core.MetaTable.GetIndexByName(t.Req.CollectionName, t.Req.IndexName)
collInfo, info, err := t.core.MetaTable.GetIndexByName(t.Req.CollectionName, t.Req.IndexName)
if err != nil {
	log.Warn("GetIndexByName failed,", zap.String("collection name", t.Req.CollectionName), zap.String("field name", t.Req.FieldName), zap.String("index name", t.Req.IndexName), zap.Error(err))
	return err
@ -806,10 +817,15 @@ func (t *DropIndexReqTask) Execute(ctx context.Context) error {
if len(info) != 1 {
	return fmt.Errorf("len(index) = %d", len(info))
}
segIDs, err := t.core.CallGetFlushedSegmentsService(t.ctx, collInfo.ID, -1)
if err != nil {
	log.Debug("call get flushed segments from data coord failed", zap.String("collection_name", collInfo.Schema.Name), zap.Error(err))
}

err = t.core.CallDropIndexService(ctx, info[0].IndexID)
if err != nil {
	return err
}
_, _, _, err = t.core.MetaTable.DropIndex(t.Req.CollectionName, t.Req.FieldName, t.Req.IndexName)
_, _, _, err = t.core.MetaTable.DropIndex(t.Req.CollectionName, t.Req.FieldName, t.Req.IndexName, segIDs)
return err
}

@ -96,7 +96,6 @@ func SegmentIndexInfoEqual(info1 *etcdpb.SegmentIndexInfo, info2 *etcdpb.Segment
return info1.SegmentID == info2.SegmentID &&
	info1.FieldID == info2.FieldID &&
	info1.IndexID == info2.IndexID &&
	info1.BuildID == info2.BuildID &&
	info1.EnableIndex == info2.EnableIndex
}

@ -111,7 +111,6 @@ type RootCoord interface {
ShowSegments(ctx context.Context, req *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error)
ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error)
SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error)
AddNewSegment(ctx context.Context, in *datapb.SegmentMsg) (*commonpb.Status, error)
}

// RootCoordComponent is used by grpc server of RootCoord