mirror of https://github.com/milvus-io/milvus.git
Signed-off-by: Wei Liu <wei.liu@zilliz.com>
Co-authored-by: wei liu <wei.liu@zilliz.com>
pull/21294/head
parent 2e99557554
commit 642d63c57d
@@ -144,5 +144,4 @@ type QueryCoordCatalog interface {
     ReleasePartition(collection int64, partitions ...int64) error
     ReleaseReplicas(collectionID int64) error
     ReleaseReplica(collection, replica int64) error
-    RemoveHandoffEvent(segmentInfo *querypb.SegmentInfo) error
 }
@@ -220,6 +220,7 @@ message SegmentLoadInfo {
   repeated FieldIndexInfo index_infos = 11;
   int64 segment_size = 12;
   string insert_channel = 13;
+  internal.MsgPosition start_position = 14;
 }

 message FieldIndexInfo {
@@ -436,6 +437,7 @@ message LeaderView {
   string channel = 2;
   map<int64, SegmentDist> segment_dist = 3;
   repeated int64 growing_segmentIDs = 4;
+  map<int64, internal.MsgPosition> growing_segments = 5;
 }

 message SegmentDist {
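Taken together, the two proto additions let a shard leader report not only which growing segments it serves but where each one starts consuming on the DML channel. The sketch below is illustrative only: the helper, its arguments, and the import paths (which assume the repo layout at the time of this change) are not part of the patch; the field names come from the generated querypb/internalpb types shown further down in this diff.

package example

import (
    "github.com/milvus-io/milvus/internal/proto/internalpb"
    "github.com/milvus-io/milvus/internal/proto/querypb"
)

// buildLeaderView is a hypothetical helper showing how the new growing_segments
// map could be filled when a QueryNode answers GetDataDistribution.
func buildLeaderView(collectionID, nodeID, sealedID, growingID int64, channel string, start *internalpb.MsgPosition) *querypb.LeaderView {
    return &querypb.LeaderView{
        Collection:        collectionID,
        Channel:           channel,
        SegmentDist:       map[int64]*querypb.SegmentDist{sealedID: {NodeID: nodeID, Version: 1}},
        GrowingSegmentIDs: []int64{growingID},
        // New field in this change: each growing segment is paired with its start position.
        GrowingSegments: map[int64]*internalpb.MsgPosition{growingID: start},
    }
}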
@@ -1436,22 +1436,23 @@ func (m *UnsubDmChannelRequest) GetChannelName() string {
 }

 type SegmentLoadInfo struct {
-    SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
-    PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
-    CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
-    DbID int64 `protobuf:"varint,4,opt,name=dbID,proto3" json:"dbID,omitempty"`
-    FlushTime int64 `protobuf:"varint,5,opt,name=flush_time,json=flushTime,proto3" json:"flush_time,omitempty"`
-    BinlogPaths []*datapb.FieldBinlog `protobuf:"bytes,6,rep,name=binlog_paths,json=binlogPaths,proto3" json:"binlog_paths,omitempty"`
-    NumOfRows int64 `protobuf:"varint,7,opt,name=num_of_rows,json=numOfRows,proto3" json:"num_of_rows,omitempty"`
-    Statslogs []*datapb.FieldBinlog `protobuf:"bytes,8,rep,name=statslogs,proto3" json:"statslogs,omitempty"`
-    Deltalogs []*datapb.FieldBinlog `protobuf:"bytes,9,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
-    CompactionFrom []int64 `protobuf:"varint,10,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
-    IndexInfos []*FieldIndexInfo `protobuf:"bytes,11,rep,name=index_infos,json=indexInfos,proto3" json:"index_infos,omitempty"`
-    SegmentSize int64 `protobuf:"varint,12,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
-    InsertChannel string `protobuf:"bytes,13,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
-    XXX_NoUnkeyedLiteral struct{} `json:"-"`
-    XXX_unrecognized []byte `json:"-"`
-    XXX_sizecache int32 `json:"-"`
+    SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
+    PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
+    CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
+    DbID int64 `protobuf:"varint,4,opt,name=dbID,proto3" json:"dbID,omitempty"`
+    FlushTime int64 `protobuf:"varint,5,opt,name=flush_time,json=flushTime,proto3" json:"flush_time,omitempty"`
+    BinlogPaths []*datapb.FieldBinlog `protobuf:"bytes,6,rep,name=binlog_paths,json=binlogPaths,proto3" json:"binlog_paths,omitempty"`
+    NumOfRows int64 `protobuf:"varint,7,opt,name=num_of_rows,json=numOfRows,proto3" json:"num_of_rows,omitempty"`
+    Statslogs []*datapb.FieldBinlog `protobuf:"bytes,8,rep,name=statslogs,proto3" json:"statslogs,omitempty"`
+    Deltalogs []*datapb.FieldBinlog `protobuf:"bytes,9,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
+    CompactionFrom []int64 `protobuf:"varint,10,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
+    IndexInfos []*FieldIndexInfo `protobuf:"bytes,11,rep,name=index_infos,json=indexInfos,proto3" json:"index_infos,omitempty"`
+    SegmentSize int64 `protobuf:"varint,12,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
+    InsertChannel string `protobuf:"bytes,13,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
+    StartPosition *internalpb.MsgPosition `protobuf:"bytes,14,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_unrecognized []byte `json:"-"`
+    XXX_sizecache int32 `json:"-"`
 }

 func (m *SegmentLoadInfo) Reset() { *m = SegmentLoadInfo{} }
@@ -1570,6 +1571,13 @@ func (m *SegmentLoadInfo) GetInsertChannel() string {
     return ""
 }

+func (m *SegmentLoadInfo) GetStartPosition() *internalpb.MsgPosition {
+    if m != nil {
+        return m.StartPosition
+    }
+    return nil
+}
+
 type FieldIndexInfo struct {
     FieldID int64 `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
     // deprecated
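Like all protoc-gen-go getters, GetStartPosition checks its receiver, so callers can chain through it without guarding every level. A minimal illustration (the wrapper function is hypothetical; it assumes the same querypb import as the sketch above):

// startTimestamp is safe even when info or its StartPosition is nil: generated
// getters return the zero value instead of panicking on a nil receiver.
func startTimestamp(info *querypb.SegmentLoadInfo) uint64 {
    return info.GetStartPosition().GetTimestamp()
}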
@@ -3084,13 +3092,14 @@ func (m *GetDataDistributionResponse) GetLeaderViews() []*LeaderView {
 }

 type LeaderView struct {
-    Collection int64 `protobuf:"varint,1,opt,name=collection,proto3" json:"collection,omitempty"`
-    Channel string `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"`
-    SegmentDist map[int64]*SegmentDist `protobuf:"bytes,3,rep,name=segment_dist,json=segmentDist,proto3" json:"segment_dist,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-    GrowingSegmentIDs []int64 `protobuf:"varint,4,rep,packed,name=growing_segmentIDs,json=growingSegmentIDs,proto3" json:"growing_segmentIDs,omitempty"`
-    XXX_NoUnkeyedLiteral struct{} `json:"-"`
-    XXX_unrecognized []byte `json:"-"`
-    XXX_sizecache int32 `json:"-"`
+    Collection int64 `protobuf:"varint,1,opt,name=collection,proto3" json:"collection,omitempty"`
+    Channel string `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"`
+    SegmentDist map[int64]*SegmentDist `protobuf:"bytes,3,rep,name=segment_dist,json=segmentDist,proto3" json:"segment_dist,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+    GrowingSegmentIDs []int64 `protobuf:"varint,4,rep,packed,name=growing_segmentIDs,json=growingSegmentIDs,proto3" json:"growing_segmentIDs,omitempty"`
+    GrowingSegments map[int64]*internalpb.MsgPosition `protobuf:"bytes,5,rep,name=growing_segments,json=growingSegments,proto3" json:"growing_segments,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_unrecognized []byte `json:"-"`
+    XXX_sizecache int32 `json:"-"`
 }

 func (m *LeaderView) Reset() { *m = LeaderView{} }
@@ -3146,6 +3155,13 @@ func (m *LeaderView) GetGrowingSegmentIDs() []int64 {
     return nil
 }

+func (m *LeaderView) GetGrowingSegments() map[int64]*internalpb.MsgPosition {
+    if m != nil {
+        return m.GrowingSegments
+    }
+    return nil
+}
+
 type SegmentDist struct {
     NodeID int64 `protobuf:"varint,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
     Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
@@ -3701,6 +3717,7 @@ func init() {
     proto.RegisterType((*GetDataDistributionRequest)(nil), "milvus.proto.query.GetDataDistributionRequest")
     proto.RegisterType((*GetDataDistributionResponse)(nil), "milvus.proto.query.GetDataDistributionResponse")
     proto.RegisterType((*LeaderView)(nil), "milvus.proto.query.LeaderView")
+    proto.RegisterMapType((map[int64]*internalpb.MsgPosition)(nil), "milvus.proto.query.LeaderView.GrowingSegmentsEntry")
     proto.RegisterMapType((map[int64]*SegmentDist)(nil), "milvus.proto.query.LeaderView.SegmentDistEntry")
     proto.RegisterType((*SegmentDist)(nil), "milvus.proto.query.SegmentDist")
     proto.RegisterType((*SegmentVersionInfo)(nil), "milvus.proto.query.SegmentVersionInfo")
@@ -3717,237 +3734,241 @@ func init() {
 func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }

 var fileDescriptor_aab7cc9a69ed26e8 = []byte{
-    // 3674 bytes of a gzipped FileDescriptorProto
-    [230 lines of gzipped descriptor bytes not reproduced here]
+    // 3736 bytes of a gzipped FileDescriptorProto
+    [234 lines of gzipped descriptor bytes not reproduced here]
 }

 // Reference imports to suppress errors if they are not otherwise used.
@@ -113,7 +113,7 @@ func (b *RowCountBasedBalancer) balanceReplica(replica *meta.Replica) ([]Segment
segments := b.dist.SegmentDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nid)
// Only balance segments in targets
segments = lo.Filter(segments, func(segment *meta.Segment, _ int) bool {
return b.targetMgr.GetSegment(segment.GetID()) != nil
return b.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
})
cnt := 0
for _, s := range segments {
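The hunk above narrows balancing to segments that the current target still tracks, via the new GetHistoricalSegment(collectionID, segmentID, meta.CurrentTarget) lookup. A minimal, self-contained sketch of that filtering idea, using invented placeholder types rather than the real meta package, might look like:

package main

import "fmt"

// Simplified stand-ins for the real meta.Segment and target lookup; the actual
// Milvus types carry far more state.
type segment struct {
	ID           int64
	CollectionID int64
}

// currentTarget maps collectionID -> set of sealed segment IDs in the current target.
type currentTarget map[int64]map[int64]struct{}

func (t currentTarget) contains(collectionID, segmentID int64) bool {
	ids, ok := t[collectionID]
	if !ok {
		return false
	}
	_, ok = ids[segmentID]
	return ok
}

// filterByCurrentTarget keeps only the segments the current target still expects,
// mirroring the lo.Filter call in the balancer.
func filterByCurrentTarget(segs []segment, target currentTarget) []segment {
	out := make([]segment, 0, len(segs))
	for _, s := range segs {
		if target.contains(s.CollectionID, s.ID) {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	target := currentTarget{1: {10: {}, 11: {}}}
	dist := []segment{{ID: 10, CollectionID: 1}, {ID: 12, CollectionID: 1}}
	fmt.Println(filterByCurrentTarget(dist, target)) // only segment 10 survives
}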
@@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/querycoordv2/session"
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
"github.com/milvus-io/milvus/internal/util/etcd"
mock "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
)

@@ -59,7 +60,7 @@ func (suite *RowCountBasedBalancerTestSuite) SetupTest() {
store := meta.NewMetaStore(suite.kv)
idAllocator := RandomIncrementIDAllocator()
testMeta := meta.NewMeta(idAllocator, store)
testTarget := meta.NewTargetManager()
testTarget := meta.NewTargetManager(suite.broker, testMeta)

distManager := meta.NewDistributionManager()
nodeManager := session.NewNodeManager()
@@ -156,9 +157,27 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() {
defer suite.TearDownTest()
balancer := suite.balancer
collection := utils.CreateTestCollection(1, 1)
balancer.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
balancer.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 2, "test-insert-channel"))
balancer.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 3, "test-insert-channel"))
segments := []*datapb.SegmentBinlogs{
{
SegmentID: 1,
},
{
SegmentID: 2,
},
{
SegmentID: 3,
},
{
SegmentID: 4,
},
{
SegmentID: 5,
},
}
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
nil, segments, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)
collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
balancer.meta.CollectionManager.PutCollection(collection)
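The test above feeds a mocked broker's GetRecoveryInfo result into the target manager and then promotes the next target to the current one. A rough stand-alone sketch of that flow, with hypothetical brokerLike and targetManagerLike types instead of the real querycoordv2 interfaces, could be:

package main

import "fmt"

// brokerLike is a tiny stand-in for the coordinator's broker; the real
// GetRecoveryInfo also takes a context and returns channel info.
type brokerLike interface {
	GetRecoveryInfo(collectionID, partitionID int64) (segmentIDs []int64, err error)
}

// fakeBroker plays the role the mockery-generated MockBroker plays in the test.
type fakeBroker struct {
	segments map[int64][]int64 // partitionID -> segment IDs
}

func (b *fakeBroker) GetRecoveryInfo(collectionID, partitionID int64) ([]int64, error) {
	return b.segments[partitionID], nil
}

// targetManagerLike keeps a "next" and a "current" target, as the real TargetManager does.
type targetManagerLike struct {
	broker  brokerLike
	next    map[int64][]int64 // collectionID -> segment IDs
	current map[int64][]int64
}

func (t *targetManagerLike) UpdateCollectionNextTargetWithPartitions(collectionID int64, partitions ...int64) error {
	var ids []int64
	for _, p := range partitions {
		segs, err := t.broker.GetRecoveryInfo(collectionID, p)
		if err != nil {
			return err
		}
		ids = append(ids, segs...)
	}
	t.next[collectionID] = ids
	return nil
}

func (t *targetManagerLike) UpdateCollectionCurrentTarget(collectionID int64) {
	t.current[collectionID] = t.next[collectionID]
}

func main() {
	broker := &fakeBroker{segments: map[int64][]int64{1: {1, 2, 3, 4, 5}}}
	mgr := &targetManagerLike{broker: broker, next: map[int64][]int64{}, current: map[int64][]int64{}}
	_ = mgr.UpdateCollectionNextTargetWithPartitions(1, 1)
	mgr.UpdateCollectionCurrentTarget(1)
	fmt.Println(mgr.current[1]) // [1 2 3 4 5]
}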
@@ -25,6 +25,7 @@ import (
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
"github.com/milvus-io/milvus/internal/querycoordv2/task"
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)

@@ -73,16 +74,14 @@ func (c *ChannelChecker) Check(ctx context.Context) []task.Task {

func (c *ChannelChecker) checkReplica(ctx context.Context, replica *meta.Replica) []task.Task {
ret := make([]task.Task, 0)
targets := c.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID())
dists := c.getChannelDist(replica)

lacks, redundancies := diffChannels(targets, dists)
lacks, redundancies := c.getDmChannelDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
tasks := c.createChannelLoadTask(ctx, lacks, replica)
ret = append(ret, tasks...)
tasks = c.createChannelReduceTasks(ctx, redundancies, replica.GetID())
ret = append(ret, tasks...)

repeated := findRepeatedChannels(dists)
repeated := c.findRepeatedChannels(c.dist, c.meta, replica.GetID())
tasks = c.createChannelReduceTasks(ctx, repeated, replica.GetID())
ret = append(ret, tasks...)
@@ -91,38 +90,69 @@ func (c *ChannelChecker) checkReplica(ctx context.Context, replica *meta.Replica
return ret
}

func (c *ChannelChecker) getChannelDist(replica *meta.Replica) []*meta.DmChannel {
dists := make([]*meta.DmChannel, 0)
for _, nodeID := range replica.Nodes.Collect() {
dists = append(dists, c.dist.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID)...)
// GetDmChannelDiff get channel diff between target and dist
func (c *ChannelChecker) getDmChannelDiff(targetMgr *meta.TargetManager,
distMgr *meta.DistributionManager,
metaInfo *meta.Meta,
collectionID int64,
replicaID int64) (toLoad, toRelease []*meta.DmChannel) {
replica := metaInfo.Get(replicaID)
if replica == nil {
log.Info("replica does not exist, skip it")
return
}
return dists
}

func diffChannels(targets, dists []*meta.DmChannel) (lacks, redundancies []*meta.DmChannel) {
distMap := make(map[string]struct{})
targetMap := make(map[string]struct{})
for _, ch := range targets {
targetMap[ch.GetChannelName()] = struct{}{}
dist := c.getChannelDist(distMgr, replica)
distMap := typeutil.NewSet[string]()
for _, ch := range dist {
distMap.Insert(ch.GetChannelName())
}
for _, ch := range dists {
distMap[ch.GetChannelName()] = struct{}{}
if _, ok := targetMap[ch.GetChannelName()]; !ok {
redundancies = append(redundancies, ch)

nextTargetMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.NextTarget)
currentTargetMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)

// get channels which exists on dist, but not exist on current and next
for _, ch := range dist {
_, existOnCurrent := currentTargetMap[ch.GetChannelName()]
_, existOnNext := nextTargetMap[ch.GetChannelName()]
if !existOnNext && !existOnCurrent {
toRelease = append(toRelease, ch)
}
}
for _, ch := range targets {
if _, ok := distMap[ch.GetChannelName()]; !ok {
lacks = append(lacks, ch)

//get channels which exists on next target, but not on dist
for name, channel := range nextTargetMap {
_, existOnDist := distMap[name]
if !existOnDist {
toLoad = append(toLoad, channel)
}
}

return
}

func findRepeatedChannels(dists []*meta.DmChannel) []*meta.DmChannel {
func (c *ChannelChecker) getChannelDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.DmChannel {
dist := make([]*meta.DmChannel, 0)
for _, nodeID := range replica.Nodes.Collect() {
dist = append(dist, distMgr.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID)...)
}
return dist
}

func (c *ChannelChecker) findRepeatedChannels(distMgr *meta.DistributionManager,
metaInfo *meta.Meta,
replicaID int64) []*meta.DmChannel {
replica := metaInfo.Get(replicaID)
ret := make([]*meta.DmChannel, 0)

if replica == nil {
log.Info("replica does not exist, skip it")
return ret
}
dist := c.getChannelDist(distMgr, replica)

versionsMap := make(map[string]*meta.DmChannel)
for _, ch := range dists {
for _, ch := range dist {
maxVer, ok := versionsMap[ch.GetChannelName()]
if !ok {
versionsMap[ch.GetChannelName()] = ch
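The channel checker above computes two set differences: channels in the next target but missing from the distribution are loaded, channels in neither the current nor the next target are released, and duplicated channels keep only the highest version. A hedged sketch of that logic with simplified types (not the real meta.DmChannel) might be:

package main

import "fmt"

// dmChannel is a simplified stand-in for meta.DmChannel: just a name and a version.
type dmChannel struct {
	Name    string
	Version int64
}

// diffChannels mirrors getDmChannelDiff: load what the next target wants but the
// replica does not serve yet, release what neither the current nor the next target wants.
func diffChannels(dist []dmChannel, next, current map[string]dmChannel) (toLoad, toRelease []dmChannel) {
	distNames := make(map[string]struct{}, len(dist))
	for _, ch := range dist {
		distNames[ch.Name] = struct{}{}
		_, onCurrent := current[ch.Name]
		_, onNext := next[ch.Name]
		if !onCurrent && !onNext {
			toRelease = append(toRelease, ch)
		}
	}
	for name, ch := range next {
		if _, ok := distNames[name]; !ok {
			toLoad = append(toLoad, ch)
		}
	}
	return
}

// repeatedChannels mirrors findRepeatedChannels: when two nodes serve the same
// channel, the copy with the smaller version is reported as redundant.
func repeatedChannels(dist []dmChannel) []dmChannel {
	newest := make(map[string]dmChannel)
	var redundant []dmChannel
	for _, ch := range dist {
		prev, ok := newest[ch.Name]
		if !ok {
			newest[ch.Name] = ch
			continue
		}
		if prev.Version <= ch.Version {
			redundant = append(redundant, prev)
			newest[ch.Name] = ch
		} else {
			redundant = append(redundant, ch)
		}
	}
	return redundant
}

func main() {
	dist := []dmChannel{{"ch-1", 1}, {"ch-1", 2}, {"ch-stale", 1}}
	next := map[string]dmChannel{"ch-1": {"ch-1", 0}, "ch-new": {"ch-new", 0}}
	current := map[string]dmChannel{"ch-1": {"ch-1", 0}}
	toLoad, toRelease := diffChannels(dist, next, current)
	fmt.Println(toLoad, toRelease, repeatedChannels(dist))
}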
@ -20,21 +20,25 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/balance"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/task"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type ChannelCheckerTestSuite struct {
|
||||
suite.Suite
|
||||
kv *etcdkv.EtcdKV
|
||||
checker *ChannelChecker
|
||||
meta *meta.Meta
|
||||
broker *meta.MockBroker
|
||||
}
|
||||
|
||||
func (suite *ChannelCheckerTestSuite) SetupSuite() {
|
||||
|
@ -58,13 +62,14 @@ func (suite *ChannelCheckerTestSuite) SetupTest() {
|
|||
// meta
|
||||
store := meta.NewMetaStore(suite.kv)
|
||||
idAllocator := RandomIncrementIDAllocator()
|
||||
testMeta := meta.NewMeta(idAllocator, store)
|
||||
suite.meta = meta.NewMeta(idAllocator, store)
|
||||
suite.broker = meta.NewMockBroker(suite.T())
|
||||
targetManager := meta.NewTargetManager(suite.broker, suite.meta)
|
||||
|
||||
distManager := meta.NewDistributionManager()
|
||||
targetManager := meta.NewTargetManager()
|
||||
|
||||
balancer := suite.createMockBalancer()
|
||||
suite.checker = NewChannelChecker(testMeta, distManager, targetManager, balancer)
|
||||
suite.checker = NewChannelChecker(suite.meta, distManager, targetManager, balancer)
|
||||
}
|
||||
|
||||
func (suite *ChannelCheckerTestSuite) TearDownTest() {
|
||||
|
@ -94,7 +99,16 @@ func (suite *ChannelCheckerTestSuite) TestLoadChannel() {
|
|||
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
|
||||
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1}))
|
||||
|
||||
checker.targetMgr.AddDmChannel(utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
|
||||
channels := []*datapb.VchannelInfo{
|
||||
{
|
||||
CollectionID: 1,
|
||||
ChannelName: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
|
||||
channels, nil, nil)
|
||||
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
|
||||
|
||||
tasks := checker.Check(context.TODO())
|
||||
suite.Len(tasks, 1)
|
||||
|
@ -126,9 +140,27 @@ func (suite *ChannelCheckerTestSuite) TestReduceChannel() {
|
|||
|
||||
func (suite *ChannelCheckerTestSuite) TestRepeatedChannels() {
|
||||
checker := suite.checker
|
||||
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
|
||||
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
|
||||
checker.targetMgr.AddDmChannel(utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
|
||||
err := checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
|
||||
suite.NoError(err)
|
||||
err = checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
|
||||
suite.NoError(err)
|
||||
|
||||
segments := []*datapb.SegmentBinlogs{
|
||||
{
|
||||
SegmentID: 1,
|
||||
InsertChannel: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
|
||||
channels := []*datapb.VchannelInfo{
|
||||
{
|
||||
CollectionID: 1,
|
||||
ChannelName: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
|
||||
channels, segments, nil)
|
||||
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
|
||||
checker.dist.ChannelDistManager.Update(1, utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
|
||||
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 2, "test-insert-channel"))
|
||||
|
||||
|
|
|
@@ -76,11 +76,9 @@ func (c *SegmentChecker) Check(ctx context.Context) []task.Task {

func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica) []task.Task {
ret := make([]task.Task, 0)
targets := c.targetMgr.GetSegmentsByCollection(replica.CollectionID)
dists := c.getSegmentsDist(replica)

// compare with targets to find the lack and redundancy of segments
lacks, redundancies := diffSegments(targets, dists)
lacks, redundancies := c.getHistoricalSegmentDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
tasks := c.createSegmentLoadTasks(ctx, lacks, replica)
ret = append(ret, tasks...)
@@ -88,68 +86,146 @@ func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica
ret = append(ret, tasks...)

// compare inner dists to find repeated loaded segments
redundancies = findRepeatedSegments(dists)
redundancies = c.findRepeatedHistoricalSegments(c.dist, c.meta, replica.GetID())
redundancies = c.filterExistedOnLeader(replica, redundancies)
tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_All)
ret = append(ret, tasks...)

// release redundant growing segments
leaderRedundancies := c.findNeedReleasedGrowingSegments(replica)
redundancies = make([]*meta.Segment, 0)
for _, segments := range leaderRedundancies {
redundancies = append(redundancies, segments...)
}
// compare with target to find the lack and redundancy of segments
_, redundancies = c.getStreamingSegmentDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_Streaming)
ret = append(ret, tasks...)

return ret
}

func (c *SegmentChecker) getSegmentsDist(replica *meta.Replica) []*meta.Segment {
// GetStreamingSegmentDiff get streaming segment diff between leader view and target
func (c *SegmentChecker) getStreamingSegmentDiff(targetMgr *meta.TargetManager,
distMgr *meta.DistributionManager,
metaInfo *meta.Meta,
collectionID int64,
replicaID int64) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
replica := metaInfo.Get(replicaID)
if replica == nil {
log.Info("replica does not exist, skip it")
return
}
dist := c.getStreamingSegmentsDist(distMgr, replica)
distMap := typeutil.NewUniqueSet()
for _, s := range dist {
distMap.Insert(s.GetID())
}

nextTargetSegmentIDs := targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.NextTarget)
currentTargetSegmentIDs := targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.CurrentTarget)
currentTargetChannelMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)

// get segment which exist on dist, but not on current target and next target
for _, segment := range dist {
if !currentTargetSegmentIDs.Contain(segment.GetID()) && !nextTargetSegmentIDs.Contain(segment.GetID()) {
if channel, ok := currentTargetChannelMap[segment.InsertChannel]; ok {
timestampInSegment := segment.GetStartPosition().GetTimestamp()
timestampInTarget := channel.GetSeekPosition().GetTimestamp()
// filter toRelease which seekPosition is newer than next target dmChannel
if timestampInSegment < timestampInTarget {
log.Info("growing segment not exist in target, so release it",
zap.Int64("segmentID", segment.GetID()),
)
toRelease = append(toRelease, segment)
}
}
}
}

return
}

func (c *SegmentChecker) getStreamingSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) map[int64]*meta.Segment {
segments := make(map[int64]*meta.Segment, 0)
for _, node := range replica.Nodes.Collect() {
segmentsOnNodes := distMgr.LeaderViewManager.GetGrowingSegmentDistByCollectionAndNode(replica.CollectionID, node)
for k, v := range segmentsOnNodes {
segments[k] = v
}
}

return segments
}
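getStreamingSegmentDiff above releases a growing segment only when no target references it and its start position is older than the current target channel's seek position, so data not yet covered by sealed segments is not dropped too early. An illustrative sketch of that filter, under those assumptions and with invented types rather than the real meta package, could be:

package main

import "fmt"

// growingSegment is a simplified stand-in for a growing segment in the leader view.
type growingSegment struct {
	ID            int64
	Channel       string
	StartPosition uint64 // message-stream timestamp, like internal.MsgPosition.Timestamp
}

// releasableGrowing mirrors the filter in getStreamingSegmentDiff: a growing segment
// that no target knows about is released only when it started before the channel's
// checkpoint in the current target.
func releasableGrowing(
	dist []growingSegment,
	inCurrentTarget, inNextTarget map[int64]bool,
	channelSeek map[string]uint64,
) []growingSegment {
	var toRelease []growingSegment
	for _, seg := range dist {
		if inCurrentTarget[seg.ID] || inNextTarget[seg.ID] {
			continue
		}
		seek, ok := channelSeek[seg.Channel]
		if !ok {
			continue // channel not in current target, keep the segment for now
		}
		if seg.StartPosition < seek {
			toRelease = append(toRelease, seg)
		}
	}
	return toRelease
}

func main() {
	dist := []growingSegment{
		{ID: 2, Channel: "ch-1", StartPosition: 2},
		{ID: 3, Channel: "ch-1", StartPosition: 3},
		{ID: 4, Channel: "ch-1", StartPosition: 11},
	}
	seek := map[string]uint64{"ch-1": 10}
	// Segments 2 and 3 are released, segment 4 started after the checkpoint and is kept.
	fmt.Println(releasableGrowing(dist, map[int64]bool{}, map[int64]bool{}, seek))
}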
// GetHistoricalSegmentDiff get historical segment diff between target and dist
|
||||
func (c *SegmentChecker) getHistoricalSegmentDiff(targetMgr *meta.TargetManager,
|
||||
distMgr *meta.DistributionManager,
|
||||
metaInfo *meta.Meta,
|
||||
collectionID int64,
|
||||
replicaID int64) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
|
||||
replica := metaInfo.Get(replicaID)
|
||||
if replica == nil {
|
||||
log.Info("replica does not exist, skip it")
|
||||
return
|
||||
}
|
||||
dist := c.getHistoricalSegmentsDist(distMgr, replica)
|
||||
distMap := typeutil.NewUniqueSet()
|
||||
for _, s := range dist {
|
||||
distMap.Insert(s.GetID())
|
||||
}
|
||||
|
||||
nextTargetMap := targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
|
||||
currentTargetMap := targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.CurrentTarget)
|
||||
|
||||
//get segment which exist on next target, but not on dist
|
||||
for segmentID, segment := range nextTargetMap {
|
||||
if !distMap.Contain(segmentID) {
|
||||
toLoad = append(toLoad, segment)
|
||||
}
|
||||
}
|
||||
|
||||
// get segment which exist on dist, but not on current target and next target
|
||||
for _, segment := range dist {
|
||||
_, existOnCurrent := currentTargetMap[segment.GetID()]
|
||||
_, existOnNext := nextTargetMap[segment.GetID()]
|
||||
|
||||
if !existOnNext && !existOnCurrent {
|
||||
toRelease = append(toRelease, segment)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *SegmentChecker) getHistoricalSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.Segment {
|
||||
ret := make([]*meta.Segment, 0)
|
||||
for _, node := range replica.Nodes.Collect() {
|
||||
ret = append(ret, c.dist.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
|
||||
ret = append(ret, distMgr.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func diffSegments(targets []*datapb.SegmentInfo, dists []*meta.Segment) (lacks []*datapb.SegmentInfo, redundancies []*meta.Segment) {
|
||||
distMap := typeutil.NewUniqueSet()
|
||||
targetMap := typeutil.NewUniqueSet()
|
||||
for _, s := range targets {
|
||||
targetMap.Insert(s.GetID())
|
||||
func (c *SegmentChecker) findRepeatedHistoricalSegments(distMgr *meta.DistributionManager,
|
||||
metaInfo *meta.Meta,
|
||||
replicaID int64) []*meta.Segment {
|
||||
segments := make([]*meta.Segment, 0)
|
||||
replica := metaInfo.Get(replicaID)
|
||||
if replica == nil {
|
||||
log.Info("replica does not exist, skip it")
|
||||
return segments
|
||||
}
|
||||
for _, s := range dists {
|
||||
distMap.Insert(s.GetID())
|
||||
if !targetMap.Contain(s.GetID()) {
|
||||
redundancies = append(redundancies, s)
|
||||
}
|
||||
}
|
||||
for _, s := range targets {
|
||||
if !distMap.Contain(s.GetID()) {
|
||||
lacks = append(lacks, s)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func findRepeatedSegments(dists []*meta.Segment) []*meta.Segment {
|
||||
ret := make([]*meta.Segment, 0)
|
||||
dist := c.getHistoricalSegmentsDist(distMgr, replica)
|
||||
versions := make(map[int64]*meta.Segment)
|
||||
for _, s := range dists {
|
||||
for _, s := range dist {
|
||||
maxVer, ok := versions[s.GetID()]
|
||||
if !ok {
|
||||
versions[s.GetID()] = s
|
||||
continue
|
||||
}
|
||||
if maxVer.Version <= s.Version {
|
||||
ret = append(ret, maxVer)
|
||||
segments = append(segments, maxVer)
|
||||
versions[s.GetID()] = s
|
||||
} else {
|
||||
ret = append(ret, s)
|
||||
segments = append(segments, s)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
|
||||
return segments
|
||||
}
|
||||
|
||||
func (c *SegmentChecker) filterExistedOnLeader(replica *meta.Replica, segments []*meta.Segment) []*meta.Segment {
|
||||
|
@ -177,55 +253,6 @@ func (c *SegmentChecker) filterExistedOnLeader(replica *meta.Replica, segments [
|
|||
return filtered
|
||||
}
|
||||
|
||||
func (c *SegmentChecker) findNeedReleasedGrowingSegments(replica *meta.Replica) map[int64][]*meta.Segment {
|
||||
ret := make(map[int64][]*meta.Segment, 0) // leaderID -> segment ids
|
||||
leaders := c.dist.ChannelDistManager.GetShardLeadersByReplica(replica)
|
||||
for shard, leaderID := range leaders {
|
||||
leaderView := c.dist.LeaderViewManager.GetLeaderShardView(leaderID, shard)
|
||||
if leaderView == nil {
|
||||
continue
|
||||
}
|
||||
// find growing segments from leaderview's sealed segments
|
||||
// because growing segments should be released only after loading the compaction created segment successfully.
|
||||
for sid := range leaderView.Segments {
|
||||
segment := c.targetMgr.GetSegment(sid)
|
||||
if segment == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
sources := append(segment.GetCompactionFrom(), segment.GetID())
|
||||
for _, source := range sources {
|
||||
if leaderView.GrowingSegments.Contain(source) {
|
||||
ret[leaderView.ID] = append(ret[leaderView.ID], &meta.Segment{
|
||||
SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: source,
|
||||
CollectionID: replica.GetCollectionID(),
|
||||
InsertChannel: leaderView.Channel,
|
||||
},
|
||||
Node: leaderID,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func packSegments(segmentIDs []int64, nodeID int64, collectionID int64) []*meta.Segment {
|
||||
ret := make([]*meta.Segment, 0, len(segmentIDs))
|
||||
for _, id := range segmentIDs {
|
||||
segment := &meta.Segment{
|
||||
SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: id,
|
||||
CollectionID: collectionID,
|
||||
},
|
||||
Node: nodeID,
|
||||
}
|
||||
ret = append(ret, segment)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *SegmentChecker) createSegmentLoadTasks(ctx context.Context, segments []*datapb.SegmentInfo, replica *meta.Replica) []task.Task {
|
||||
if len(segments) == 0 {
|
||||
return nil
|
||||
|
|
|
@ -18,23 +18,29 @@ package checkers
|
|||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/balance"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/task"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type SegmentCheckerTestSuite struct {
|
||||
suite.Suite
|
||||
kv *etcdkv.EtcdKV
|
||||
checker *SegmentChecker
|
||||
meta *meta.Meta
|
||||
broker *meta.MockBroker
|
||||
}
|
||||
|
||||
func (suite *SegmentCheckerTestSuite) SetupSuite() {
|
||||
|
@ -58,13 +64,13 @@ func (suite *SegmentCheckerTestSuite) SetupTest() {
|
|||
// meta
|
||||
store := meta.NewMetaStore(suite.kv)
|
||||
idAllocator := RandomIncrementIDAllocator()
|
||||
testMeta := meta.NewMeta(idAllocator, store)
|
||||
|
||||
suite.meta = meta.NewMeta(idAllocator, store)
|
||||
distManager := meta.NewDistributionManager()
|
||||
targetManager := meta.NewTargetManager()
|
||||
suite.broker = meta.NewMockBroker(suite.T())
|
||||
targetManager := meta.NewTargetManager(suite.broker, suite.meta)
|
||||
|
||||
balancer := suite.createMockBalancer()
|
||||
suite.checker = NewSegmentChecker(testMeta, distManager, targetManager, balancer)
|
||||
suite.checker = NewSegmentChecker(suite.meta, distManager, targetManager, balancer)
|
||||
}
|
||||
|
||||
func (suite *SegmentCheckerTestSuite) TearDownTest() {
|
||||
|
@ -96,11 +102,19 @@ func (suite *SegmentCheckerTestSuite) TestLoadSegments() {
|
|||
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
|
||||
|
||||
// set target
|
||||
checker.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
|
||||
segments := []*datapb.SegmentBinlogs{
|
||||
{
|
||||
SegmentID: 1,
|
||||
InsertChannel: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
|
||||
nil, segments, nil)
|
||||
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
|
||||
|
||||
// set dist
|
||||
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
|
||||
|
||||
tasks := checker.Check(context.TODO())
|
||||
suite.Len(tasks, 1)
|
||||
|
@ -121,7 +135,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseSegments() {
|
|||
|
||||
// set dist
|
||||
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
|
||||
checker.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 2, 1, 1, "test-insert-channel"))
|
||||
|
||||
tasks := checker.Check(context.TODO())
|
||||
|
@ -141,11 +155,19 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
|
|||
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
|
||||
|
||||
// set target
|
||||
checker.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
|
||||
segments := []*datapb.SegmentBinlogs{
|
||||
{
|
||||
SegmentID: 1,
|
||||
InsertChannel: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
|
||||
nil, segments, nil)
|
||||
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
|
||||
|
||||
// set dist
|
||||
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 2}, []int64{}))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 2}, map[int64]*meta.Segment{}))
|
||||
checker.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
|
||||
checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 1, 1, 2, "test-insert-channel"))
|
||||
|
||||
|
@ -160,7 +182,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
|
|||
suite.EqualValues(1, action.Node())
|
||||
|
||||
// test less version exist on leader
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 1}, []int64{}))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 1}, map[int64]*meta.Segment{}))
|
||||
tasks = checker.Check(context.TODO())
|
||||
suite.Len(tasks, 0)
|
||||
}
|
||||
|
@ -172,16 +194,43 @@ func (suite *SegmentCheckerTestSuite) TestReleaseGrowingSegments() {
|
|||
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
|
||||
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
|
||||
|
||||
segment := utils.CreateTestSegmentInfo(1, 1, 3, "test-insert-channel")
|
||||
segment.CompactionFrom = append(segment.CompactionFrom, 2)
|
||||
checker.targetMgr.AddSegment(segment)
|
||||
segments := []*datapb.SegmentBinlogs{
|
||||
{
|
||||
SegmentID: 3,
|
||||
InsertChannel: "test-insert-channel",
|
||||
},
|
||||
}
|
||||
channels := []*datapb.VchannelInfo{
|
||||
{
|
||||
CollectionID: 1,
|
||||
ChannelName: "test-insert-channel",
|
||||
SeekPosition: &internalpb.MsgPosition{Timestamp: 10},
|
||||
},
|
||||
}
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
|
||||
channels, segments, nil)
|
||||
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
|
||||
checker.targetMgr.UpdateCollectionCurrentTarget(int64(1), int64(1))
|
||||
|
||||
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, []int64{2, 3}))
|
||||
checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 3, 2, 1, "test-insert-channel"))
|
||||
growingSegments := make(map[int64]*meta.Segment)
|
||||
growingSegments[2] = utils.CreateTestSegment(1, 1, 2, 2, 0, "test-insert-channel")
|
||||
growingSegments[2].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 2}
|
||||
growingSegments[3] = utils.CreateTestSegment(1, 1, 3, 2, 1, "test-insert-channel")
|
||||
growingSegments[3].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 3}
|
||||
growingSegments[4] = utils.CreateTestSegment(1, 1, 4, 2, 1, "test-insert-channel")
|
||||
growingSegments[4].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 11}
|
||||
|
||||
dmChannel := utils.CreateTestChannel(1, 2, 1, "test-insert-channel")
|
||||
dmChannel.UnflushedSegmentIds = []int64{2, 3}
|
||||
checker.dist.ChannelDistManager.Update(2, dmChannel)
|
||||
checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, growingSegments))
|
||||
checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 3, 2, 2, "test-insert-channel"))
|
||||
|
||||
tasks := checker.Check(context.TODO())
|
||||
suite.Len(tasks, 2)
|
||||
sort.Slice(tasks, func(i, j int) bool {
|
||||
return tasks[i].Actions()[0].(*task.SegmentAction).SegmentID() < tasks[j].Actions()[0].(*task.SegmentAction).SegmentID()
|
||||
})
|
||||
suite.Len(tasks[0].Actions(), 1)
|
||||
action, ok := tasks[0].Actions()[0].(*task.SegmentAction)
|
||||
suite.True(ok)
|
||||
|
|
|
@ -21,15 +21,18 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/session"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/task"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
type DistControllerTestSuite struct {
|
||||
|
@ -37,19 +40,46 @@ type DistControllerTestSuite struct {
|
|||
controller *Controller
|
||||
mockCluster *session.MockCluster
|
||||
mockScheduler *task.MockScheduler
|
||||
|
||||
kv *etcdkv.EtcdKV
|
||||
meta *meta.Meta
|
||||
broker *meta.MockBroker
|
||||
}
|
||||
|
||||
func (suite *DistControllerTestSuite) SetupTest() {
|
||||
Params.Init()
|
||||
|
||||
var err error
|
||||
config := GenerateEtcdConfig()
|
||||
cli, err := etcd.GetEtcdClient(
|
||||
config.UseEmbedEtcd,
|
||||
config.EtcdUseSSL,
|
||||
config.Endpoints,
|
||||
config.EtcdTLSCert,
|
||||
config.EtcdTLSKey,
|
||||
config.EtcdTLSCACert,
|
||||
config.EtcdTLSMinVersion)
|
||||
suite.Require().NoError(err)
|
||||
suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)
|
||||
|
||||
// meta
|
||||
store := meta.NewMetaStore(suite.kv)
|
||||
idAllocator := RandomIncrementIDAllocator()
|
||||
suite.meta = meta.NewMeta(idAllocator, store)
|
||||
|
||||
suite.mockCluster = session.NewMockCluster(suite.T())
|
||||
nodeManager := session.NewNodeManager()
|
||||
distManager := meta.NewDistributionManager()
|
||||
targetManager := meta.NewTargetManager()
|
||||
suite.broker = meta.NewMockBroker(suite.T())
|
||||
targetManager := meta.NewTargetManager(suite.broker, suite.meta)
|
||||
suite.mockScheduler = task.NewMockScheduler(suite.T())
|
||||
suite.controller = NewDistController(suite.mockCluster, nodeManager, distManager, targetManager, suite.mockScheduler)
|
||||
}
|
||||
|
||||
func (suite *DistControllerTestSuite) TearDownSuite() {
|
||||
suite.kv.Close()
|
||||
}
|
||||
|
||||
func (suite *DistControllerTestSuite) TestStart() {
|
||||
dispatchCalled := atomic.NewBool(false)
|
||||
suite.mockCluster.EXPECT().GetDataDistribution(mock.Anything, mock.Anything, mock.Anything).Return(
|
||||
|
|
|
@ -32,7 +32,6 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/querycoordv2/session"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/task"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -128,7 +127,12 @@ func (dh *distHandler) handleDistResp(resp *querypb.GetDataDistributionResponse)
|
|||
func (dh *distHandler) updateSegmentsDistribution(resp *querypb.GetDataDistributionResponse) {
|
||||
updates := make([]*meta.Segment, 0, len(resp.GetSegments()))
|
||||
for _, s := range resp.GetSegments() {
|
||||
segmentInfo := dh.target.GetSegment(s.GetID())
|
||||
// for collection which is already loaded
|
||||
segmentInfo := dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.CurrentTarget)
|
||||
if segmentInfo == nil {
|
||||
// for collection which is loading
|
||||
segmentInfo = dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.NextTarget)
|
||||
}
|
||||
var segment *meta.Segment
|
||||
if segmentInfo == nil {
|
||||
segment = &meta.Segment{
|
||||
|
@ -157,7 +161,7 @@ func (dh *distHandler) updateSegmentsDistribution(resp *querypb.GetDataDistribut
|
|||
func (dh *distHandler) updateChannelsDistribution(resp *querypb.GetDataDistributionResponse) {
|
||||
updates := make([]*meta.DmChannel, 0, len(resp.GetChannels()))
|
||||
for _, ch := range resp.GetChannels() {
|
||||
channelInfo := dh.target.GetDmChannel(ch.GetChannel())
|
||||
channelInfo := dh.target.GetDmChannel(ch.GetCollection(), ch.GetChannel(), meta.CurrentTarget)
|
||||
var channel *meta.DmChannel
|
||||
if channelInfo == nil {
|
||||
channel = &meta.DmChannel{
|
||||
|
@@ -180,12 +184,26 @@ func (dh *distHandler) updateChannelsDistribution(resp *querypb.GetDataDistribut
func (dh *distHandler) updateLeaderView(resp *querypb.GetDataDistributionResponse) {
updates := make([]*meta.LeaderView, 0, len(resp.GetLeaderViews()))
for _, lview := range resp.GetLeaderViews() {
segments := make(map[int64]*meta.Segment)

for ID, position := range lview.GrowingSegments {
segments[ID] = &meta.Segment{
SegmentInfo: &datapb.SegmentInfo{
ID: ID,
CollectionID: lview.GetCollection(),
StartPosition: position,
InsertChannel: lview.GetChannel(),
},
Node: resp.NodeID,
}
}

view := &meta.LeaderView{
ID: resp.GetNodeID(),
CollectionID: lview.GetCollection(),
Channel: lview.GetChannel(),
Segments: lview.GetSegmentDist(),
GrowingSegments: typeutil.NewUniqueSet(lview.GetGrowingSegmentIDs()...),
GrowingSegments: segments,
}
updates = append(updates, view)
}
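updateLeaderView above rebuilds per-segment metadata from the new growing_segments map (segment ID to start position) reported by the shard leader. A small sketch of that conversion, with placeholder types standing in for internal.MsgPosition and meta.Segment, might be:

package main

import "fmt"

// msgPosition and segmentMeta are simplified stand-ins for internal.MsgPosition and meta.Segment.
type msgPosition struct {
	Timestamp uint64
}

type segmentMeta struct {
	ID            int64
	CollectionID  int64
	InsertChannel string
	StartPosition *msgPosition
	Node          int64
}

// growingSegmentsFromView mirrors the loop in updateLeaderView: the leader reports
// growing segments as ID -> start position, and the coordinator rebuilds per-segment
// metadata from that map instead of a bare ID set.
func growingSegmentsFromView(nodeID, collectionID int64, channel string, growing map[int64]*msgPosition) map[int64]*segmentMeta {
	out := make(map[int64]*segmentMeta, len(growing))
	for id, pos := range growing {
		out[id] = &segmentMeta{
			ID:            id,
			CollectionID:  collectionID,
			InsertChannel: channel,
			StartPosition: pos,
			Node:          nodeID,
		}
	}
	return out
}

func main() {
	growing := map[int64]*msgPosition{2: {Timestamp: 2}, 3: {Timestamp: 3}}
	fmt.Println(len(growingSegmentsFromView(1, 1, "test-insert-channel", growing))) // 2
}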
@ -88,7 +88,7 @@ func (s *Server) balanceSegments(ctx context.Context, req *querypb.LoadBalanceRe
|
|||
// Only balance segments in targets
|
||||
segments := s.dist.SegmentDistManager.GetByCollectionAndNode(req.GetCollectionID(), srcNode)
|
||||
segments = lo.Filter(segments, func(segment *meta.Segment, _ int) bool {
|
||||
return s.targetMgr.GetSegment(segment.GetID()) != nil
|
||||
return s.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
|
||||
})
|
||||
allSegments := make(map[int64]*meta.Segment)
|
||||
for _, segment := range segments {
|
||||
|
@ -290,7 +290,7 @@ func (s *Server) tryGetNodesMetrics(ctx context.Context, req *milvuspb.GetMetric
|
|||
func (s *Server) fillReplicaInfo(replica *meta.Replica, withShardNodes bool) (*milvuspb.ReplicaInfo, error) {
|
||||
info := utils.Replica2ReplicaInfo(replica.Replica)
|
||||
|
||||
channels := s.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID())
|
||||
channels := s.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID(), meta.CurrentTarget)
|
||||
if len(channels) == 0 {
|
||||
msg := "failed to get channels, collection not loaded"
|
||||
log.Warn(msg)
|
||||
|
|
|
@ -21,16 +21,16 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metrics"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/observers"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/session"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Job is request of loading/releasing collection/partitions,
|
||||
|
@ -105,12 +105,11 @@ type LoadCollectionJob struct {
|
|||
*BaseJob
|
||||
req *querypb.LoadCollectionRequest
|
||||
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker meta.Broker
|
||||
nodeMgr *session.NodeManager
|
||||
handoffObserver *observers.HandoffObserver
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker meta.Broker
|
||||
nodeMgr *session.NodeManager
|
||||
}
|
||||
|
||||
func NewLoadCollectionJob(
|
||||
|
@ -121,17 +120,15 @@ func NewLoadCollectionJob(
|
|||
targetMgr *meta.TargetManager,
|
||||
broker meta.Broker,
|
||||
nodeMgr *session.NodeManager,
|
||||
handoffObserver *observers.HandoffObserver,
|
||||
) *LoadCollectionJob {
|
||||
return &LoadCollectionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
broker: broker,
|
||||
nodeMgr: nodeMgr,
|
||||
handoffObserver: handoffObserver,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
broker: broker,
|
||||
nodeMgr: nodeMgr,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -208,21 +205,16 @@ func (job *LoadCollectionJob) Execute() error {
|
|||
}
|
||||
|
||||
// Fetch channels and segments from DataCoord
|
||||
partitions, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID())
|
||||
partitionIDs, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID())
|
||||
if err != nil {
|
||||
msg := "failed to get partitions from RootCoord"
|
||||
log.Error(msg, zap.Error(err))
|
||||
return utils.WrapError(msg, err)
|
||||
}
|
||||
|
||||
job.handoffObserver.Register(job.CollectionID())
|
||||
err = utils.RegisterTargets(job.ctx,
|
||||
job.targetMgr,
|
||||
job.broker,
|
||||
req.GetCollectionID(),
|
||||
partitions)
|
||||
err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), partitionIDs...)
|
||||
if err != nil {
|
||||
msg := "failed to register channels and segments"
|
||||
msg := "failed to update next targets for collection"
|
||||
log.Error(msg, zap.Error(err))
|
||||
return utils.WrapError(msg, err)
|
||||
}
|
||||
|
@ -250,18 +242,16 @@ func (job *LoadCollectionJob) Execute() error {
|
|||
func (job *LoadCollectionJob) PostExecute() {
|
||||
if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
|
||||
job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
|
||||
job.handoffObserver.Unregister(job.ctx, job.CollectionID())
|
||||
job.targetMgr.RemoveCollection(job.req.GetCollectionID())
|
||||
}
|
||||
}
|
||||
|
||||
type ReleaseCollectionJob struct {
|
||||
*BaseJob
|
||||
req *querypb.ReleaseCollectionRequest
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
handoffObserver *observers.HandoffObserver
|
||||
req *querypb.ReleaseCollectionRequest
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
}
|
||||
|
||||
func NewReleaseCollectionJob(ctx context.Context,
|
||||
|
@ -269,15 +259,13 @@ func NewReleaseCollectionJob(ctx context.Context,
|
|||
dist *meta.DistributionManager,
|
||||
meta *meta.Meta,
|
||||
targetMgr *meta.TargetManager,
|
||||
handoffObserver *observers.HandoffObserver,
|
||||
) *ReleaseCollectionJob {
|
||||
return &ReleaseCollectionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
handoffObserver: handoffObserver,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -298,8 +286,6 @@ func (job *ReleaseCollectionJob) Execute() error {
|
|||
return utils.WrapError(msg, err)
|
||||
}
|
||||
|
||||
job.handoffObserver.Unregister(job.ctx, job.CollectionID())
|
||||
|
||||
err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
|
||||
if err != nil {
|
||||
msg := "failed to remove replicas"
|
||||
|
@ -316,12 +302,11 @@ type LoadPartitionJob struct {
|
|||
*BaseJob
|
||||
req *querypb.LoadPartitionsRequest
|
||||
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker meta.Broker
|
||||
nodeMgr *session.NodeManager
|
||||
handoffObserver *observers.HandoffObserver
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker meta.Broker
|
||||
nodeMgr *session.NodeManager
|
||||
}
|
||||
|
||||
func NewLoadPartitionJob(
|
||||
|
@ -332,17 +317,15 @@ func NewLoadPartitionJob(
|
|||
targetMgr *meta.TargetManager,
|
||||
broker meta.Broker,
|
||||
nodeMgr *session.NodeManager,
|
||||
handoffObserver *observers.HandoffObserver,
|
||||
) *LoadPartitionJob {
|
||||
return &LoadPartitionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
broker: broker,
|
||||
nodeMgr: nodeMgr,
|
||||
handoffObserver: handoffObserver,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
broker: broker,
|
||||
nodeMgr: nodeMgr,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -402,6 +385,7 @@ func (job *LoadPartitionJob) Execute() error {
|
|||
req := job.req
|
||||
log := log.Ctx(job.ctx).With(
|
||||
zap.Int64("collectionID", req.GetCollectionID()),
|
||||
zap.Int64s("partitionIDs", req.GetPartitionIDs()),
|
||||
)
|
||||
|
||||
// Clear stale replicas
|
||||
|
@ -427,19 +411,14 @@ func (job *LoadPartitionJob) Execute() error {
|
|||
zap.Int64s("nodes", replica.GetNodes()))
|
||||
}
|
||||
|
||||
job.handoffObserver.Register(job.CollectionID())
|
||||
err = utils.RegisterTargets(job.ctx,
|
||||
job.targetMgr,
|
||||
job.broker,
|
||||
req.GetCollectionID(),
|
||||
req.GetPartitionIDs())
|
||||
err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), req.GetPartitionIDs()...)
|
||||
if err != nil {
|
||||
msg := "failed to register channels and segments"
|
||||
log.Error(msg, zap.Error(err))
|
||||
msg := "failed to update next targets for collection"
|
||||
log.Error(msg,
|
||||
zap.Int64s("partitionIDs", req.GetPartitionIDs()),
|
||||
zap.Error(err))
|
||||
return utils.WrapError(msg, err)
|
||||
}
|
||||
job.handoffObserver.StartHandoff(job.CollectionID())
|
||||
|
||||
partitions := lo.Map(req.GetPartitionIDs(), func(partition int64, _ int) *meta.Partition {
|
||||
return &meta.Partition{
|
||||
PartitionLoadInfo: &querypb.PartitionLoadInfo{
|
||||
|
@ -466,18 +445,16 @@ func (job *LoadPartitionJob) Execute() error {
|
|||
func (job *LoadPartitionJob) PostExecute() {
|
||||
if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
|
||||
job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
|
||||
job.handoffObserver.Unregister(job.ctx, job.CollectionID())
|
||||
job.targetMgr.RemoveCollection(job.req.GetCollectionID())
|
||||
}
|
||||
}
|
||||
|
||||
type ReleasePartitionJob struct {
|
||||
*BaseJob
|
||||
req *querypb.ReleasePartitionsRequest
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
handoffObserver *observers.HandoffObserver
|
||||
req *querypb.ReleasePartitionsRequest
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
}
|
||||
|
||||
func NewReleasePartitionJob(ctx context.Context,
|
||||
|
@ -485,15 +462,13 @@ func NewReleasePartitionJob(ctx context.Context,
|
|||
dist *meta.DistributionManager,
|
||||
meta *meta.Meta,
|
||||
targetMgr *meta.TargetManager,
|
||||
handoffObserver *observers.HandoffObserver,
|
||||
) *ReleasePartitionJob {
|
||||
return &ReleasePartitionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
handoffObserver: handoffObserver,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -536,7 +511,6 @@ func (job *ReleasePartitionJob) Execute() error {
|
|||
log.Warn(msg, zap.Error(err))
|
||||
return utils.WrapError(msg, err)
|
||||
}
|
||||
job.handoffObserver.Unregister(job.ctx, job.CollectionID())
|
||||
err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
|
||||
if err != nil {
|
||||
log.Warn("failed to remove replicas", zap.Error(err))
|
||||
|
@ -550,9 +524,7 @@ func (job *ReleasePartitionJob) Execute() error {
|
|||
log.Warn(msg, zap.Error(err))
|
||||
return utils.WrapError(msg, err)
|
||||
}
|
||||
for _, partition := range toRelease {
|
||||
job.targetMgr.RemovePartition(partition)
|
||||
}
|
||||
job.targetMgr.RemovePartition(req.GetCollectionID(), toRelease...)
|
||||
waitCollectionReleased(job.dist, req.GetCollectionID(), toRelease...)
|
||||
}
|
||||
metrics.QueryCoordNumCollections.WithLabelValues().Dec()
|
||||
|
|
|
@ -21,17 +21,17 @@ import (
|
|||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/observers"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/session"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -50,14 +50,13 @@ type JobSuite struct {
|
|||
loadTypes map[int64]querypb.LoadType
|
||||
|
||||
// Dependencies
|
||||
kv kv.MetaKv
|
||||
store meta.Store
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker *meta.MockBroker
|
||||
nodeMgr *session.NodeManager
|
||||
handoffObserver *observers.HandoffObserver
|
||||
kv kv.MetaKv
|
||||
store meta.Store
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker *meta.MockBroker
|
||||
nodeMgr *session.NodeManager
|
||||
|
||||
// Test objects
|
||||
scheduler *Scheduler
|
||||
|
@ -131,16 +130,9 @@ func (suite *JobSuite) SetupTest() {
|
|||
suite.store = meta.NewMetaStore(suite.kv)
|
||||
suite.dist = meta.NewDistributionManager()
|
||||
suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store)
|
||||
suite.targetMgr = meta.NewTargetManager()
|
||||
suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
|
||||
suite.nodeMgr = session.NewNodeManager()
|
||||
suite.nodeMgr.Add(&session.NodeInfo{})
|
||||
suite.handoffObserver = observers.NewHandoffObserver(
|
||||
suite.store,
|
||||
suite.meta,
|
||||
suite.dist,
|
||||
suite.targetMgr,
|
||||
suite.broker,
|
||||
)
|
||||
suite.scheduler = NewScheduler()
|
||||
|
||||
suite.scheduler.Start(context.Background())
|
||||
|
@ -187,12 +179,12 @@ func (suite *JobSuite) TestLoadCollection() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
|
||||
suite.assertLoaded(collection)
|
||||
}
|
||||
|
||||
|
@ -212,7 +204,6 @@ func (suite *JobSuite) TestLoadCollection() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -236,7 +227,6 @@ func (suite *JobSuite) TestLoadCollection() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -262,7 +252,6 @@ func (suite *JobSuite) TestLoadCollection() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -291,7 +280,6 @@ func (suite *JobSuite) TestLoadCollectionWithReplicas() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -322,12 +310,12 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||
suite.assertLoaded(collection)
|
||||
}
|
||||
|
||||
|
@ -350,7 +338,6 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -368,9 +355,9 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
}
|
||||
// Load with 1 replica
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collection,
|
||||
PartitionIDs: suite.partitions[collection],
|
||||
// ReplicaNumber: 1,
|
||||
CollectionID: collection,
|
||||
PartitionIDs: suite.partitions[collection],
|
||||
ReplicaNumber: 1,
|
||||
}
|
||||
job := NewLoadPartitionJob(
|
||||
ctx,
|
||||
|
@ -380,12 +367,12 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||
suite.assertLoaded(collection)
|
||||
}
|
||||
|
||||
|
@ -408,7 +395,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -434,7 +420,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -460,7 +445,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -485,7 +469,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -515,7 +498,6 @@ func (suite *JobSuite) TestLoadPartitionWithReplicas() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -547,12 +529,12 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||
suite.assertLoaded(collection)
|
||||
}
|
||||
|
||||
|
@ -577,7 +559,6 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -601,7 +582,6 @@ func (suite *JobSuite) TestReleaseCollection() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -620,7 +600,6 @@ func (suite *JobSuite) TestReleaseCollection() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -646,7 +625,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -671,7 +649,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -698,7 +675,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -744,7 +720,6 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
loadErr := job.Wait()
|
||||
|
@ -778,7 +753,6 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
loadErr := job.Wait()
|
||||
|
@ -801,7 +775,6 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -824,7 +797,6 @@ func (suite *JobSuite) loadAll() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -832,6 +804,7 @@ func (suite *JobSuite) loadAll() {
|
|||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.True(suite.meta.Exist(collection))
|
||||
suite.NotNil(suite.meta.GetCollection(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
|
||||
} else {
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collection,
|
||||
|
@ -845,7 +818,6 @@ func (suite *JobSuite) loadAll() {
|
|||
suite.targetMgr,
|
||||
suite.broker,
|
||||
suite.nodeMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -853,6 +825,7 @@ func (suite *JobSuite) loadAll() {
|
|||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||
suite.True(suite.meta.Exist(collection))
|
||||
suite.NotNil(suite.meta.GetPartitionsByCollection(collection))
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -869,7 +842,6 @@ func (suite *JobSuite) releaseAll() {
|
|||
suite.dist,
|
||||
suite.meta,
|
||||
suite.targetMgr,
|
||||
suite.handoffObserver,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
@ -881,11 +853,11 @@ func (suite *JobSuite) releaseAll() {
|
|||
func (suite *JobSuite) assertLoaded(collection int64) {
|
||||
suite.True(suite.meta.Exist(collection))
|
||||
for _, channel := range suite.channels[collection] {
|
||||
suite.NotNil(suite.targetMgr.GetDmChannel(channel))
|
||||
suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
|
||||
}
|
||||
for _, partitions := range suite.segments[collection] {
|
||||
for _, segment := range partitions {
|
||||
suite.NotNil(suite.targetMgr.GetSegment(segment))
|
||||
suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -893,11 +865,11 @@ func (suite *JobSuite) assertLoaded(collection int64) {
|
|||
func (suite *JobSuite) assertReleased(collection int64) {
|
||||
suite.False(suite.meta.Exist(collection))
|
||||
for _, channel := range suite.channels[collection] {
|
||||
suite.Nil(suite.targetMgr.GetDmChannel(channel))
|
||||
suite.Nil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
|
||||
}
|
||||
for _, partitions := range suite.segments[collection] {
|
||||
for _, segment := range partitions {
|
||||
suite.Nil(suite.targetMgr.GetSegment(segment))
|
||||
suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -20,6 +20,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
. "github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
@ -19,8 +19,9 @@ package meta
|
|||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type LeaderView struct {
|
||||
|
@ -28,7 +29,7 @@ type LeaderView struct {
|
|||
CollectionID int64
|
||||
Channel string
|
||||
Segments map[int64]*querypb.SegmentDist
|
||||
GrowingSegments typeutil.UniqueSet
|
||||
GrowingSegments map[int64]*Segment
|
||||
}
|
||||
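GrowingSegments moves from a plain ID set to a map keyed by segment ID, so a leader view can carry per-growing-segment metadata. A minimal construction sketch, assuming it sits in the same meta package; buildLeaderView and its arguments are hypothetical:

package meta

import "github.com/milvus-io/milvus/internal/proto/querypb"

// buildLeaderView shows the new shape of LeaderView: growing segments are a
// map from segment ID to *Segment instead of a UniqueSet of IDs.
func buildLeaderView(nodeID, collectionID int64, channel string, growingIDs []int64) *LeaderView {
	growings := make(map[int64]*Segment, len(growingIDs))
	for _, id := range growingIDs {
		growings[id] = nil // the value can hold segment metadata; nil is enough for pure distribution checks
	}
	return &LeaderView{
		ID:              nodeID,
		CollectionID:    collectionID,
		Channel:         channel,
		Segments:        make(map[int64]*querypb.SegmentDist),
		GrowingSegments: growings,
	}
}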
|
||||
func (view *LeaderView) Clone() *LeaderView {
|
||||
|
@ -36,7 +37,11 @@ func (view *LeaderView) Clone() *LeaderView {
|
|||
for k, v := range view.Segments {
|
||||
segments[k] = v
|
||||
}
|
||||
growings := typeutil.NewUniqueSet(view.GrowingSegments.Collect()...)
|
||||
|
||||
growings := make(map[int64]*Segment)
|
||||
for k, v := range view.GrowingSegments {
|
||||
growings[k] = v
|
||||
}
|
||||
|
||||
return &LeaderView{
|
||||
ID: view.ID,
|
||||
|
@ -75,7 +80,7 @@ func (mgr *LeaderViewManager) GetSegmentByNode(nodeID int64) []int64 {
|
|||
}
|
||||
}
|
||||
if leaderID == nodeID {
|
||||
segments = append(segments, view.GrowingSegments.Collect()...)
|
||||
segments = append(segments, lo.Keys(view.GrowingSegments)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -104,7 +109,7 @@ func (mgr *LeaderViewManager) GetSegmentDist(segmentID int64) []int64 {
|
|||
if ok {
|
||||
nodes = append(nodes, version.NodeID)
|
||||
}
|
||||
if view.GrowingSegments.Contain(segmentID) {
|
||||
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||
nodes = append(nodes, leaderID)
|
||||
}
|
||||
}
|
||||
|
@ -135,7 +140,7 @@ func (mgr *LeaderViewManager) GetGrowingSegmentDist(segmentID int64) []int64 {
|
|||
nodes := make([]int64, 0)
|
||||
for leaderID, views := range mgr.views {
|
||||
for _, view := range views {
|
||||
if view.GrowingSegments.Contain(segmentID) {
|
||||
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||
nodes = append(nodes, leaderID)
|
||||
break
|
||||
}
|
||||
|
@ -151,7 +156,7 @@ func (mgr *LeaderViewManager) GetLeadersByGrowingSegment(segmentID int64) *Leade
|
|||
|
||||
for _, views := range mgr.views {
|
||||
for _, view := range views {
|
||||
if view.GrowingSegments.Contain(segmentID) {
|
||||
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||
return view
|
||||
}
|
||||
}
|
||||
|
@ -159,7 +164,26 @@ func (mgr *LeaderViewManager) GetLeadersByGrowingSegment(segmentID int64) *Leade
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetSegmentDist returns the list of nodes the given channel on
|
||||
// GetGrowingSegmentDistByCollectionAndNode returns all growing segments served by the given node for the given collection.
|
||||
func (mgr *LeaderViewManager) GetGrowingSegmentDistByCollectionAndNode(collectionID, nodeID int64) map[int64]*Segment {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
|
||||
segments := make(map[int64]*Segment, 0)
|
||||
if viewsOnNode, ok := mgr.views[nodeID]; ok {
|
||||
for _, view := range viewsOnNode {
|
||||
if view.CollectionID == collectionID {
|
||||
for ID, segment := range view.GrowingSegments {
|
||||
segments[ID] = segment
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
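A short usage sketch for the accessor above, assuming it runs inside the meta package against an already populated manager; growingSegmentIDsOnNode is a hypothetical helper:

package meta

import "github.com/samber/lo"

// growingSegmentIDsOnNode lists the IDs of all growing segments that the given
// node serves for the given collection, according to the known leader views.
func growingSegmentIDsOnNode(mgr *LeaderViewManager, collectionID, nodeID int64) []int64 {
	segments := mgr.GetGrowingSegmentDistByCollectionAndNode(collectionID, nodeID)
	return lo.Keys(segments) // map[int64]*Segment -> []int64
}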
|
||||
// GetChannelDist returns the list of nodes the given channel is on
|
||||
func (mgr *LeaderViewManager) GetChannelDist(channel string) []int64 {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
@ -19,10 +19,11 @@ package meta
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type LeaderViewManagerSuite struct {
|
||||
|
@ -70,7 +71,7 @@ func (suite *LeaderViewManagerSuite) SetupSuite() {
|
|||
ID: int64(j),
|
||||
CollectionID: collection,
|
||||
Channel: channel,
|
||||
GrowingSegments: typeutil.NewUniqueSet(suite.growingSegments[collection][channel]),
|
||||
GrowingSegments: map[int64]*Segment{suite.growingSegments[collection][channel]: nil},
|
||||
Segments: make(map[int64]*querypb.SegmentDist),
|
||||
}
|
||||
for k, segment := range suite.segments[collection] {
|
||||
|
@ -163,8 +164,8 @@ func (suite *LeaderViewManagerSuite) AssertSegmentDist(segment int64, nodes []in
|
|||
for _, view := range views {
|
||||
version, ok := view.Segments[segment]
|
||||
if ok {
|
||||
if !suite.True(nodeSet.Contain(version.NodeID) ||
|
||||
version.NodeID == leader && view.GrowingSegments.Contain(version.NodeID)) {
|
||||
_, ok = view.GrowingSegments[version.NodeID]
|
||||
if !suite.True(nodeSet.Contain(version.NodeID) || version.NodeID == leader && ok) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@ -3,10 +3,8 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
|
||||
querypb "github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockStore is an autogenerated mock type for the Store type
|
||||
|
@ -157,67 +155,6 @@ func (_c *MockStore_GetReplicas_Call) Return(_a0 []*querypb.Replica, _a1 error)
|
|||
return _c
|
||||
}
|
||||
|
||||
// LoadHandoffWithRevision provides a mock function with given fields:
|
||||
func (_m *MockStore) LoadHandoffWithRevision() ([]string, []string, int64, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 []string
|
||||
if rf, ok := ret.Get(0).(func() []string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]string)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 []string
|
||||
if rf, ok := ret.Get(1).(func() []string); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).([]string)
|
||||
}
|
||||
}
|
||||
|
||||
var r2 int64
|
||||
if rf, ok := ret.Get(2).(func() int64); ok {
|
||||
r2 = rf()
|
||||
} else {
|
||||
r2 = ret.Get(2).(int64)
|
||||
}
|
||||
|
||||
var r3 error
|
||||
if rf, ok := ret.Get(3).(func() error); ok {
|
||||
r3 = rf()
|
||||
} else {
|
||||
r3 = ret.Error(3)
|
||||
}
|
||||
|
||||
return r0, r1, r2, r3
|
||||
}
|
||||
|
||||
// MockStore_LoadHandoffWithRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadHandoffWithRevision'
|
||||
type MockStore_LoadHandoffWithRevision_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// LoadHandoffWithRevision is a helper method to define mock.On call
|
||||
func (_e *MockStore_Expecter) LoadHandoffWithRevision() *MockStore_LoadHandoffWithRevision_Call {
|
||||
return &MockStore_LoadHandoffWithRevision_Call{Call: _e.mock.On("LoadHandoffWithRevision")}
|
||||
}
|
||||
|
||||
func (_c *MockStore_LoadHandoffWithRevision_Call) Run(run func()) *MockStore_LoadHandoffWithRevision_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_LoadHandoffWithRevision_Call) Return(_a0 []string, _a1 []string, _a2 int64, _a3 error) *MockStore_LoadHandoffWithRevision_Call {
|
||||
_c.Call.Return(_a0, _a1, _a2, _a3)
|
||||
return _c
|
||||
}
|
||||
|
||||
// ReleaseCollection provides a mock function with given fields: id
|
||||
func (_m *MockStore) ReleaseCollection(id int64) error {
|
||||
ret := _m.Called(id)
|
||||
|
@ -382,43 +319,6 @@ func (_c *MockStore_ReleaseReplicas_Call) Return(_a0 error) *MockStore_ReleaseRe
|
|||
return _c
|
||||
}
|
||||
|
||||
// RemoveHandoffEvent provides a mock function with given fields: segmentInfo
|
||||
func (_m *MockStore) RemoveHandoffEvent(segmentInfo *querypb.SegmentInfo) error {
|
||||
ret := _m.Called(segmentInfo)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*querypb.SegmentInfo) error); ok {
|
||||
r0 = rf(segmentInfo)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStore_RemoveHandoffEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveHandoffEvent'
|
||||
type MockStore_RemoveHandoffEvent_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RemoveHandoffEvent is a helper method to define mock.On call
|
||||
// - segmentInfo *querypb.SegmentInfo
|
||||
func (_e *MockStore_Expecter) RemoveHandoffEvent(segmentInfo interface{}) *MockStore_RemoveHandoffEvent_Call {
|
||||
return &MockStore_RemoveHandoffEvent_Call{Call: _e.mock.On("RemoveHandoffEvent", segmentInfo)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_RemoveHandoffEvent_Call) Run(run func(segmentInfo *querypb.SegmentInfo)) *MockStore_RemoveHandoffEvent_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*querypb.SegmentInfo))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_RemoveHandoffEvent_Call) Return(_a0 error) *MockStore_RemoveHandoffEvent_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SaveCollection provides a mock function with given fields: info
|
||||
func (_m *MockStore) SaveCollection(info *querypb.CollectionLoadInfo) error {
|
||||
ret := _m.Called(info)
|
||||
|
@ -543,45 +443,6 @@ func (_c *MockStore_SaveReplica_Call) Return(_a0 error) *MockStore_SaveReplica_C
|
|||
return _c
|
||||
}
|
||||
|
||||
// WatchHandoffEvent provides a mock function with given fields: revision
|
||||
func (_m *MockStore) WatchHandoffEvent(revision int64) clientv3.WatchChan {
|
||||
ret := _m.Called(revision)
|
||||
|
||||
var r0 clientv3.WatchChan
|
||||
if rf, ok := ret.Get(0).(func(int64) clientv3.WatchChan); ok {
|
||||
r0 = rf(revision)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(clientv3.WatchChan)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStore_WatchHandoffEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WatchHandoffEvent'
|
||||
type MockStore_WatchHandoffEvent_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// WatchHandoffEvent is a helper method to define mock.On call
|
||||
// - revision int64
|
||||
func (_e *MockStore_Expecter) WatchHandoffEvent(revision interface{}) *MockStore_WatchHandoffEvent_Call {
|
||||
return &MockStore_WatchHandoffEvent_Call{Call: _e.mock.On("WatchHandoffEvent", revision)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_WatchHandoffEvent_Call) Run(run func(revision int64)) *MockStore_WatchHandoffEvent_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_WatchHandoffEvent_Call) Return(_a0 clientv3.WatchChan) *MockStore_WatchHandoffEvent_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewMockStore interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
@ -48,8 +48,6 @@ type WatchStoreChan = clientv3.WatchChan
|
|||
// Store is used to save and get from object storage.
|
||||
type Store interface {
|
||||
metastore.QueryCoordCatalog
|
||||
WatchHandoffEvent(revision int64) WatchStoreChan
|
||||
LoadHandoffWithRevision() ([]string, []string, int64, error)
|
||||
}
|
||||
|
||||
type metaStore struct {
|
||||
|
@ -195,19 +193,6 @@ func (s metaStore) ReleaseReplica(collection, replica int64) error {
|
|||
return s.cli.Remove(key)
|
||||
}
|
||||
|
||||
func (s metaStore) WatchHandoffEvent(revision int64) WatchStoreChan {
|
||||
return s.cli.WatchWithRevision(util.HandoffSegmentPrefix, revision)
|
||||
}
|
||||
|
||||
func (s metaStore) RemoveHandoffEvent(info *querypb.SegmentInfo) error {
|
||||
key := encodeHandoffEventKey(info.CollectionID, info.PartitionID, info.SegmentID)
|
||||
return s.cli.Remove(key)
|
||||
}
|
||||
|
||||
func (s metaStore) LoadHandoffWithRevision() ([]string, []string, int64, error) {
|
||||
return s.cli.LoadWithRevision(util.HandoffSegmentPrefix)
|
||||
}
|
||||
|
||||
func encodeCollectionLoadInfoKey(collection int64) string {
|
||||
return fmt.Sprintf("%s/%d", CollectionLoadInfoPrefix, collection)
|
||||
}
|
||||
@ -0,0 +1,79 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
)
|
||||
|
||||
// CollectionTarget is an immutable snapshot of a collection's target: its segments and DM channels.
|
||||
type CollectionTarget struct {
|
||||
segments map[int64]*datapb.SegmentInfo
|
||||
dmChannels map[string]*DmChannel
|
||||
}
|
||||
|
||||
func NewCollectionTarget(segments map[int64]*datapb.SegmentInfo, dmChannels map[string]*DmChannel) *CollectionTarget {
|
||||
return &CollectionTarget{
|
||||
segments: segments,
|
||||
dmChannels: dmChannels,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *CollectionTarget) GetAllSegments() map[int64]*datapb.SegmentInfo {
|
||||
return p.segments
|
||||
}
|
||||
|
||||
func (p *CollectionTarget) GetAllDmChannels() map[string]*DmChannel {
|
||||
return p.dmChannels
|
||||
}
|
||||
|
||||
func (p *CollectionTarget) GetAllSegmentIDs() []int64 {
|
||||
return lo.Keys(p.segments)
|
||||
}
|
||||
|
||||
func (p *CollectionTarget) GetAllDmChannelNames() []string {
|
||||
return lo.Keys(p.dmChannels)
|
||||
}
|
||||
|
||||
func (p *CollectionTarget) IsEmpty() bool {
|
||||
return len(p.dmChannels)+len(p.segments) == 0
|
||||
}
|
||||
|
||||
type target struct {
|
||||
// just maintain target at collection level
|
||||
collectionTargetMap map[int64]*CollectionTarget
|
||||
}
|
||||
|
||||
func newTarget() *target {
|
||||
return &target{
|
||||
collectionTargetMap: make(map[int64]*CollectionTarget),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *target) updateCollectionTarget(collectionID int64, target *CollectionTarget) {
|
||||
t.collectionTargetMap[collectionID] = target
|
||||
}
|
||||
|
||||
func (t *target) removeCollectionTarget(collectionID int64) {
|
||||
delete(t.collectionTargetMap, collectionID)
|
||||
}
|
||||
|
||||
func (t *target) getCollectionTarget(collectionID int64) *CollectionTarget {
|
||||
return t.collectionTargetMap[collectionID]
|
||||
}
|
|
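A small sketch of how the two levels above are meant to compose: build an immutable CollectionTarget once, then swap it into the per-collection map. It assumes the meta package; installTarget, the IDs and the channel name are illustrative only:

package meta

import "github.com/milvus-io/milvus/internal/proto/datapb"

// installTarget builds one snapshot and installs it; a CollectionTarget is never
// mutated after construction, callers always replace the whole snapshot.
func installTarget(t *target, collectionID int64) {
	segments := map[int64]*datapb.SegmentInfo{
		1: {ID: 1, CollectionID: collectionID, InsertChannel: "dml-channel-0"},
	}
	channels := map[string]*DmChannel{
		"dml-channel-0": DmChannelFromVChannel(&datapb.VchannelInfo{
			CollectionID: collectionID,
			ChannelName:  "dml-channel-0",
		}),
	}
	snapshot := NewCollectionTarget(segments, channels)
	if !snapshot.IsEmpty() {
		t.updateCollectionTarget(collectionID, snapshot)
	}
}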
@ -17,208 +17,386 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type TargetManager struct {
|
||||
rwmutex sync.RWMutex
|
||||
type TargetScope = int32
|
||||
|
||||
segments map[int64]*datapb.SegmentInfo
|
||||
dmChannels map[string]*DmChannel
|
||||
const (
|
||||
CurrentTarget TargetScope = iota + 1
|
||||
NextTarget
|
||||
)
|
||||
|
||||
type TargetManager struct {
|
||||
rwMutex sync.RWMutex
|
||||
broker Broker
|
||||
meta *Meta
|
||||
|
||||
// all read segment/channel operation happens on current -> only current target are visible to outer
|
||||
// all add segment/channel operation happens on next -> changes can only happen on next target
|
||||
// all remove segment/channel operation happens on Both current and next -> delete status should be consistent
|
||||
current *target
|
||||
next *target
|
||||
}
|
||||
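A hedged sketch of the read/write discipline described in the comments above: writers rebuild the next target, a later promotion makes it the current target, and readers only consult the current one. refreshTarget is a hypothetical driver; real callers promote only after the new distribution is actually loaded:

package meta

// refreshTarget demonstrates the double-buffered flow: write to next,
// promote next to current, read from current.
func refreshTarget(mgr *TargetManager, collectionID int64, partitionIDs ...int64) error {
	// 1. write side: rebuild the next target from recovery info
	if err := mgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionIDs...); err != nil {
		return err
	}
	// 2. promotion: current takes over the next snapshot, next is cleared for this collection
	mgr.UpdateCollectionCurrentTarget(collectionID, partitionIDs...)
	// 3. read side: observers and balancers only ever look at the current target
	_ = mgr.GetDmChannelsByCollection(collectionID, CurrentTarget)
	return nil
}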
|
||||
func NewTargetManager() *TargetManager {
|
||||
func NewTargetManager(broker Broker, meta *Meta) *TargetManager {
|
||||
return &TargetManager{
|
||||
segments: make(map[int64]*datapb.SegmentInfo),
|
||||
dmChannels: make(map[string]*DmChannel),
|
||||
broker: broker,
|
||||
meta: meta,
|
||||
current: newTarget(),
|
||||
next: newTarget(),
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) UpdateCollectionCurrentTarget(collectionID int64, partitionIDs ...int64) {
|
||||
mgr.rwMutex.Lock()
|
||||
defer mgr.rwMutex.Unlock()
|
||||
log := log.With(zap.Int64("collectionID", collectionID),
|
||||
zap.Int64s("PartitionIDs", partitionIDs))
|
||||
|
||||
log.Info("start to update current target for collection")
|
||||
|
||||
newTarget := mgr.next.getCollectionTarget(collectionID)
|
||||
if newTarget == nil || newTarget.IsEmpty() {
|
||||
log.Info("next target does not exist, skip it")
|
||||
return
|
||||
}
|
||||
mgr.current.updateCollectionTarget(collectionID, newTarget)
|
||||
mgr.next.removeCollectionTarget(collectionID)
|
||||
|
||||
log.Info("finish to update current target for collection",
|
||||
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||
}
|
||||
|
||||
// UpdateCollectionNextTargetWithPartitions serves requests for collections that are still loading, where the partition IDs are provided by the caller
|
||||
func (mgr *TargetManager) UpdateCollectionNextTargetWithPartitions(collectionID int64, partitionIDs ...int64) error {
|
||||
mgr.rwMutex.Lock()
|
||||
defer mgr.rwMutex.Unlock()
|
||||
|
||||
if len(partitionIDs) == 0 {
|
||||
msg := "failed to update collection next target, due to no partition specified"
|
||||
log.Warn(msg,
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64s("partitionIDs", partitionIDs))
|
||||
return errors.New(msg)
|
||||
}
|
||||
|
||||
return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
|
||||
}
|
||||
|
||||
// UpdateCollectionNextTarget serves requests for already-loaded collections; it resolves the partition IDs from meta or the broker
|
||||
func (mgr *TargetManager) UpdateCollectionNextTarget(collectionID int64) error {
|
||||
mgr.rwMutex.Lock()
|
||||
defer mgr.rwMutex.Unlock()
|
||||
|
||||
partitionIDs := make([]int64, 0)
|
||||
collection := mgr.meta.GetCollection(collectionID)
|
||||
if collection != nil {
|
||||
var err error
|
||||
partitionIDs, err = mgr.broker.GetPartitions(context.Background(), collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
partitions := mgr.meta.GetPartitionsByCollection(collectionID)
|
||||
if partitions != nil {
|
||||
partitionIDs = lo.Map(partitions, func(partition *Partition, i int) int64 {
|
||||
return partition.PartitionID
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) updateCollectionNextTarget(collectionID int64, partitionIDs ...int64) error {
|
||||
log := log.With(zap.Int64("collectionID", collectionID))
|
||||
|
||||
log.Info("start to update next targets for collection")
|
||||
newTarget, err := mgr.PullNextTarget(mgr.broker, collectionID, partitionIDs...)
|
||||
if err != nil {
|
||||
log.Error("failed to get next targets for collection",
|
||||
zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
mgr.next.updateCollectionTarget(collectionID, newTarget)
|
||||
|
||||
log.Info("finish to update next targets for collection",
|
||||
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, partitionIDs ...int64) (*CollectionTarget, error) {
|
||||
log.Info("start to pull next targets for partition",
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64s("partitionIDs", partitionIDs))
|
||||
|
||||
channelInfos := make(map[string][]*datapb.VchannelInfo)
|
||||
segments := make(map[int64]*datapb.SegmentInfo, 0)
|
||||
for _, partitionID := range partitionIDs {
|
||||
log.Debug("get recovery info...",
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64("partitionID", partitionID))
|
||||
vChannelInfos, binlogs, err := broker.GetRecoveryInfo(context.TODO(), collectionID, partitionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, binlog := range binlogs {
|
||||
segments[binlog.GetSegmentID()] = &datapb.SegmentInfo{
|
||||
ID: binlog.GetSegmentID(),
|
||||
CollectionID: collectionID,
|
||||
PartitionID: partitionID,
|
||||
InsertChannel: binlog.GetInsertChannel(),
|
||||
NumOfRows: binlog.GetNumOfRows(),
|
||||
Binlogs: binlog.GetFieldBinlogs(),
|
||||
Statslogs: binlog.GetStatslogs(),
|
||||
Deltalogs: binlog.GetDeltalogs(),
|
||||
}
|
||||
}
|
||||
|
||||
for _, info := range vChannelInfos {
|
||||
channelInfos[info.GetChannelName()] = append(channelInfos[info.GetChannelName()], info)
|
||||
}
|
||||
}
|
||||
|
||||
dmChannels := make(map[string]*DmChannel)
|
||||
for _, infos := range channelInfos {
|
||||
merged := mgr.mergeDmChannelInfo(infos)
|
||||
dmChannels[merged.GetChannelName()] = merged
|
||||
}
|
||||
|
||||
return NewCollectionTarget(segments, dmChannels), nil
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) mergeDmChannelInfo(infos []*datapb.VchannelInfo) *DmChannel {
|
||||
var dmChannel *DmChannel
|
||||
|
||||
for _, info := range infos {
|
||||
if dmChannel == nil {
|
||||
dmChannel = DmChannelFromVChannel(info)
|
||||
continue
|
||||
}
|
||||
|
||||
if info.SeekPosition.GetTimestamp() < dmChannel.SeekPosition.GetTimestamp() {
|
||||
dmChannel.SeekPosition = info.SeekPosition
|
||||
}
|
||||
dmChannel.DroppedSegmentIds = append(dmChannel.DroppedSegmentIds, info.DroppedSegmentIds...)
|
||||
dmChannel.UnflushedSegmentIds = append(dmChannel.UnflushedSegmentIds, info.UnflushedSegmentIds...)
|
||||
dmChannel.FlushedSegmentIds = append(dmChannel.FlushedSegmentIds, info.FlushedSegmentIds...)
|
||||
}
|
||||
|
||||
return dmChannel
|
||||
}
|
||||
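A worked example of the merge rule, assuming the meta package: for the same vchannel seen from two partitions, the older checkpoint wins and the segment ID lists are concatenated. exampleMergeDmChannel, the import path for internalpb and the timestamps are assumptions:

package meta

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
)

// exampleMergeDmChannel merges two views of the same DM channel.
func exampleMergeDmChannel(mgr *TargetManager) {
	infos := []*datapb.VchannelInfo{
		{ChannelName: "dml-0", SeekPosition: &internalpb.MsgPosition{Timestamp: 200}, FlushedSegmentIds: []int64{1}},
		{ChannelName: "dml-0", SeekPosition: &internalpb.MsgPosition{Timestamp: 100}, FlushedSegmentIds: []int64{2}},
	}
	merged := mgr.mergeDmChannelInfo(infos)
	fmt.Println(merged.SeekPosition.GetTimestamp()) // 100: the smaller (older) checkpoint is kept
	fmt.Println(merged.GetFlushedSegmentIds())      // [1 2]: the ID lists are appended
}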
|
||||
// RemoveCollection removes all channels and segments in the given collection
|
||||
func (mgr *TargetManager) RemoveCollection(collectionID int64) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
mgr.rwMutex.Lock()
|
||||
defer mgr.rwMutex.Unlock()
|
||||
log.Info("remove collection from targets",
|
||||
zap.Int64("collectionID", collectionID))
|
||||
|
||||
mgr.removeCollection(collectionID)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) removeCollection(collectionID int64) {
|
||||
log.Info("remove collection from targets", zap.Int64("collectionID", collectionID))
|
||||
for _, segment := range mgr.segments {
|
||||
if segment.CollectionID == collectionID {
|
||||
mgr.removeSegment(segment.GetID())
|
||||
}
|
||||
}
|
||||
for _, dmChannel := range mgr.dmChannels {
|
||||
if dmChannel.CollectionID == collectionID {
|
||||
mgr.removeDmChannel(dmChannel.GetChannelName())
|
||||
}
|
||||
}
|
||||
mgr.current.removeCollectionTarget(collectionID)
|
||||
mgr.next.removeCollectionTarget(collectionID)
|
||||
}
|
||||
|
||||
// RemovePartition removes all segment in the given partition,
|
||||
// NOTE: this doesn't remove any channel even the given one is the only partition
|
||||
func (mgr *TargetManager) RemovePartition(partitionID int64) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
func (mgr *TargetManager) RemovePartition(collectionID int64, partitionIDs ...int64) {
|
||||
mgr.rwMutex.Lock()
|
||||
defer mgr.rwMutex.Unlock()
|
||||
|
||||
log.Info("remove partition from targets",
|
||||
zap.Int64("partitionID", partitionID))
|
||||
for _, segment := range mgr.segments {
|
||||
if segment.GetPartitionID() == partitionID {
|
||||
mgr.removeSegment(segment.GetID())
|
||||
log := log.With(zap.Int64("collectionID", collectionID),
|
||||
zap.Int64s("PartitionIDs", partitionIDs))
|
||||
|
||||
log.Info("remove partition from targets")
|
||||
|
||||
partitionSet := typeutil.NewUniqueSet(partitionIDs...)
|
||||
|
||||
oldCurrentTarget := mgr.current.getCollectionTarget(collectionID)
|
||||
if oldCurrentTarget != nil {
|
||||
newTarget := mgr.removePartitionFromCollectionTarget(oldCurrentTarget, partitionSet)
|
||||
if newTarget != nil {
|
||||
mgr.current.updateCollectionTarget(collectionID, newTarget)
|
||||
log.Info("finish to remove partition from current target for collection",
|
||||
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||
} else {
|
||||
log.Info("all partitions have been released, release the collection next target now")
|
||||
mgr.current.removeCollectionTarget(collectionID)
|
||||
}
|
||||
}
|
||||
|
||||
oldNextTarget := mgr.next.getCollectionTarget(collectionID)
|
||||
if oldNextTarget != nil {
|
||||
newTarget := mgr.removePartitionFromCollectionTarget(oldNextTarget, partitionSet)
|
||||
if newTarget != nil {
|
||||
mgr.next.updateCollectionTarget(collectionID, newTarget)
|
||||
log.Info("finish to remove partition from next target for collection",
|
||||
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||
} else {
|
||||
log.Info("all partitions have been released, release the collection current target now")
|
||||
mgr.next.removeCollectionTarget(collectionID)
|
||||
}
|
||||
}
|
||||
}
|
||||
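The release path above touches both buffers, so a released partition cannot linger in either one. A brief sketch with hypothetical IDs of the observable effect; exampleRemovePartition is not part of the change:

package meta

// exampleRemovePartition: after releasing partition 100, its sealed segments are
// gone from both the current and the next target, while the DM channels survive.
func exampleRemovePartition(mgr *TargetManager, collectionID int64) {
	mgr.RemovePartition(collectionID, 100)

	remaining := mgr.GetHistoricalSegmentsByPartition(collectionID, 100, NextTarget)
	_ = remaining // always empty (or nil) after the removal

	// channels are kept on a partition release; only RemoveCollection drops them
	_ = mgr.GetDmChannelsByCollection(collectionID, NextTarget)
}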
|
||||
func (mgr *TargetManager) RemoveSegment(segmentID int64) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
|
||||
delete(mgr.segments, segmentID)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) removeSegment(segmentID int64) {
|
||||
delete(mgr.segments, segmentID)
|
||||
log.Info("segment removed from targets", zap.Int64("segmentID", segmentID))
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) Replace(collectionID int64, channels []*DmChannel, segments []*datapb.SegmentInfo) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
|
||||
mgr.addDmChannel(channels...)
|
||||
mgr.addSegment(segments...)
|
||||
}
|
||||
|
||||
// AddSegment adds segment into target set,
|
||||
// requires CollectionID, PartitionID, InsertChannel, SegmentID are set
|
||||
func (mgr *TargetManager) AddSegment(segments ...*datapb.SegmentInfo) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
|
||||
mgr.addSegment(segments...)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) addSegment(segments ...*datapb.SegmentInfo) {
|
||||
for _, segment := range segments {
|
||||
log.Info("add segment into targets",
|
||||
zap.Int64("segmentID", segment.GetID()),
|
||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
||||
)
|
||||
mgr.segments[segment.GetID()] = segment
|
||||
func (mgr *TargetManager) removePartitionFromCollectionTarget(oldTarget *CollectionTarget, partitionSet typeutil.UniqueSet) *CollectionTarget {
|
||||
segments := make(map[int64]*datapb.SegmentInfo)
|
||||
for _, segment := range oldTarget.GetAllSegments() {
|
||||
if !partitionSet.Contain(segment.GetPartitionID()) {
|
||||
segments[segment.GetID()] = segment
|
||||
}
|
||||
}
|
||||
|
||||
// keep the collection's DM channels unchanged; releasing a partition does not drop any channel
|
||||
channels := make(map[string]*DmChannel)
|
||||
for _, channel := range oldTarget.GetAllDmChannels() {
|
||||
channels[channel.GetChannelName()] = channel
|
||||
}
|
||||
|
||||
return NewCollectionTarget(segments, channels)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) ContainSegment(id int64) bool {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
func (mgr *TargetManager) removePartitionGrowingSegmentFromChannel(partitionIDSet typeutil.UniqueSet,
|
||||
oldChannel *DmChannel) *DmChannel {
|
||||
newChannel := oldChannel.Clone()
|
||||
|
||||
return mgr.containSegment(id)
|
||||
notMatchPartition := func(s *datapb.SegmentInfo, _ int) bool {
|
||||
return !partitionIDSet.Contain(s.GetPartitionID())
|
||||
}
|
||||
|
||||
getSegmentID := func(s *datapb.SegmentInfo, _ int) int64 {
|
||||
return s.GetID()
|
||||
}
|
||||
|
||||
newChannel.UnflushedSegments = lo.Filter(newChannel.GetUnflushedSegments(), notMatchPartition)
|
||||
newChannel.UnflushedSegmentIds = lo.Map(newChannel.GetUnflushedSegments(), getSegmentID)
|
||||
newChannel.FlushedSegments = lo.Filter(newChannel.GetFlushedSegments(), notMatchPartition)
|
||||
newChannel.FlushedSegmentIds = lo.Map(newChannel.GetFlushedSegments(), getSegmentID)
|
||||
newChannel.DroppedSegments = lo.Filter(newChannel.GetDroppedSegments(), notMatchPartition)
|
||||
newChannel.DroppedSegmentIds = lo.Map(newChannel.GetDroppedSegments(), getSegmentID)
|
||||
|
||||
return newChannel
|
||||
}
|
||||
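The same filter-then-reindex idiom with samber/lo applies to any of the three segment lists kept on a channel. A generic, self-contained illustration; keepPartitions is a hypothetical helper, not part of the change:

package meta

import (
	"github.com/samber/lo"

	"github.com/milvus-io/milvus/internal/proto/datapb"
)

// keepPartitions drops segments of released partitions and rebuilds the ID slice,
// keeping the *Segments and *SegmentIds views of a channel in sync.
func keepPartitions(segments []*datapb.SegmentInfo, released map[int64]struct{}) ([]*datapb.SegmentInfo, []int64) {
	kept := lo.Filter(segments, func(s *datapb.SegmentInfo, _ int) bool {
		_, gone := released[s.GetPartitionID()]
		return !gone
	})
	ids := lo.Map(kept, func(s *datapb.SegmentInfo, _ int) int64 { return s.GetID() })
	return kept, ids
}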
|
||||
func (mgr *TargetManager) containSegment(id int64) bool {
|
||||
_, ok := mgr.segments[id]
|
||||
return ok
|
||||
func (mgr *TargetManager) getTarget(scope TargetScope) *target {
|
||||
if scope == CurrentTarget {
|
||||
return mgr.current
|
||||
}
|
||||
|
||||
return mgr.next
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetSegmentsByCollection(collection int64, partitions ...int64) []*datapb.SegmentInfo {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
func (mgr *TargetManager) GetStreamingSegmentsByCollection(collectionID int64,
|
||||
scope TargetScope) typeutil.UniqueSet {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
|
||||
segments := make([]*datapb.SegmentInfo, 0)
|
||||
for _, segment := range mgr.segments {
|
||||
if segment.CollectionID == collection &&
|
||||
(len(partitions) == 0 || funcutil.SliceContain(partitions, segment.PartitionID)) {
|
||||
segments = append(segments, segment)
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
segments := typeutil.NewUniqueSet()
|
||||
for _, channel := range collectionTarget.GetAllDmChannels() {
|
||||
segments.Insert(channel.GetUnflushedSegmentIds()...)
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetHistoricalSegmentsByCollection(collectionID int64,
|
||||
scope TargetScope) map[int64]*datapb.SegmentInfo {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
return collectionTarget.GetAllSegments()
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetHistoricalSegmentsByPartition(collectionID int64,
|
||||
partitionID int64, scope TargetScope) map[int64]*datapb.SegmentInfo {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
segments := make(map[int64]*datapb.SegmentInfo)
|
||||
for _, s := range collectionTarget.GetAllSegments() {
|
||||
if s.GetPartitionID() == partitionID {
|
||||
segments[s.GetID()] = s
|
||||
}
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
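How a reader is expected to use the scope parameter of the accessors above; describeTargets is a hypothetical read-only helper:

package meta

import "github.com/milvus-io/milvus/internal/util/typeutil"

// describeTargets collects sealed (historical) segment IDs and growing (streaming)
// segment IDs for one collection; the scope picks the current or the next snapshot.
func describeTargets(mgr *TargetManager, collectionID int64, scope TargetScope) ([]int64, typeutil.UniqueSet) {
	sealed := make([]int64, 0)
	for id := range mgr.GetHistoricalSegmentsByCollection(collectionID, scope) {
		sealed = append(sealed, id)
	}
	growing := mgr.GetStreamingSegmentsByCollection(collectionID, scope)
	return sealed, growing
}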
|
||||
func (mgr *TargetManager) HandoffSegment(dest *datapb.SegmentInfo, sources ...int64) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
func (mgr *TargetManager) GetDmChannelsByCollection(collectionID int64, scope TargetScope) map[string]*DmChannel {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
|
||||
// add dest to target
|
||||
dest.CompactionFrom = sources
|
||||
mgr.addSegment(dest)
|
||||
}
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
// AddDmChannel adds a channel into target set,
|
||||
// requires CollectionID, ChannelName are set
|
||||
func (mgr *TargetManager) AddDmChannel(channels ...*DmChannel) {
|
||||
mgr.rwmutex.Lock()
|
||||
defer mgr.rwmutex.Unlock()
|
||||
|
||||
mgr.addDmChannel(channels...)
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) addDmChannel(channels ...*DmChannel) {
|
||||
for _, channel := range channels {
|
||||
ts := channel.GetSeekPosition().GetTimestamp()
|
||||
log.Info("add channel into targets",
|
||||
zap.String("channel", channel.GetChannelName()),
|
||||
zap.Uint64("checkpoint", ts),
|
||||
zap.Duration("sinceCheckpoint", time.Since(tsoutil.PhysicalTime(ts))),
|
||||
)
|
||||
mgr.dmChannels[channel.ChannelName] = channel
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
return collectionTarget.GetAllDmChannels()
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetDmChannel(channel string) *DmChannel {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
for _, ch := range mgr.dmChannels {
|
||||
if ch.ChannelName == channel {
|
||||
return ch
|
||||
}
|
||||
func (mgr *TargetManager) GetDmChannel(collectionID int64, channel string, scope TargetScope) *DmChannel {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
return collectionTarget.GetAllDmChannels()[channel]
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) ContainDmChannel(channel string) bool {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
func (mgr *TargetManager) GetHistoricalSegment(collectionID int64, id int64, scope TargetScope) *datapb.SegmentInfo {
|
||||
mgr.rwMutex.RLock()
|
||||
defer mgr.rwMutex.RUnlock()
|
||||
targetMap := mgr.getTarget(scope)
|
||||
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||
|
||||
_, ok := mgr.dmChannels[channel]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) removeDmChannel(channel string) {
|
||||
delete(mgr.dmChannels, channel)
|
||||
log.Info("remove channel from targets", zap.String("channel", channel))
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetDmChannelsByCollection(collectionID int64) []*DmChannel {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
|
||||
channels := make([]*DmChannel, 0)
|
||||
for _, channel := range mgr.dmChannels {
|
||||
if channel.GetCollectionID() == collectionID {
|
||||
channels = append(channels, channel)
|
||||
}
|
||||
if collectionTarget == nil {
|
||||
return nil
|
||||
}
|
||||
return channels
|
||||
return collectionTarget.GetAllSegments()[id]
|
||||
}
|
||||
|
||||
func (mgr *TargetManager) GetSegment(id int64) *datapb.SegmentInfo {
|
||||
mgr.rwmutex.RLock()
|
||||
defer mgr.rwmutex.RUnlock()
|
||||
func (mgr *TargetManager) IsNextTargetExist(collectionID int64) bool {
|
||||
newChannels := mgr.GetDmChannelsByCollection(collectionID, NextTarget)
|
||||
|
||||
for _, s := range mgr.segments {
|
||||
if s.GetID() == id {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return len(newChannels) > 0
|
||||
}
|
||||
@ -19,9 +19,16 @@ package meta
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type TargetManagerSuite struct {
|
||||
|
@ -36,11 +43,15 @@ type TargetManagerSuite struct {
|
|||
allChannels []string
|
||||
allSegments []int64
|
||||
|
||||
kv *etcdkv.EtcdKV
|
||||
meta *Meta
|
||||
broker *MockBroker
|
||||
// Test object
|
||||
mgr *TargetManager
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) SetupSuite() {
|
||||
Params.Init()
|
||||
suite.collections = []int64{1000, 1001}
|
||||
suite.partitions = map[int64][]int64{
|
||||
1000: {100, 101},
|
||||
|
@ -74,81 +85,178 @@ func (suite *TargetManagerSuite) SetupSuite() {
|
|||
}
|
||||
|
||||
func (suite *TargetManagerSuite) SetupTest() {
|
||||
suite.mgr = NewTargetManager()
|
||||
for collection, channels := range suite.channels {
|
||||
for _, channel := range channels {
|
||||
suite.mgr.AddDmChannel(DmChannelFromVChannel(&datapb.VchannelInfo{
|
||||
var err error
|
||||
config := GenerateEtcdConfig()
|
||||
cli, err := etcd.GetEtcdClient(
|
||||
config.UseEmbedEtcd,
|
||||
config.EtcdUseSSL,
|
||||
config.Endpoints,
|
||||
config.EtcdTLSCert,
|
||||
config.EtcdTLSKey,
|
||||
config.EtcdTLSCACert,
|
||||
config.EtcdTLSMinVersion)
|
||||
suite.Require().NoError(err)
|
||||
suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)
|
||||
|
||||
// meta
|
||||
store := NewMetaStore(suite.kv)
|
||||
idAllocator := RandomIncrementIDAllocator()
|
||||
suite.meta = NewMeta(idAllocator, store)
|
||||
suite.broker = NewMockBroker(suite.T())
|
||||
suite.mgr = NewTargetManager(suite.broker, suite.meta)
|
||||
|
||||
for _, collection := range suite.collections {
|
||||
dmChannels := make([]*datapb.VchannelInfo, 0)
|
||||
for _, channel := range suite.channels[collection] {
|
||||
dmChannels = append(dmChannels, &datapb.VchannelInfo{
|
||||
CollectionID: collection,
|
||||
ChannelName: channel,
|
||||
}))
|
||||
})
|
||||
}
|
||||
}
|
||||
for collection, partitions := range suite.segments {
|
||||
for partition, segments := range partitions {
|
||||
|
||||
for partition, segments := range suite.segments[collection] {
|
||||
allSegments := make([]*datapb.SegmentBinlogs, 0)
|
||||
for _, segment := range segments {
|
||||
suite.mgr.AddSegment(&datapb.SegmentInfo{
|
||||
ID: segment,
|
||||
CollectionID: collection,
|
||||
PartitionID: partition,
|
||||
|
||||
allSegments = append(allSegments, &datapb.SegmentBinlogs{
|
||||
SegmentID: segment,
|
||||
InsertChannel: suite.channels[collection][0],
|
||||
})
|
||||
}
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, partition).Return(dmChannels, allSegments, nil)
|
||||
}
|
||||
|
||||
suite.mgr.UpdateCollectionNextTargetWithPartitions(collection, suite.partitions[collection]...)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) TestGet() {
|
||||
mgr := suite.mgr
|
||||
func (suite *TargetManagerSuite) TearDownSuite() {
|
||||
suite.kv.Close()
|
||||
}
|
||||
|
||||
for collection, channels := range suite.channels {
|
||||
results := mgr.GetDmChannelsByCollection(collection)
|
||||
suite.assertChannels(channels, results)
|
||||
for _, channel := range channels {
|
||||
suite.True(mgr.ContainDmChannel(channel))
|
||||
}
|
||||
func (suite *TargetManagerSuite) TestUpdateCurrentTarget() {
|
||||
collectionID := int64(1000)
|
||||
suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
|
||||
suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
suite.mgr.UpdateCollectionCurrentTarget(collectionID)
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
|
||||
suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) TestUpdateNextTarget() {
|
||||
collectionID := int64(1003)
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
suite.meta.PutCollection(&Collection{
|
||||
CollectionLoadInfo: &querypb.CollectionLoadInfo{
|
||||
CollectionID: collectionID,
|
||||
ReplicaNumber: 1},
|
||||
})
|
||||
suite.meta.PutPartition(&Partition{
|
||||
PartitionLoadInfo: &querypb.PartitionLoadInfo{
|
||||
CollectionID: collectionID,
|
||||
PartitionID: 1,
|
||||
},
|
||||
})
|
||||
|
||||
nextTargetChannels := []*datapb.VchannelInfo{
|
||||
{
|
||||
CollectionID: collectionID,
|
||||
ChannelName: "channel-1",
|
||||
},
|
||||
{
|
||||
CollectionID: collectionID,
|
||||
ChannelName: "channel-2",
|
||||
},
|
||||
}
|
||||
|
||||
nextTargetSegments := []*datapb.SegmentBinlogs{
|
||||
{
|
||||
SegmentID: 11,
|
||||
InsertChannel: "channel-1",
|
||||
},
|
||||
{
|
||||
SegmentID: 12,
|
||||
InsertChannel: "channel-2",
|
||||
},
|
||||
}
|
||||
|
||||
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collectionID, int64(1)).Return(nextTargetChannels, nextTargetSegments, nil)
|
||||
suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
|
||||
suite.assertSegments([]int64{11, 12}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{"channel-1", "channel-2"}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) TestRemovePartition() {
|
||||
collectionID := int64(1000)
|
||||
suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
suite.mgr.RemovePartition(collectionID, 100)
|
||||
suite.assertSegments([]int64{3, 4}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) TestRemoveCollection() {
|
||||
collectionID := int64(1000)
|
||||
suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
suite.mgr.RemoveCollection(collectionID)
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
collectionID = int64(1001)
|
||||
suite.mgr.UpdateCollectionCurrentTarget(collectionID)
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
|
||||
suite.mgr.RemoveCollection(collectionID)
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
|
||||
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
|
||||
suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) getAllSegment(collectionID int64, partitionIDs []int64) []int64 {
|
||||
allSegments := make([]int64, 0)
|
||||
for collection, partitions := range suite.segments {
|
||||
collectionSegments := make([]int64, 0)
|
||||
for partition, segments := range partitions {
|
||||
results := mgr.GetSegmentsByCollection(collection, partition)
|
||||
suite.assertSegments(segments, results)
|
||||
for _, segment := range segments {
|
||||
suite.True(mgr.ContainSegment(segment))
|
||||
if collectionID == collection {
|
||||
for partition, segments := range partitions {
|
||||
if lo.Contains(partitionIDs, partition) {
|
||||
allSegments = append(allSegments, segments...)
|
||||
}
|
||||
}
|
||||
collectionSegments = append(collectionSegments, segments...)
|
||||
}
|
||||
results := mgr.GetSegmentsByCollection(collection)
|
||||
suite.assertSegments(collectionSegments, results)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) TestRemove() {
|
||||
mgr := suite.mgr
|
||||
|
||||
for collection, partitions := range suite.segments {
|
||||
// Remove first segment of each partition
|
||||
for _, segments := range partitions {
|
||||
mgr.RemoveSegment(segments[0])
|
||||
suite.False(mgr.ContainSegment(segments[0]))
|
||||
}
|
||||
|
||||
// Remove first partition of each collection
|
||||
firstPartition := suite.partitions[collection][0]
|
||||
mgr.RemovePartition(firstPartition)
|
||||
segments := mgr.GetSegmentsByCollection(collection, firstPartition)
|
||||
suite.Empty(segments)
|
||||
}
|
||||
|
||||
// Remove first collection
|
||||
firstCollection := suite.collections[0]
|
||||
mgr.RemoveCollection(firstCollection)
|
||||
channels := mgr.GetDmChannelsByCollection(firstCollection)
|
||||
suite.Empty(channels)
|
||||
segments := mgr.GetSegmentsByCollection(firstCollection)
|
||||
suite.Empty(segments)
|
||||
return allSegments
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) assertChannels(expected []string, actual []*DmChannel) bool {
|
||||
func (suite *TargetManagerSuite) assertChannels(expected []string, actual map[string]*DmChannel) bool {
|
||||
if !suite.Equal(len(expected), len(actual)) {
|
||||
return false
|
||||
}
|
||||
|
@ -161,7 +269,7 @@ func (suite *TargetManagerSuite) assertChannels(expected []string, actual []*DmC
|
|||
return suite.Len(set, 0)
|
||||
}
|
||||
|
||||
func (suite *TargetManagerSuite) assertSegments(expected []int64, actual []*datapb.SegmentInfo) bool {
|
||||
func (suite *TargetManagerSuite) assertSegments(expected []int64, actual map[int64]*datapb.SegmentInfo) bool {
|
||||
if !suite.Equal(len(expected), len(actual)) {
|
||||
return false
|
||||
}
|
||||
@ -29,7 +29,6 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type CollectionObserver struct {
|
||||
|
@ -39,7 +38,6 @@ type CollectionObserver struct {
|
|||
meta *meta.Meta
|
||||
targetMgr *meta.TargetManager
|
||||
broker meta.Broker
|
||||
handoffOb *HandoffObserver
|
||||
collectionLoadedCount map[int64]int
|
||||
partitionLoadedCount map[int64]int
|
||||
|
||||
|
@ -53,7 +51,6 @@ func NewCollectionObserver(
|
|||
meta *meta.Meta,
|
||||
targetMgr *meta.TargetManager,
|
||||
broker meta.Broker,
|
||||
handoffObserver *HandoffObserver,
|
||||
) *CollectionObserver {
|
||||
return &CollectionObserver{
|
||||
stopCh: make(chan struct{}),
|
||||
|
@ -61,7 +58,6 @@ func NewCollectionObserver(
|
|||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
broker: broker,
|
||||
handoffOb: handoffObserver,
|
||||
collectionLoadedCount: make(map[int64]int),
|
||||
partitionLoadedCount: make(map[int64]int),
|
||||
|
||||
|
@ -105,28 +101,17 @@ func (ob *CollectionObserver) Observe() {
|
|||
func (ob *CollectionObserver) observeTimeout() {
|
||||
collections := ob.meta.CollectionManager.GetAllCollections()
|
||||
for _, collection := range collections {
|
||||
if collection.GetStatus() != querypb.LoadStatus_Loading {
|
||||
if collection.GetStatus() != querypb.LoadStatus_Loading ||
|
||||
time.Now().Before(collection.UpdatedAt.Add(Params.QueryCoordCfg.LoadTimeoutSeconds)) {
|
||||
continue
|
||||
}
|
||||
|
||||
refreshTime := collection.UpdatedAt.Add(Params.QueryCoordCfg.RefreshTargetsIntervalSeconds)
|
||||
timeoutTime := collection.UpdatedAt.Add(Params.QueryCoordCfg.LoadTimeoutSeconds)
|
||||
|
||||
now := time.Now()
|
||||
if now.After(timeoutTime) {
|
||||
log.Info("load collection timeout, cancel it",
|
||||
zap.Int64("collectionID", collection.GetCollectionID()),
|
||||
zap.Duration("loadTime", time.Since(collection.CreatedAt)))
|
||||
ob.meta.CollectionManager.RemoveCollection(collection.GetCollectionID())
|
||||
ob.meta.ReplicaManager.RemoveCollection(collection.GetCollectionID())
|
||||
ob.targetMgr.RemoveCollection(collection.GetCollectionID())
|
||||
} else if now.After(refreshTime) {
|
||||
if ob.refreshTargets(collection.UpdatedAt, collection.GetCollectionID()) {
|
||||
log.Info("load for long time, refresh targets of collection",
|
||||
zap.Duration("loadTime", time.Since(collection.CreatedAt)),
|
||||
)
|
||||
}
|
||||
}
|
||||
log.Info("load collection timeout, cancel it",
|
||||
zap.Int64("collectionID", collection.GetCollectionID()),
|
||||
zap.Duration("loadTime", time.Since(collection.CreatedAt)))
|
||||
ob.meta.CollectionManager.RemoveCollection(collection.GetCollectionID())
|
||||
ob.meta.ReplicaManager.RemoveCollection(collection.GetCollectionID())
|
||||
ob.targetMgr.RemoveCollection(collection.GetCollectionID())
|
||||
}
|
||||
|
||||
partitions := utils.GroupPartitionsByCollection(
|
||||
|
@ -139,74 +124,23 @@ func (ob *CollectionObserver) observeTimeout() {
            zap.Int64("collectionID", collection),
        )
        for _, partition := range partitions {
            if partition.GetStatus() != querypb.LoadStatus_Loading {
            if partition.GetStatus() != querypb.LoadStatus_Loading ||
                time.Now().Before(partition.CreatedAt.Add(Params.QueryCoordCfg.LoadTimeoutSeconds)) {
                continue
            }

            refreshTime := partition.UpdatedAt.Add(Params.QueryCoordCfg.RefreshTargetsIntervalSeconds)
            timeoutTime := partition.UpdatedAt.Add(Params.QueryCoordCfg.LoadTimeoutSeconds)

            now := time.Now()
            if now.After(timeoutTime) {
                log.Info("load partition timeout, cancel all partitions",
                    zap.Int64("partitionID", partition.GetPartitionID()),
                    zap.Duration("loadTime", time.Since(partition.CreatedAt)))
                // TODO(yah01): Now, releasing part of partitions is not allowed
                ob.meta.CollectionManager.RemoveCollection(partition.GetCollectionID())
                ob.meta.ReplicaManager.RemoveCollection(partition.GetCollectionID())
                ob.targetMgr.RemoveCollection(partition.GetCollectionID())
                break
            } else if now.After(refreshTime) {
                partitionIDs := lo.Map(partitions, func(partition *meta.Partition, _ int) int64 {
                    return partition.GetPartitionID()
                })
                if ob.refreshTargets(partition.UpdatedAt, partition.GetCollectionID(), partitionIDs...) {
                    log.Info("load for long time, refresh targets of partitions",
                        zap.Duration("loadTime", time.Since(partition.CreatedAt)),
                    )
                }
                break
            }
            log.Info("load partition timeout, cancel all partitions",
                zap.Int64("partitionID", partition.GetPartitionID()),
                zap.Duration("loadTime", time.Since(partition.CreatedAt)))
            // TODO(yah01): Now, releasing part of partitions is not allowed
            ob.meta.CollectionManager.RemoveCollection(partition.GetCollectionID())
            ob.meta.ReplicaManager.RemoveCollection(partition.GetCollectionID())
            ob.targetMgr.RemoveCollection(partition.GetCollectionID())
            break
            }
        }
}

// refreshTargets refreshes the targets of the given collection,
// avoids repeated refreshing by checking the updatedAt,
// returns true if actually refreshed the targets,
// false otherwise
func (ob *CollectionObserver) refreshTargets(updatedAt time.Time, collectionID int64, partitions ...int64) bool {
    refreshedTime, ok := ob.refreshed[collectionID]
    if ok && refreshedTime.Equal(updatedAt) {
        return false
    }

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    ob.handoffOb.Unregister(ctx, collectionID)

    if len(partitions) == 0 {
        var err error
        partitions, err = ob.broker.GetPartitions(ctx, collectionID)
        if err != nil {
            log.Warn("failed to get partitions from RootCoord, will refresh targets later", zap.Error(err))
            return false
        }
    }

    ob.handoffOb.Register(collectionID)
    channels, segments, err := utils.FetchTargets(ctx, ob.targetMgr, ob.broker, collectionID, partitions)
    if err != nil {
        log.Warn("failed to fetch targets from DataCoord, will refresh targets later", zap.Error(err))
        return false
    }
    ob.targetMgr.Replace(collectionID, channels, segments)

    ob.refreshed[collectionID] = updatedAt
    return true
}
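The removed refreshTargets helper deduplicates work by remembering the updatedAt timestamp of the last refresh it performed per collection. A minimal, self-contained sketch of that dedup-by-timestamp pattern (the refresher type and its methods are hypothetical names, not the Milvus API):

```go
package main

import (
	"fmt"
	"time"
)

// refresher skips repeated work for the same updatedAt timestamp,
// mirroring the ob.refreshed[collectionID] bookkeeping above.
type refresher struct {
	refreshed map[int64]time.Time
}

// refresh returns true only when the given updatedAt has not yet been
// processed for this collection.
func (r *refresher) refresh(collectionID int64, updatedAt time.Time) bool {
	if last, ok := r.refreshed[collectionID]; ok && last.Equal(updatedAt) {
		return false // already refreshed for this update, skip
	}
	// ... fetch and replace targets here ...
	r.refreshed[collectionID] = updatedAt
	return true
}

func main() {
	r := &refresher{refreshed: map[int64]time.Time{}}
	ts := time.Now()
	fmt.Println(r.refresh(1, ts)) // true: first refresh for this timestamp
	fmt.Println(r.refresh(1, ts)) // false: same updatedAt, deduplicated
}
```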
func (ob *CollectionObserver) observeLoadStatus() {
    collections := ob.meta.CollectionManager.GetAllCollections()
    for _, collection := range collections {
@ -231,8 +165,8 @@ func (ob *CollectionObserver) observeLoadStatus() {
func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Collection) {
    log := log.With(zap.Int64("collectionID", collection.GetCollectionID()))

    segmentTargets := ob.targetMgr.GetSegmentsByCollection(collection.GetCollectionID())
    channelTargets := ob.targetMgr.GetDmChannelsByCollection(collection.GetCollectionID())
    segmentTargets := ob.targetMgr.GetHistoricalSegmentsByCollection(collection.GetCollectionID(), meta.NextTarget)
    channelTargets := ob.targetMgr.GetDmChannelsByCollection(collection.GetCollectionID(), meta.NextTarget)
    targetNum := len(segmentTargets) + len(channelTargets)
    log.Info("collection targets",
        zap.Int("segmentTargetNum", len(segmentTargets)),
@ -240,45 +174,45 @@ func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Colle
        zap.Int("totalTargetNum", targetNum),
        zap.Int32("replicaNum", collection.GetReplicaNumber()),
    )
    if targetNum == 0 {
        log.Info("collection released, skip it")
        return
    }

    loadedCount := 0
    for _, channel := range channelTargets {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collection.GetCollectionID(),
            ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
        loadedCount += len(group)
    }
    subChannelCount := loadedCount
    for _, segment := range segmentTargets {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collection.GetCollectionID(),
            ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
        loadedCount += len(group)
    }
    if loadedCount > 0 {
        log.Info("collection load progress",
            zap.Int("subChannelCount", subChannelCount),
            zap.Int("loadSegmentCount", loadedCount-subChannelCount),
        )
    }

    updated := collection.Clone()
    targetNum *= int(collection.GetReplicaNumber())
    updated.LoadPercentage = int32(loadedCount * 100 / targetNum)
    if loadedCount <= ob.collectionLoadedCount[collection.GetCollectionID()] {
        return
    loadedCount := 0
    if targetNum == 0 {
        log.Info("No segment/channel in target need to be loaded!")
        updated.LoadPercentage = 100
    } else {
        for _, channel := range channelTargets {
            group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
                collection.GetCollectionID(),
                ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
            loadedCount += len(group)
        }
        subChannelCount := loadedCount
        for _, segment := range segmentTargets {
            group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
                collection.GetCollectionID(),
                ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
            loadedCount += len(group)
        }
        if loadedCount > 0 {
            log.Info("collection load progress",
                zap.Int("subChannelCount", subChannelCount),
                zap.Int("loadSegmentCount", loadedCount-subChannelCount),
            )
        }

        updated.LoadPercentage = int32(loadedCount * 100 / (targetNum * int(collection.GetReplicaNumber())))
    }

    if loadedCount <= ob.collectionLoadedCount[collection.GetCollectionID()] && updated.LoadPercentage != 100 {
        return
    }
    ob.collectionLoadedCount[collection.GetCollectionID()] = loadedCount
    if loadedCount >= targetNum {
    if updated.LoadPercentage == 100 {
        delete(ob.collectionLoadedCount, collection.GetCollectionID())
        ob.targetMgr.UpdateCollectionCurrentTarget(updated.CollectionID)
        updated.Status = querypb.LoadStatus_Loaded
        ob.meta.CollectionManager.UpdateCollection(updated)
        ob.handoffOb.StartHandoff(updated.GetCollectionID())

        elapsed := time.Since(updated.CreatedAt)
        metrics.QueryCoordLoadLatency.WithLabelValues().Observe(float64(elapsed.Milliseconds()))
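The new progress formula counts one unit per (replica, channel) and (replica, sealed segment) pair found in the leader views, and divides by the total target count times the replica number. A small sketch of the arithmetic, with purely illustrative numbers (the helper name and values are assumptions, not the observer's code):

```go
package main

import "fmt"

// loadPercentage mirrors the computation above: loadedCount is the number
// of (replica, channel/segment) pairs already observed, targetNum the
// number of channels plus sealed segments in the next target, and
// replicaNumber the requested replica count.
func loadPercentage(loadedCount, targetNum, replicaNumber int) int32 {
	if targetNum == 0 {
		return 100 // nothing to load
	}
	return int32(loadedCount * 100 / (targetNum * replicaNumber))
}

func main() {
	// 2 channels + 4 sealed segments = 6 targets, 2 replicas,
	// 8 pairs already loaded -> 8*100/(6*2) = 66%.
	fmt.Println(loadPercentage(8, 6, 2))
}
```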
@ -296,8 +230,8 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti
        zap.Int64("partitionID", partition.GetPartitionID()),
    )

    segmentTargets := ob.targetMgr.GetSegmentsByCollection(partition.GetCollectionID(), partition.GetPartitionID())
    channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID())
    segmentTargets := ob.targetMgr.GetHistoricalSegmentsByPartition(partition.GetCollectionID(), partition.GetPartitionID(), meta.NextTarget)
    channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID(), meta.NextTarget)
    targetNum := len(segmentTargets) + len(channelTargets)
    log.Info("partition targets",
        zap.Int("segmentTargetNum", len(segmentTargets)),
@ -305,44 +239,44 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti
        zap.Int("totalTargetNum", targetNum),
        zap.Int32("replicaNum", partition.GetReplicaNumber()),
    )
    if targetNum == 0 {
        log.Info("partition released, skip it")
        return
    }

    loadedCount := 0
    for _, channel := range channelTargets {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            partition.GetCollectionID(),
            ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
        loadedCount += len(group)
    }
    subChannelCount := loadedCount
    for _, segment := range segmentTargets {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            partition.GetCollectionID(),
            ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
        loadedCount += len(group)
    }
    if loadedCount > 0 {
        log.Info("partition load progress",
            zap.Int("subChannelCount", subChannelCount),
            zap.Int("loadSegmentCount", loadedCount-subChannelCount))
    updated := partition.Clone()
    if targetNum == 0 {
        log.Info("No segment/channel in target need to be loaded!")
        updated.LoadPercentage = 100
    } else {
        for _, channel := range channelTargets {
            group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
                partition.GetCollectionID(),
                ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
            loadedCount += len(group)
        }
        subChannelCount := loadedCount
        for _, segment := range segmentTargets {
            group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
                partition.GetCollectionID(),
                ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
            loadedCount += len(group)
        }
        if loadedCount > 0 {
            log.Info("partition load progress",
                zap.Int("subChannelCount", subChannelCount),
                zap.Int("loadSegmentCount", loadedCount-subChannelCount))
        }
        updated.LoadPercentage = int32(loadedCount * 100 / (targetNum * int(partition.GetReplicaNumber())))

    }

    updated := partition.Clone()
    targetNum *= int(partition.GetReplicaNumber())
    updated.LoadPercentage = int32(loadedCount * 100 / targetNum)
    if loadedCount <= ob.partitionLoadedCount[partition.GetPartitionID()] {
    if loadedCount <= ob.partitionLoadedCount[partition.GetPartitionID()] && updated.LoadPercentage != 100 {
        return
    }

    ob.partitionLoadedCount[partition.GetPartitionID()] = loadedCount
    if loadedCount >= targetNum {
    if updated.LoadPercentage == 100 {
        delete(ob.partitionLoadedCount, partition.GetPartitionID())
        ob.targetMgr.UpdateCollectionCurrentTarget(partition.GetCollectionID(), partition.GetPartitionID())
        updated.Status = querypb.LoadStatus_Loaded
        ob.meta.CollectionManager.UpdatePartition(updated)
        ob.handoffOb.StartHandoff(updated.GetCollectionID())
        ob.meta.CollectionManager.PutPartition(updated)

        elapsed := time.Since(updated.CreatedAt)
        metrics.QueryCoordLoadLatency.WithLabelValues().Observe(float64(elapsed.Milliseconds()))
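With this change the observer reads targets from the next target (meta.NextTarget) while loading and only promotes them via UpdateCollectionCurrentTarget once progress reaches 100%. A minimal sketch of that two-slot idea, assuming simplified types rather than the real TargetManager:

```go
package main

import "fmt"

// targets is a simplified two-slot holder: readers use current,
// the loader fills next and flips it when loading completes.
type targets struct {
	current map[int64]struct{} // segments served to queries
	next    map[int64]struct{} // segments being loaded
}

// updateNext replaces the next target, e.g. after fetching recovery info.
func (t *targets) updateNext(segmentIDs ...int64) {
	t.next = make(map[int64]struct{}, len(segmentIDs))
	for _, id := range segmentIDs {
		t.next[id] = struct{}{}
	}
}

// promote makes the fully loaded next target the current one,
// mirroring UpdateCollectionCurrentTarget at 100% progress.
func (t *targets) promote() {
	t.current = t.next
	t.next = nil
}

func main() {
	t := &targets{current: map[int64]struct{}{}}
	t.updateNext(1, 2, 3)
	fmt.Println(len(t.current), len(t.next)) // 0 3: still loading
	t.promote()
	fmt.Println(len(t.current), len(t.next)) // 3 0: queries switch over
}
```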
@ -21,6 +21,10 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"
    clientv3 "go.etcd.io/etcd/client/v3"

    "github.com/milvus-io/milvus/internal/kv"
    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/log"
@ -29,9 +33,6 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    . "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/util/etcd"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"
    clientv3 "go.etcd.io/etcd/client/v3"
)

type CollectionObserverSuite struct {
@ -58,7 +59,6 @@ type CollectionObserverSuite struct {
    meta      *meta.Meta
    targetMgr *meta.TargetManager
    broker    *meta.MockBroker
    handoffOb *HandoffObserver

    // Test object
    ob *CollectionObserver
@ -173,15 +173,8 @@ func (suite *CollectionObserverSuite) SetupTest() {
    // Dependencies
    suite.dist = meta.NewDistributionManager()
    suite.meta = meta.NewMeta(suite.idAllocator, suite.store)
    suite.targetMgr = meta.NewTargetManager()
    suite.broker = meta.NewMockBroker(suite.T())
    suite.handoffOb = NewHandoffObserver(
        suite.store,
        suite.meta,
        suite.dist,
        suite.targetMgr,
        suite.broker,
    )
    suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)

    // Test object
    suite.ob = NewCollectionObserver(
@ -189,11 +182,9 @@ func (suite *CollectionObserverSuite) SetupTest() {
        suite.meta,
        suite.targetMgr,
        suite.broker,
        suite.handoffOb,
    )

    Params.QueryCoordCfg.LoadTimeoutSeconds = 600 * time.Second
    Params.QueryCoordCfg.RefreshTargetsIntervalSeconds = 600 * time.Second

    suite.loadAll()
}
@ -267,74 +258,12 @@ func (suite *CollectionObserverSuite) TestObservePartitionsTimeout() {
    }, timeout*2, timeout/10)

    suite.Eventually(func() bool {
        return suite.isCollectionLoaded(suite.collections[0]) &&
            suite.isCollectionTimeout(suite.collections[1])
        return suite.isCollectionLoaded(suite.collections[0])
    }, timeout*2, timeout/10)
}

func (suite *CollectionObserverSuite) TestObserveCollectionRefresh() {
    const (
        timeout = 2 * time.Second
    )
    // Not timeout
    Params.QueryCoordCfg.RefreshTargetsIntervalSeconds = timeout
    suite.broker.EXPECT().GetPartitions(mock.Anything, int64(100)).Return(suite.partitions[100], nil)
    for _, partition := range suite.partitions[100] {
        suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(100), partition).Return(nil, nil, nil)
    }
    for _, partition := range suite.partitions[102] {
        suite.broker.EXPECT().GetPartitions(mock.Anything, int64(102)).Return(suite.partitions[102], nil)
        suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(102), partition).Return(nil, nil, nil)
    }
    suite.ob.Start(context.Background())

    // Collection 100 refreshed,
    // collection 101 loaded
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 1,
        CollectionID: 101,
        Channel: "101-dmc0",
        Segments: map[int64]*querypb.SegmentDist{3: {NodeID: 1, Version: 0}},
    })
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID: 2,
        CollectionID: 101,
        Channel: "101-dmc1",
        Segments: map[int64]*querypb.SegmentDist{4: {NodeID: 2, Version: 0}},
    })
    time.Sleep(timeout * 2)
}

func (suite *CollectionObserverSuite) TestObservePartitionsRefresh() {
    const (
        timeout = 2 * time.Second
    )
    // Not timeout
    Params.QueryCoordCfg.RefreshTargetsIntervalSeconds = timeout
    for _, partition := range suite.partitions[101] {
        suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(101), partition).Return(nil, nil, nil)
    }
    for _, partition := range suite.partitions[102] {
        suite.broker.EXPECT().GetPartitions(mock.Anything, int64(102)).Return(suite.partitions[102], nil)
        suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(102), partition).Return(nil, nil, nil)
    }
    suite.ob.Start(context.Background())

    // Collection 100 loaded,
    // collection 101 refreshed
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 1,
        CollectionID: 100,
        Channel: "100-dmc0",
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}},
    })
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID: 2,
        CollectionID: 100,
        Channel: "100-dmc1",
        Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}},
    })
    time.Sleep(timeout * 2)
    suite.Eventually(func() bool {
        return suite.isCollectionTimeout(suite.collections[1])
    }, timeout*2, timeout/10)
}
func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool {
@ -342,8 +271,8 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool
    percentage := suite.meta.GetLoadPercentage(collection)
    status := suite.meta.GetStatus(collection)
    replicas := suite.meta.ReplicaManager.GetByCollection(collection)
    channels := suite.targetMgr.GetDmChannelsByCollection(collection)
    segments := suite.targetMgr.GetSegmentsByCollection(collection)
    channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
    segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)

    return exist &&
        percentage == 100 &&
@ -356,8 +285,8 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool
func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool {
    exist := suite.meta.Exist(collection)
    replicas := suite.meta.ReplicaManager.GetByCollection(collection)
    channels := suite.targetMgr.GetDmChannelsByCollection(collection)
    segments := suite.targetMgr.GetSegmentsByCollection(collection)
    channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
    segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)

    return !(exist ||
        len(replicas) > 0 ||
@ -411,8 +340,24 @@ func (suite *CollectionObserverSuite) load(collection int64) {
        }
    }

    suite.targetMgr.AddDmChannel(suite.channels[collection]...)
    suite.targetMgr.AddSegment(suite.segments[collection]...)
    allSegments := make([]*datapb.SegmentBinlogs, 0)
    dmChannels := make([]*datapb.VchannelInfo, 0)
    for _, channel := range suite.channels[collection] {
        dmChannels = append(dmChannels, &datapb.VchannelInfo{
            CollectionID: collection,
            ChannelName: channel.GetChannelName(),
        })
    }

    for _, segment := range suite.segments[collection] {
        allSegments = append(allSegments, &datapb.SegmentBinlogs{
            SegmentID: segment.GetID(),
            InsertChannel: segment.GetInsertChannel(),
        })

    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, int64(1)).Return(dmChannels, allSegments, nil)
    suite.targetMgr.UpdateCollectionNextTargetWithPartitions(collection, int64(1))
}

func TestCollectionObserver(t *testing.T) {
@ -1,524 +0,0 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package observers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
||||
"github.com/milvus-io/milvus/internal/util/retry"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/samber/lo"
|
||||
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||
v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type CollectionHandoffStatus int32
|
||||
type HandoffEventStatus int32
|
||||
|
||||
const (
|
||||
// CollectionHandoffStatusRegistered means the collection starts receiving handoff events
|
||||
CollectionHandoffStatusRegistered CollectionHandoffStatus = iota + 1
|
||||
// CollectionHandoffStatusStarted means the collection starts triggering handoff events
|
||||
CollectionHandoffStatusStarted
|
||||
)
|
||||
|
||||
const (
|
||||
HandoffEventStatusReceived HandoffEventStatus = iota + 1
|
||||
HandoffEventStatusTriggered
|
||||
)
|
||||
|
||||
type HandoffEvent struct {
|
||||
Segment *querypb.SegmentInfo
|
||||
Status HandoffEventStatus
|
||||
}
|
||||
|
||||
type queue []int64
|
||||
|
||||
type HandoffObserver struct {
|
||||
store meta.Store
|
||||
c chan struct{}
|
||||
wg sync.WaitGroup
|
||||
meta *meta.Meta
|
||||
dist *meta.DistributionManager
|
||||
target *meta.TargetManager
|
||||
broker meta.Broker
|
||||
revision int64
|
||||
|
||||
collectionStatus map[int64]CollectionHandoffStatus
|
||||
handoffEventLock sync.RWMutex
|
||||
handoffEvents map[int64]*HandoffEvent
|
||||
// collection id -> queue
|
||||
handoffSubmitOrders map[int64]queue
|
||||
// collectionId -> loaded partitionId, only for load collection case
|
||||
loadedPartitions map[int64]typeutil.UniqueSet
|
||||
|
||||
stopOnce sync.Once
|
||||
}
|
||||
|
||||
func NewHandoffObserver(store meta.Store, meta *meta.Meta, dist *meta.DistributionManager, target *meta.TargetManager, broker meta.Broker) *HandoffObserver {
|
||||
return &HandoffObserver{
|
||||
store: store,
|
||||
c: make(chan struct{}),
|
||||
meta: meta,
|
||||
dist: dist,
|
||||
target: target,
|
||||
broker: broker,
|
||||
collectionStatus: map[int64]CollectionHandoffStatus{},
|
||||
handoffEvents: map[int64]*HandoffEvent{},
|
||||
handoffSubmitOrders: map[int64]queue{},
|
||||
loadedPartitions: map[int64]typeutil.Set[int64]{},
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) Register(collectionIDs ...int64) {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
log.Info("Register handoff for collection",
|
||||
zap.Int64s("collectionIDs", collectionIDs))
|
||||
|
||||
for _, collectionID := range collectionIDs {
|
||||
ob.collectionStatus[collectionID] = CollectionHandoffStatusRegistered
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) Unregister(ctx context.Context, collectionIDs ...int64) {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
log.Info("Unregister handoff for collection",
|
||||
zap.Int64s("collectionIDs", collectionIDs))
|
||||
|
||||
for _, collectionID := range collectionIDs {
|
||||
delete(ob.collectionStatus, collectionID)
|
||||
delete(ob.handoffSubmitOrders, collectionID)
|
||||
}
|
||||
|
||||
collectionSet := typeutil.NewUniqueSet(collectionIDs...)
|
||||
for segmentID, event := range ob.handoffEvents {
|
||||
if collectionSet.Contain(event.Segment.GetCollectionID()) {
|
||||
delete(ob.handoffEvents, segmentID)
|
||||
ob.cleanEvent(ctx, event.Segment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) StartHandoff(collectionIDs ...int64) {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
|
||||
for _, collectionID := range collectionIDs {
|
||||
ob.collectionStatus[collectionID] = CollectionHandoffStatusStarted
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) GetEventNum() int {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
|
||||
return len(ob.handoffEvents)
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) consumeOutdatedHandoffEvent(ctx context.Context) error {
|
||||
_, handoffReqValues, revision, err := ob.store.LoadHandoffWithRevision()
|
||||
if err != nil {
|
||||
log.Error("reloadFromKV: LoadWithRevision from kv failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
// set watch start revision
|
||||
ob.revision = revision
|
||||
|
||||
for _, value := range handoffReqValues {
|
||||
segmentInfo := &querypb.SegmentInfo{}
|
||||
err := proto.Unmarshal([]byte(value), segmentInfo)
|
||||
if err != nil {
|
||||
log.Error("reloadFromKV: unmarshal failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
ob.cleanEvent(ctx, segmentInfo)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) Start(ctx context.Context) error {
|
||||
log.Info("Start reload handoff event from etcd")
|
||||
if err := ob.consumeOutdatedHandoffEvent(ctx); err != nil {
|
||||
log.Error("handoff observer reload from kv failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
log.Info("Finish reload handoff event from etcd")
|
||||
|
||||
ob.wg.Add(1)
|
||||
go ob.schedule(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) Stop() {
|
||||
ob.stopOnce.Do(func() {
|
||||
close(ob.c)
|
||||
ob.wg.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) schedule(ctx context.Context) {
|
||||
defer ob.wg.Done()
|
||||
log.Info("start watch Segment handoff loop")
|
||||
ticker := time.NewTicker(Params.QueryCoordCfg.CheckHandoffInterval)
|
||||
log.Info("handoff interval", zap.String("interval", Params.QueryCoordCfg.CheckHandoffInterval.String()))
|
||||
watchChan := ob.store.WatchHandoffEvent(ob.revision + 1)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Info("close handoff handler due to context done!")
|
||||
return
|
||||
case <-ob.c:
|
||||
log.Info("close handoff handler")
|
||||
return
|
||||
|
||||
case resp, ok := <-watchChan:
|
||||
if !ok {
|
||||
log.Error("watch Segment handoff loop failed because watch channel is closed!")
|
||||
return
|
||||
}
|
||||
|
||||
if err := resp.Err(); err != nil {
|
||||
if errors.Is(err, v3rpc.ErrCompacted) {
|
||||
log.Info("Etcd Revision compacted error met, restart observer")
|
||||
err := ob.Start(ctx)
|
||||
if err != nil {
|
||||
log.Fatal("fail to restart handoff observer, aborting querycoord",
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
// etcd problem, shall be handled by session keep alive
|
||||
log.Warn("receive error handoff event from etcd",
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
for _, event := range resp.Events {
|
||||
segmentInfo := &querypb.SegmentInfo{}
|
||||
err := proto.Unmarshal(event.Kv.Value, segmentInfo)
|
||||
if err != nil {
|
||||
log.Error("failed to deserialize handoff event", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
switch event.Type {
|
||||
case mvccpb.PUT:
|
||||
ob.tryHandoff(ctx, segmentInfo)
|
||||
default:
|
||||
log.Warn("HandoffObserver: receive event",
|
||||
zap.String("type", event.Type.String()),
|
||||
zap.String("key", string(event.Kv.Key)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
case <-ticker.C:
|
||||
for _, event := range ob.handoffEvents {
|
||||
switch event.Status {
|
||||
case HandoffEventStatusReceived:
|
||||
ob.tryHandoff(ctx, event.Segment)
|
||||
case HandoffEventStatusTriggered:
|
||||
ob.tryRelease(ctx, event)
|
||||
}
|
||||
}
|
||||
|
||||
ob.tryClean(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
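The schedule loop above follows a common etcd pattern: load the current state together with its revision, watch from revision+1, and when the watch fails with ErrCompacted, reload and re-create the watch. A hedged, stand-alone sketch of that pattern (not the observer's actual code; it assumes the caller supplies a connected clientv3.Client, a reload callback, and an event handler):

```go
package watchsketch

import (
	"context"
	"errors"

	v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

// watchWithReload watches keys under prefix starting just after fromRev.
// If etcd reports that the requested revision was compacted, it calls
// reload to obtain a fresh snapshot revision and re-creates the watch.
func watchWithReload(ctx context.Context, cli *clientv3.Client, prefix string,
	fromRev int64, reload func(context.Context) (int64, error),
	handle func(*clientv3.Event)) error {
	for {
		watchChan := cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(fromRev+1))
		for resp := range watchChan {
			if err := resp.Err(); err != nil {
				if errors.Is(err, v3rpc.ErrCompacted) {
					// revision too old: reload state and restart the watch
					rev, rerr := reload(ctx)
					if rerr != nil {
						return rerr
					}
					fromRev = rev
					break // leave the inner loop and re-create the watch
				}
				return err
			}
			for _, ev := range resp.Events {
				handle(ev)
			}
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
}
```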
|
||||
func (ob *HandoffObserver) tryHandoff(ctx context.Context, segment *querypb.SegmentInfo) {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
|
||||
indexIDs := lo.Map(segment.GetIndexInfos(), func(indexInfo *querypb.FieldIndexInfo, _ int) int64 { return indexInfo.GetIndexID() })
|
||||
log := log.With(zap.Int64("collectionID", segment.GetCollectionID()),
|
||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
||||
zap.Bool("fake", segment.GetIsFake()),
|
||||
zap.Int64s("indexIDs", indexIDs),
|
||||
)
|
||||
|
||||
log.Info("try handoff segment...")
|
||||
status, collectionRegistered := ob.collectionStatus[segment.GetCollectionID()]
|
||||
if Params.QueryCoordCfg.AutoHandoff &&
|
||||
collectionRegistered &&
|
||||
ob.checkLoadStatus(segment) &&
|
||||
(segment.GetIsFake() || ob.meta.CollectionManager.ContainAnyIndex(segment.GetCollectionID(), indexIDs...)) {
|
||||
event := ob.handoffEvents[segment.SegmentID]
|
||||
if event == nil {
|
||||
// record submit order
|
||||
_, ok := ob.handoffSubmitOrders[segment.GetCollectionID()]
|
||||
if !ok {
|
||||
ob.handoffSubmitOrders[segment.GetCollectionID()] = make([]int64, 0)
|
||||
}
|
||||
ob.handoffSubmitOrders[segment.GetCollectionID()] = append(ob.handoffSubmitOrders[segment.GetCollectionID()], segment.GetSegmentID())
|
||||
}
|
||||
|
||||
if status == CollectionHandoffStatusRegistered {
|
||||
if event == nil {
|
||||
// keep all handoff events, waiting for the collection to become ready before triggering handoff
|
||||
ob.handoffEvents[segment.GetSegmentID()] = &HandoffEvent{
|
||||
Segment: segment,
|
||||
Status: HandoffEventStatusReceived,
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
ob.handoffEvents[segment.GetSegmentID()] = &HandoffEvent{
|
||||
Segment: segment,
|
||||
Status: HandoffEventStatusTriggered,
|
||||
}
|
||||
|
||||
if !segment.GetIsFake() {
|
||||
log.Info("start to do handoff...")
|
||||
ob.handoff(segment)
|
||||
}
|
||||
} else {
|
||||
// ignore handoff task
|
||||
log.Info("handoff event trigger failed due to collection/partition is not loaded!")
|
||||
ob.cleanEvent(ctx, segment)
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) checkLoadStatus(segment *querypb.SegmentInfo) bool {
|
||||
if ob.meta.GetCollection(segment.GetCollectionID()) != nil {
|
||||
// if the collection is loaded, check whether the partition has been dropped
|
||||
if ob.loadedPartitions[segment.GetCollectionID()] == nil {
|
||||
ob.loadedPartitions[segment.GetCollectionID()] = typeutil.NewUniqueSet()
|
||||
}
|
||||
|
||||
// update the loaded partitions when meeting a new partitionID
|
||||
if !ob.loadedPartitions[segment.GetCollectionID()].Contain(segment.GetPartitionID()) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
err := retry.Do(ctx, func() error {
|
||||
partitionIDs, err := ob.broker.GetPartitions(ctx, segment.GetCollectionID())
|
||||
if err == nil {
|
||||
ob.loadedPartitions[segment.GetCollectionID()].Insert(partitionIDs...)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}, retry.Attempts(5))
|
||||
|
||||
if err != nil {
|
||||
// collection has been dropped or released
|
||||
if strings.Contains(err.Error(), "CollectionNotExists") ||
|
||||
ob.meta.GetCollection(segment.GetCollectionID()) == nil {
|
||||
return false
|
||||
}
|
||||
// collection not released, but we can't get the partition list to check handoff
|
||||
log.Warn("handoff check load status failed due to get partitions failed",
|
||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
||||
zap.String("channel", segment.GetDmChannel()),
|
||||
zap.Int64("segmentID", segment.GetSegmentID()))
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return ob.loadedPartitions[segment.GetCollectionID()].Contain(segment.GetPartitionID())
|
||||
}
|
||||
|
||||
return ob.meta.GetPartition(segment.GetPartitionID()) != nil
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) handoff(segment *querypb.SegmentInfo) {
|
||||
targets := ob.target.GetSegmentsByCollection(segment.GetCollectionID(), segment.GetPartitionID())
|
||||
// when a handoff event loads a Segment, it should remove all segments it recursively compacted from
|
||||
uniqueSet := typeutil.NewUniqueSet()
|
||||
recursiveCompactFrom := ob.getOverrideSegmentInfo(targets, segment.CompactionFrom...)
|
||||
uniqueSet.Insert(recursiveCompactFrom...)
|
||||
uniqueSet.Insert(segment.GetCompactionFrom()...)
|
||||
|
||||
segmentInfo := &datapb.SegmentInfo{
|
||||
ID: segment.GetSegmentID(),
|
||||
CollectionID: segment.GetCollectionID(),
|
||||
PartitionID: segment.GetPartitionID(),
|
||||
NumOfRows: segment.NumRows,
|
||||
InsertChannel: segment.GetDmChannel(),
|
||||
State: segment.GetSegmentState(),
|
||||
CreatedByCompaction: segment.GetCreatedByCompaction(),
|
||||
CompactionFrom: uniqueSet.Collect(),
|
||||
}
|
||||
|
||||
log.Info("HandoffObserver: handoff Segment, register to target")
|
||||
ob.target.HandoffSegment(segmentInfo, segmentInfo.CompactionFrom...)
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isSegmentReleased(id int64) bool {
|
||||
return len(ob.dist.LeaderViewManager.GetSegmentDist(id)) == 0
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isGrowingSegmentReleased(id int64) bool {
|
||||
return len(ob.dist.LeaderViewManager.GetGrowingSegmentDist(id)) == 0
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isSealedSegmentLoaded(segment *querypb.SegmentInfo) bool {
|
||||
// the sealed Segment must be loaded in all replicas, in case of handoff between growing and sealed
|
||||
nodes := ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetSegmentID())
|
||||
replicas := utils.GroupNodesByReplica(ob.meta.ReplicaManager, segment.GetCollectionID(), nodes)
|
||||
return len(replicas) == len(ob.meta.ReplicaManager.GetByCollection(segment.GetCollectionID()))
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) getOverrideSegmentInfo(handOffSegments []*datapb.SegmentInfo, segmentIDs ...int64) []int64 {
|
||||
overrideSegments := make([]int64, 0)
|
||||
for _, segmentID := range segmentIDs {
|
||||
for _, segmentInHandoff := range handOffSegments {
|
||||
if segmentID == segmentInHandoff.ID {
|
||||
toReleaseSegments := ob.getOverrideSegmentInfo(handOffSegments, segmentInHandoff.CompactionFrom...)
|
||||
if len(toReleaseSegments) > 0 {
|
||||
overrideSegments = append(overrideSegments, toReleaseSegments...)
|
||||
}
|
||||
|
||||
overrideSegments = append(overrideSegments, segmentID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return overrideSegments
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isAllCompactFromHandoffCompleted(segmentInfo *querypb.SegmentInfo) bool {
|
||||
for _, segID := range segmentInfo.CompactionFrom {
|
||||
_, ok := ob.handoffEvents[segID]
|
||||
if ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) tryRelease(ctx context.Context, event *HandoffEvent) {
|
||||
segment := event.Segment
|
||||
|
||||
if ob.isSealedSegmentLoaded(segment) || !ob.isSegmentExistOnTarget(segment) {
|
||||
// Note: the fake segment will not be added into the target segments; to guarantee
// that all parent segments are released, we check the handoff events list instead of
// checking segments from the leader view, otherwise some segments to release might be missed.
|
||||
if segment.GetIsFake() && !ob.isAllCompactFromHandoffCompleted(segment) {
|
||||
log.Debug("try to release fake segments fails, due to the dependencies haven't complete handoff.",
|
||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
||||
zap.Bool("faked", segment.GetIsFake()),
|
||||
zap.Int64s("sourceSegments", segment.CompactionFrom),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
compactSource := segment.CompactionFrom
|
||||
if len(compactSource) == 0 {
|
||||
return
|
||||
}
|
||||
log.Info("remove compactFrom segments",
|
||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
||||
zap.Bool("faked", segment.GetIsFake()),
|
||||
zap.Int64s("sourceSegments", compactSource),
|
||||
)
|
||||
for _, toRelease := range compactSource {
|
||||
// when handoff happens between a growing and a sealed segment with the same segment ID, the segment can't be removed from the target here
|
||||
if segment.CreatedByCompaction {
|
||||
ob.target.RemoveSegment(toRelease)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) tryClean(ctx context.Context) {
|
||||
ob.handoffEventLock.Lock()
|
||||
defer ob.handoffEventLock.Unlock()
|
||||
|
||||
for collectionID, partitionSubmitOrder := range ob.handoffSubmitOrders {
|
||||
pos := 0
|
||||
for _, segmentID := range partitionSubmitOrder {
|
||||
event, ok := ob.handoffEvents[segmentID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
segment := event.Segment
|
||||
if ob.isAllCompactFromReleased(segment) {
|
||||
log.Info("HandoffObserver: clean handoff event after handoff finished!",
|
||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
||||
zap.Bool("faked", segment.GetIsFake()),
|
||||
)
|
||||
err := ob.cleanEvent(ctx, segment)
|
||||
if err == nil {
|
||||
delete(ob.handoffEvents, segment.GetSegmentID())
|
||||
}
|
||||
pos++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
ob.handoffSubmitOrders[collectionID] = partitionSubmitOrder[pos:]
|
||||
}
|
||||
}
|
||||
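tryClean above removes finished handoff events strictly in the order they were submitted for each collection, stopping at the first event whose compact-from segments are still loaded. A small sketch of that "drain only the head of the queue" idea with plain slices (drainFinished is a hypothetical helper, not the observer's API):

```go
package main

import "fmt"

// drainFinished removes completed IDs from the head of the queue only;
// the first unfinished ID blocks everything submitted after it, which
// preserves the original handoff order.
func drainFinished(queue []int64, finished func(int64) bool) []int64 {
	pos := 0
	for _, id := range queue {
		if !finished(id) {
			break
		}
		pos++
	}
	return queue[pos:]
}

func main() {
	done := map[int64]bool{1: true, 2: true, 4: true}
	q := []int64{1, 2, 3, 4}
	q = drainFinished(q, func(id int64) bool { return done[id] })
	fmt.Println(q) // [3 4]: 3 is unfinished, so 4 stays queued behind it
}
```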
|
||||
func (ob *HandoffObserver) cleanEvent(ctx context.Context, segmentInfo *querypb.SegmentInfo) error {
|
||||
log := log.With(
|
||||
zap.Int64("collectionID", segmentInfo.CollectionID),
|
||||
zap.Int64("partitionID", segmentInfo.PartitionID),
|
||||
zap.Int64("segmentID", segmentInfo.SegmentID),
|
||||
)
|
||||
|
||||
// add retry logic
|
||||
err := retry.Do(ctx, func() error {
|
||||
return ob.store.RemoveHandoffEvent(segmentInfo)
|
||||
}, retry.Attempts(5))
|
||||
|
||||
if err != nil {
|
||||
log.Warn("failed to clean handoff event from etcd", zap.Error(err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isSegmentExistOnTarget(segmentInfo *querypb.SegmentInfo) bool {
|
||||
return ob.target.ContainSegment(segmentInfo.SegmentID)
|
||||
}
|
||||
|
||||
func (ob *HandoffObserver) isAllCompactFromReleased(segmentInfo *querypb.SegmentInfo) bool {
|
||||
if !segmentInfo.CreatedByCompaction {
|
||||
return ob.isGrowingSegmentReleased(segmentInfo.SegmentID)
|
||||
}
|
||||
for _, segment := range segmentInfo.CompactionFrom {
|
||||
if !ob.isSegmentReleased(segment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -1,652 +0,0 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package observers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus-proto/go-api/commonpb"
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultVecFieldID = 1
|
||||
defaultIndexID = 1
|
||||
)
|
||||
|
||||
type HandoffObserverTestSuit struct {
|
||||
suite.Suite
|
||||
// Data
|
||||
collection int64
|
||||
partition int64
|
||||
channel *meta.DmChannel
|
||||
replicaNumber int32
|
||||
nodes []int64
|
||||
growingSegments []*datapb.SegmentInfo
|
||||
sealedSegments []*datapb.SegmentInfo
|
||||
|
||||
//Mocks
|
||||
idAllocator func() (int64, error)
|
||||
etcd *clientv3.Client
|
||||
kv *etcdkv.EtcdKV
|
||||
|
||||
//Dependency
|
||||
store meta.Store
|
||||
meta *meta.Meta
|
||||
dist *meta.DistributionManager
|
||||
target *meta.TargetManager
|
||||
broker *meta.MockBroker
|
||||
|
||||
// Test Object
|
||||
observer *HandoffObserver
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) SetupSuite() {
|
||||
Params.Init()
|
||||
|
||||
suite.collection = 100
|
||||
suite.partition = 10
|
||||
suite.channel = meta.DmChannelFromVChannel(&datapb.VchannelInfo{
|
||||
CollectionID: 100,
|
||||
ChannelName: "100-dmc0",
|
||||
})
|
||||
|
||||
suite.sealedSegments = []*datapb.SegmentInfo{
|
||||
{
|
||||
ID: 1,
|
||||
CollectionID: 100,
|
||||
PartitionID: 10,
|
||||
InsertChannel: "100-dmc0",
|
||||
State: commonpb.SegmentState_Sealed,
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
CollectionID: 100,
|
||||
PartitionID: 10,
|
||||
InsertChannel: "100-dmc1",
|
||||
State: commonpb.SegmentState_Sealed,
|
||||
},
|
||||
}
|
||||
suite.replicaNumber = 1
|
||||
suite.nodes = []int64{1, 2, 3}
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) SetupTest() {
|
||||
// Mocks
|
||||
var err error
|
||||
suite.idAllocator = RandomIncrementIDAllocator()
|
||||
log.Debug("create embedded etcd KV...")
|
||||
config := GenerateEtcdConfig()
|
||||
client, err := etcd.GetEtcdClient(
|
||||
config.UseEmbedEtcd,
|
||||
config.EtcdUseSSL,
|
||||
config.Endpoints,
|
||||
config.EtcdTLSCert,
|
||||
config.EtcdTLSKey,
|
||||
config.EtcdTLSCACert,
|
||||
config.EtcdTLSMinVersion)
|
||||
suite.Require().NoError(err)
|
||||
suite.kv = etcdkv.NewEtcdKV(client, Params.EtcdCfg.MetaRootPath+"-"+RandomMetaRootPath())
|
||||
suite.Require().NoError(err)
|
||||
log.Debug("create meta store...")
|
||||
suite.store = meta.NewMetaStore(suite.kv)
|
||||
|
||||
// Dependency
|
||||
suite.meta = meta.NewMeta(suite.idAllocator, suite.store)
|
||||
suite.dist = meta.NewDistributionManager()
|
||||
suite.target = meta.NewTargetManager()
|
||||
suite.broker = meta.NewMockBroker(suite.T())
|
||||
|
||||
// Test Object
|
||||
suite.observer = NewHandoffObserver(suite.store, suite.meta, suite.dist, suite.target, suite.broker)
|
||||
suite.observer.Register(suite.collection)
|
||||
suite.observer.StartHandoff(suite.collection)
|
||||
suite.load()
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TearDownTest() {
|
||||
suite.observer.Stop()
|
||||
suite.kv.Close()
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TestFlushingHandoff() {
|
||||
// init leader view
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
|
||||
Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
|
||||
err := suite.observer.Start(context.Background())
|
||||
suite.NoError(err)
|
||||
suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)
|
||||
|
||||
flushingSegment := &querypb.SegmentInfo{
|
||||
SegmentID: 3,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
suite.produceHandOffEvent(flushingSegment)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return suite.target.ContainSegment(3)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
// fake load CompactTo Segment
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
|
||||
// fake release CompactFrom Segment
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
||||
})
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return len(suite.dist.LeaderViewManager.GetGrowingSegmentDist(3)) == 0
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
|
||||
value, err := suite.kv.Load(key)
|
||||
return len(value) == 0 && err != nil
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TestCompactHandoff() {
|
||||
// init leader view
|
||||
suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
||||
})
|
||||
|
||||
Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
|
||||
err := suite.observer.Start(context.Background())
|
||||
suite.NoError(err)
|
||||
suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)
|
||||
compactSegment := &querypb.SegmentInfo{
|
||||
SegmentID: 3,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{1},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
suite.produceHandOffEvent(compactSegment)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return suite.target.ContainSegment(3)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
// fake load CompactTo Segment
|
||||
suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
||||
})
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return !suite.target.ContainSegment(1)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
// fake release CompactFrom Segment
|
||||
suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
||||
})
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
|
||||
value, err := suite.kv.Load(key)
|
||||
return len(value) == 0 && err != nil
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TestRecursiveHandoff() {
|
||||
// init leader view
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)
|
||||
|
||||
Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
|
||||
err := suite.observer.Start(context.Background())
|
||||
suite.NoError(err)
|
||||
|
||||
flushingSegment := &querypb.SegmentInfo{
|
||||
SegmentID: 3,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
|
||||
compactSegment1 := &querypb.SegmentInfo{
|
||||
SegmentID: 4,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{3},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
|
||||
compactSegment2 := &querypb.SegmentInfo{
|
||||
SegmentID: 5,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{4},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
|
||||
suite.produceHandOffEvent(flushingSegment)
|
||||
suite.produceHandOffEvent(compactSegment1)
|
||||
suite.produceHandOffEvent(compactSegment2)
|
||||
|
||||
// fake load CompactTo Segment
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 5: {NodeID: 3, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return suite.target.ContainSegment(1) && suite.target.ContainSegment(2) && suite.target.ContainSegment(5)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return !suite.target.ContainSegment(3) && !suite.target.ContainSegment(4)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
// fake release CompactFrom Segment
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 5: {NodeID: 3, Version: 0}},
|
||||
})
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return suite.target.ContainSegment(1) && suite.target.ContainSegment(2) && suite.target.ContainSegment(5)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return !suite.target.ContainSegment(3) && !suite.target.ContainSegment(4)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return len(suite.dist.LeaderViewManager.GetGrowingSegmentDist(3)) == 0
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
|
||||
value, err := suite.kv.Load(key)
|
||||
return len(value) == 0 && err != nil
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TestReloadHandoffEventOrder() {
|
||||
// init leader view
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
|
||||
// fake handoff event from start
|
||||
flushingSegment := &querypb.SegmentInfo{
|
||||
SegmentID: 3,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
compactSegment1 := &querypb.SegmentInfo{
|
||||
SegmentID: 9,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{3},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
compactSegment2 := &querypb.SegmentInfo{
|
||||
SegmentID: 10,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{4},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
|
||||
suite.produceHandOffEvent(flushingSegment)
|
||||
suite.produceHandOffEvent(compactSegment1)
|
||||
suite.produceHandOffEvent(compactSegment2)
|
||||
|
||||
keys, _, _, err := suite.kv.LoadWithRevision(util.HandoffSegmentPrefix)
|
||||
suite.NoError(err)
|
||||
suite.Equal(true, strings.HasSuffix(keys[0], "3"))
|
||||
suite.Equal(true, strings.HasSuffix(keys[1], "9"))
|
||||
suite.Equal(true, strings.HasSuffix(keys[2], "10"))
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) TestLoadHandoffEventFromStore() {
|
||||
// init leader view
|
||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
||||
ID: 1,
|
||||
CollectionID: suite.collection,
|
||||
Channel: suite.channel.ChannelName,
|
||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
||||
})
|
||||
|
||||
// fake handoff event from start
|
||||
compactSegment1 := &querypb.SegmentInfo{
|
||||
SegmentID: 4,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{1},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
compactSegment2 := &querypb.SegmentInfo{
|
||||
SegmentID: 5,
|
||||
CollectionID: suite.collection,
|
||||
PartitionID: suite.partition,
|
||||
SegmentState: commonpb.SegmentState_Sealed,
|
||||
CompactionFrom: []int64{3},
|
||||
CreatedByCompaction: true,
|
||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
||||
}
|
||||
|
||||
suite.produceHandOffEvent(compactSegment1)
|
||||
suite.produceHandOffEvent(compactSegment2)
|
||||
|
||||
Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
|
||||
err := suite.observer.Start(context.Background())
|
||||
suite.NoError(err)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return suite.target.ContainSegment(1) && suite.target.ContainSegment(2)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
|
||||
suite.Eventually(func() bool {
|
||||
return !suite.target.ContainSegment(4) && !suite.target.ContainSegment(5)
|
||||
}, 3*time.Second, 1*time.Second)
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) produceHandOffEvent(segmentInfo *querypb.SegmentInfo) {
|
||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
|
||||
value, err := proto.Marshal(segmentInfo)
|
||||
suite.NoError(err)
|
||||
err = suite.kv.Save(key, string(value))
|
||||
suite.NoError(err)
|
||||
}
|
||||
|
||||
func (suite *HandoffObserverTestSuit) existHandOffEvent(segmentInfo *querypb.SegmentInfo) bool {
|
||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
    _, err := suite.kv.Load(key)
    return err == nil
}

func (suite *HandoffObserverTestSuit) load() {
    // Mock meta data
    replicas, err := suite.meta.ReplicaManager.Spawn(suite.collection, suite.replicaNumber)
    suite.NoError(err)
    for _, replica := range replicas {
        replica.AddNode(suite.nodes...)
    }
    err = suite.meta.ReplicaManager.Put(replicas...)
    suite.NoError(err)

    err = suite.meta.PutCollection(&meta.Collection{
        CollectionLoadInfo: &querypb.CollectionLoadInfo{
            CollectionID: suite.collection,
            ReplicaNumber: suite.replicaNumber,
            Status: querypb.LoadStatus_Loaded,
            FieldIndexID: map[int64]int64{defaultVecFieldID: defaultIndexID},
        },
        LoadPercentage: 0,
        CreatedAt: time.Now(),
    })
    suite.NoError(err)

    suite.target.AddDmChannel(suite.channel)
    suite.target.AddSegment(suite.sealedSegments...)
}

func (suite *HandoffObserverTestSuit) TestHandoffOnUnloadedPartition() {
    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 1,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 2,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{2222}, nil)

    suite.observer.Register(suite.collection)
    suite.observer.StartHandoff(suite.collection)
    defer suite.observer.Unregister(context.TODO(), suite.collection)

    compactSegment1 := &querypb.SegmentInfo{
        SegmentID: 111,
        CollectionID: suite.collection,
        PartitionID: 1111,
        SegmentState: commonpb.SegmentState_Sealed,
        CompactionFrom: []int64{1},
        CreatedByCompaction: true,
        IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    compactSegment2 := &querypb.SegmentInfo{
        SegmentID: 222,
        CollectionID: suite.collection,
        PartitionID: 2222,
        SegmentState: commonpb.SegmentState_Sealed,
        CompactionFrom: []int64{1},
        CreatedByCompaction: true,
        IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment1)
    suite.produceHandOffEvent(compactSegment2)

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(111) && suite.target.ContainSegment(222)
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestUnRegisterHandoff() {
    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 1,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID: 2,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{1111, 2222}, nil)
    suite.observer.Register(suite.collection)
    compactSegment1 := &querypb.SegmentInfo{
        SegmentID: 111,
        CollectionID: suite.collection,
        PartitionID: 1111,
        SegmentState: commonpb.SegmentState_Sealed,
        CompactionFrom: []int64{1},
        CreatedByCompaction: true,
        IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment1)
    suite.Eventually(func() bool {
        return suite.observer.GetEventNum() == 1
    }, 3*time.Second, 1*time.Second)
    suite.observer.Unregister(context.TODO(), suite.collection)

    suite.observer.Register(suite.collection)
    defer suite.observer.Unregister(context.TODO(), suite.collection)
    compactSegment2 := &querypb.SegmentInfo{
        SegmentID: 222,
        CollectionID: suite.collection,
        PartitionID: 2222,
        SegmentState: commonpb.SegmentState_Sealed,
        CompactionFrom: []int64{2},
        CreatedByCompaction: true,
        IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment2)
    suite.Eventually(func() bool {
        return suite.observer.GetEventNum() == 1
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestFilterOutEventByIndexID() {
    // init leader view
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID: 1,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)

    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    compactSegment := &querypb.SegmentInfo{
        SegmentID: 3,
        CollectionID: suite.collection,
        PartitionID: suite.partition,
        SegmentState: commonpb.SegmentState_Sealed,
        CompactionFrom: []int64{1},
        CreatedByCompaction: true,
    }
    suite.produceHandOffEvent(compactSegment)

    suite.Eventually(func() bool {
        suite.observer.handoffEventLock.RLock()
        defer suite.observer.handoffEventLock.RUnlock()
        _, ok := suite.observer.handoffEvents[compactSegment.GetSegmentID()]
        return !ok && !suite.target.ContainSegment(3) && !suite.existHandOffEvent(compactSegment)
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestFakedSegmentHandoff() {
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID: 1,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)

    Params.QueryCoordCfg.CheckHandoffInterval = 200 * time.Millisecond
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    handoffSegment := &querypb.SegmentInfo{
        SegmentID: 3,
        CollectionID: suite.collection,
        PartitionID: suite.partition,
        CompactionFrom: []int64{1, 2},
        CreatedByCompaction: true,
        IsFake: true,
    }
    suite.produceHandOffEvent(handoffSegment)

    time.Sleep(1 * time.Second)
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID: 1,
        CollectionID: suite.collection,
        Channel: suite.channel.ChannelName,
        Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(1) && !suite.target.ContainSegment(2)
    }, 3*time.Second, 1*time.Second)
}

func TestHandoffObserverSuit(t *testing.T) {
    suite.Run(t, new(HandoffObserverTestSuit))
}
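The tests above wait for the observer goroutine with testify's Eventually helper rather than fixed sleeps. The following self-contained sketch (not part of the patch; the atomic flag and timings are placeholders) illustrates that polling pattern in isolation:

package observers_test

import (
    "testing"
    "time"

    "github.com/stretchr/testify/suite"
    "go.uber.org/atomic"
)

type pollingSuite struct {
    suite.Suite
}

func (s *pollingSuite) TestEventuallyPattern() {
    // Flip the flag asynchronously, the way a background observer would
    // eventually update target state.
    done := atomic.NewBool(false)
    go func() {
        time.Sleep(200 * time.Millisecond)
        done.Store(true)
    }()

    // Eventually re-evaluates the condition every tick until it returns true
    // or the timeout expires, so the test never sleeps longer than needed.
    s.Eventually(func() bool {
        return done.Load()
    }, 3*time.Second, 100*time.Millisecond)
}

func TestPollingSuite(t *testing.T) {
    suite.Run(t, new(pollingSuite))
}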
@ -106,7 +106,7 @@ func (o *LeaderObserver) findNeedLoadedSegments(leaderView *meta.LeaderView, dis
    for _, s := range dists {
        version, ok := leaderView.Segments[s.GetID()]
        if ok && version.GetVersion() >= s.Version ||
            !o.target.ContainSegment(s.GetID()) {
            o.target.GetHistoricalSegment(s.CollectionID, s.GetID(), meta.CurrentTarget) == nil {
            continue
        }
        ret = append(ret, &querypb.SyncAction{

@ -128,7 +128,7 @@ func (o *LeaderObserver) findNeedRemovedSegments(leaderView *meta.LeaderView, di
    }
    for sid := range leaderView.Segments {
        _, ok := distMap[sid]
        if ok || o.target.ContainSegment(sid) {
        if ok || o.target.GetHistoricalSegment(leaderView.CollectionID, sid, meta.CurrentTarget) != nil {
            continue
        }
        ret = append(ret, &querypb.SyncAction{
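The two hunks above swap the flat ContainSegment membership test for a scoped lookup against the current target. A hedged, self-contained sketch of the resulting predicate; the targetLookup interface and segmentInfo struct are illustrative stand-ins, not the real meta.TargetManager API:

package main

import "fmt"

type scope int

const (
    currentTarget scope = iota
    nextTarget
)

type segmentInfo struct {
    collectionID int64
    segmentID    int64
}

type targetLookup interface {
    // GetHistoricalSegment mirrors the scoped lookup used after the change:
    // nil means the segment is absent from the requested target scope.
    GetHistoricalSegment(collectionID, segmentID int64, s scope) *segmentInfo
}

type mapTarget map[scope]map[int64]*segmentInfo

func (m mapTarget) GetHistoricalSegment(collectionID, segmentID int64, s scope) *segmentInfo {
    return m[s][segmentID]
}

// needSync reports whether a distributed segment should be synced to the
// leader view: it must not already be present with a newer version, and it
// must exist in the current target.
func needSync(t targetLookup, seg segmentInfo, distVersion, viewVersion int64, inView bool) bool {
    if inView && viewVersion >= distVersion {
        return false
    }
    return t.GetHistoricalSegment(seg.collectionID, seg.segmentID, currentTarget) != nil
}

func main() {
    tgt := mapTarget{currentTarget: {1: {collectionID: 100, segmentID: 1}}}
    fmt.Println(needSync(tgt, segmentInfo{100, 1}, 2, 1, true))  // true: newer dist, in current target
    fmt.Println(needSync(tgt, segmentInfo{100, 2}, 2, 0, false)) // false: not in current target
}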
@ -27,6 +27,7 @@ import (
    "github.com/milvus-io/milvus-proto/go-api/commonpb"
    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/internal/proto/querypb"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    . "github.com/milvus-io/milvus/internal/querycoordv2/params"

@ -40,6 +41,9 @@ type LeaderObserverTestSuite struct {
    observer *LeaderObserver
    kv *etcdkv.EtcdKV
    mockCluster *session.MockCluster

    meta *meta.Meta
    broker *meta.MockBroker
}

func (suite *LeaderObserverTestSuite) SetupSuite() {

@ -63,12 +67,13 @@ func (suite *LeaderObserverTestSuite) SetupTest() {
    // meta
    store := meta.NewMetaStore(suite.kv)
    idAllocator := RandomIncrementIDAllocator()
    testMeta := meta.NewMeta(idAllocator, store)
    suite.meta = meta.NewMeta(idAllocator, store)
    suite.broker = meta.NewMockBroker(suite.T())

    suite.mockCluster = session.NewMockCluster(suite.T())
    distManager := meta.NewDistributionManager()
    targetManager := meta.NewTargetManager()
    suite.observer = NewLeaderObserver(distManager, testMeta, targetManager, suite.mockCluster)
    targetManager := meta.NewTargetManager(suite.broker, suite.meta)
    suite.observer = NewLeaderObserver(distManager, suite.meta, targetManager, suite.mockCluster)
}

func (suite *LeaderObserverTestSuite) TearDownTest() {

@ -80,10 +85,25 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
    observer := suite.observer
    observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
    observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
    segments := []*datapb.SegmentBinlogs{
        {
            SegmentID: 1,
            InsertChannel: "test-insert-channel",
        },
    }
    channels := []*datapb.VchannelInfo{
        {
            CollectionID: 1,
            ChannelName: "test-insert-channel",
        },
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
        channels, segments, nil)
    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
    observer.target.UpdateCollectionCurrentTarget(1)
    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 2, 1, "test-insert-channel"))
    observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
    expectReq := &querypb.SyncDistributionRequest{
        Base: &commonpb.MsgBase{
            MsgType: commonpb.MsgType_SyncDistribution,

@ -120,13 +140,28 @@ func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() {
    observer := suite.observer
    observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
    observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
    segments := []*datapb.SegmentBinlogs{
        {
            SegmentID: 1,
            InsertChannel: "test-insert-channel",
        },
    }
    channels := []*datapb.VchannelInfo{
        {
            CollectionID: 1,
            ChannelName: "test-insert-channel",
        },
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
        channels, segments, nil)
    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
    observer.target.UpdateCollectionCurrentTarget(1)
    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
    observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))

    // The leader view saw the segment on new node,
    // but another nodes not yet
    leaderView := utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{})
    leaderView := utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{})
    leaderView.Segments[1] = &querypb.SegmentDist{
        NodeID: 2,
        Version: 2,

@ -143,12 +178,27 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
    observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 2))
    observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
    observer.meta.ReplicaManager.Put(utils.CreateTestReplica(2, 1, []int64{3, 4}))
    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
    segments := []*datapb.SegmentBinlogs{
        {
            SegmentID: 1,
            InsertChannel: "test-insert-channel",
        },
    }
    channels := []*datapb.VchannelInfo{
        {
            CollectionID: 1,
            ChannelName: "test-insert-channel",
        },
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
        channels, segments, nil)
    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
    observer.target.UpdateCollectionCurrentTarget(1)
    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
    observer.dist.SegmentDistManager.Update(4, utils.CreateTestSegment(1, 1, 1, 4, 2, "test-insert-channel"))
    observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
    observer.dist.LeaderViewManager.Update(4, utils.CreateTestLeaderView(4, 1, "test-insert-channel", map[int64]int64{1: 4}, []int64{}))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
    observer.dist.LeaderViewManager.Update(4, utils.CreateTestLeaderView(4, 1, "test-insert-channel", map[int64]int64{1: 4}, map[int64]*meta.Segment{}))
    expectReq := &querypb.SyncDistributionRequest{
        Base: &commonpb.MsgBase{
            MsgType: commonpb.MsgType_SyncDistribution,

@ -187,7 +237,7 @@ func (suite *LeaderObserverTestSuite) TestSyncRemovedSegments() {
    observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

    observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, []int64{}))
    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, map[int64]*meta.Segment{}))

    expectReq := &querypb.SyncDistributionRequest{
        Base: &commonpb.MsgBase{
@ -0,0 +1,172 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package observers

import (
    "context"
    "sync"
    "time"

    "go.uber.org/zap"

    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/typeutil"
)

type TargetObserver struct {
    c         chan struct{}
    wg        sync.WaitGroup
    meta      *meta.Meta
    targetMgr *meta.TargetManager
    distMgr   *meta.DistributionManager
    broker    meta.Broker

    nextTargetLastUpdate map[int64]time.Time
    stopOnce             sync.Once
}

func NewTargetObserver(meta *meta.Meta, targetMgr *meta.TargetManager, distMgr *meta.DistributionManager, broker meta.Broker) *TargetObserver {
    return &TargetObserver{
        c: make(chan struct{}),
        meta: meta,
        targetMgr: targetMgr,
        distMgr: distMgr,
        broker: broker,
        nextTargetLastUpdate: make(map[int64]time.Time),
    }
}

func (ob *TargetObserver) Start(ctx context.Context) {
    ob.wg.Add(1)
    go ob.schedule(ctx)
}

func (ob *TargetObserver) Stop() {
    ob.stopOnce.Do(func() {
        close(ob.c)
        ob.wg.Wait()
    })
}

func (ob *TargetObserver) schedule(ctx context.Context) {
    defer ob.wg.Done()
    log.Info("Start update next target loop")

    ticker := time.NewTicker(params.Params.QueryCoordCfg.UpdateNextTargetInterval)
    for {
        select {
        case <-ctx.Done():
            log.Info("Close target observer due to context canceled")
            return
        case <-ob.c:
            log.Info("Close target observer")
            return

        case <-ticker.C:
            ob.tryUpdateTarget()
        }
    }
}

func (ob *TargetObserver) tryUpdateTarget() {
    collections := ob.meta.GetAll()
    for _, collectionID := range collections {
        if ob.shouldUpdateCurrentTarget(collectionID) {
            ob.updateCurrentTarget(collectionID)
        }

        if ob.shouldUpdateNextTarget(collectionID) {
            // update next target in collection level
            ob.UpdateNextTarget(collectionID)
        }
    }

    collectionSet := typeutil.NewUniqueSet(collections...)
    // for collection which has been removed from target, try to clear nextTargetLastUpdate
    for collection := range ob.nextTargetLastUpdate {
        if !collectionSet.Contain(collection) {
            delete(ob.nextTargetLastUpdate, collection)
        }
    }
}

func (ob *TargetObserver) shouldUpdateNextTarget(collectionID int64) bool {
    return !ob.targetMgr.IsNextTargetExist(collectionID) || ob.isNextTargetExpired(collectionID)
}

func (ob *TargetObserver) isNextTargetExpired(collectionID int64) bool {
    return time.Since(ob.nextTargetLastUpdate[collectionID]) > params.Params.QueryCoordCfg.NextTargetSurviveTime
}

func (ob *TargetObserver) UpdateNextTarget(collectionID int64) {
    log := log.With(zap.Int64("collectionID", collectionID))

    log.Warn("observer trigger update next target")
    err := ob.targetMgr.UpdateCollectionNextTarget(collectionID)
    if err != nil {
        log.Error("failed to update next target for collection",
            zap.Error(err))
        return
    }
    ob.updateNextTargetTimestamp(collectionID)
}

func (ob *TargetObserver) updateNextTargetTimestamp(collectionID int64) {
    ob.nextTargetLastUpdate[collectionID] = time.Now()
}

func (ob *TargetObserver) shouldUpdateCurrentTarget(collectionID int64) bool {
    replicaNum := len(ob.meta.ReplicaManager.GetByCollection(collectionID))

    // check channel first
    channelNames := ob.targetMgr.GetDmChannelsByCollection(collectionID, meta.NextTarget)
    if len(channelNames) == 0 {
        // next target is empty, no need to update
        return false
    }

    for _, channel := range channelNames {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collectionID,
            ob.distMgr.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
        if len(group) < replicaNum {
            return false
        }
    }

    // and last check historical segment
    historicalSegments := ob.targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
    for _, segment := range historicalSegments {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collectionID,
            ob.distMgr.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
        if len(group) < replicaNum {
            return false
        }
    }

    return true
}

func (ob *TargetObserver) updateCurrentTarget(collectionID int64) {
    log.Warn("observer trigger update current target",
        zap.Int64("collectionID", collectionID))
    ob.targetMgr.UpdateCollectionCurrentTarget(collectionID)
}
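The new file above follows a common observer lifecycle: a ticker-driven loop, a stop channel closed exactly once, and a WaitGroup so Stop blocks until the goroutine exits. A self-contained sketch of just that pattern (the work function is a stub, not the real target-update logic):

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

type tickerObserver struct {
    c        chan struct{}
    wg       sync.WaitGroup
    stopOnce sync.Once
    interval time.Duration
    work     func()
}

func newTickerObserver(interval time.Duration, work func()) *tickerObserver {
    return &tickerObserver{c: make(chan struct{}), interval: interval, work: work}
}

func (o *tickerObserver) Start(ctx context.Context) {
    o.wg.Add(1)
    go func() {
        defer o.wg.Done()
        ticker := time.NewTicker(o.interval)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                // Context cancellation stops the loop, like the observer above.
                return
            case <-o.c:
                // Explicit Stop closes this channel.
                return
            case <-ticker.C:
                o.work()
            }
        }
    }()
}

func (o *tickerObserver) Stop() {
    // Close the channel once and wait for the goroutine to drain.
    o.stopOnce.Do(func() {
        close(o.c)
        o.wg.Wait()
    })
}

func main() {
    ob := newTickerObserver(100*time.Millisecond, func() { fmt.Println("tick: check targets") })
    ob.Start(context.Background())
    time.Sleep(350 * time.Millisecond)
    ob.Stop()
}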
@ -0,0 +1,148 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package observers

import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"

    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    . "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/etcd"
)

type TargetObserverSuite struct {
    suite.Suite

    kv *etcdkv.EtcdKV
    // dependency
    meta      *meta.Meta
    targetMgr *meta.TargetManager
    distMgr   *meta.DistributionManager
    broker    *meta.MockBroker

    observer *TargetObserver

    collectionID       int64
    partitionID        int64
    nextTargetSegments []*datapb.SegmentBinlogs
    nextTargetChannels []*datapb.VchannelInfo
}

func (suite *TargetObserverSuite) SetupSuite() {
    Params.Init()
    Params.QueryCoordCfg.UpdateNextTargetInterval = 3 * time.Second
}

func (suite *TargetObserverSuite) SetupTest() {
    var err error
    config := GenerateEtcdConfig()
    cli, err := etcd.GetEtcdClient(
        config.UseEmbedEtcd,
        config.EtcdUseSSL,
        config.Endpoints,
        config.EtcdTLSCert,
        config.EtcdTLSKey,
        config.EtcdTLSCACert,
        config.EtcdTLSMinVersion)
    suite.Require().NoError(err)
    suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)

    // meta
    store := meta.NewMetaStore(suite.kv)
    idAllocator := RandomIncrementIDAllocator()
    suite.meta = meta.NewMeta(idAllocator, store)

    suite.broker = meta.NewMockBroker(suite.T())
    suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
    suite.distMgr = meta.NewDistributionManager()
    suite.observer = NewTargetObserver(suite.meta, suite.targetMgr, suite.distMgr, suite.broker)

    suite.observer.Start(context.TODO())

    suite.collectionID = int64(1000)
    suite.partitionID = int64(100)

    err = suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(suite.collectionID, 1))
    suite.NoError(err)
    err = suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collectionID, suite.partitionID))
    suite.NoError(err)

    suite.nextTargetChannels = []*datapb.VchannelInfo{
        {
            CollectionID: suite.collectionID,
            ChannelName: "channel-1",
        },
        {
            CollectionID: suite.collectionID,
            ChannelName: "channel-2",
        },
    }

    suite.nextTargetSegments = []*datapb.SegmentBinlogs{
        {
            SegmentID: 11,
            InsertChannel: "channel-1",
        },
        {
            SegmentID: 12,
            InsertChannel: "channel-2",
        },
    }

    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, mock.Anything, mock.Anything).Return(suite.nextTargetChannels, suite.nextTargetSegments, nil)
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partitionID}, nil)
}

func (suite *TargetObserverSuite) TestTriggerUpdateTarget() {
    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.NextTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.distMgr.SegmentDistManager.Update(2, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 11, 2, 0, "channel-1"))
    suite.distMgr.SegmentDistManager.Update(2, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 12, 2, 1, "channel-2"))
    suite.distMgr.ChannelDistManager.Update(2, utils.CreateTestChannel(suite.collectionID, 2, 0, "channel-1"))
    suite.distMgr.ChannelDistManager.Update(2, utils.CreateTestChannel(suite.collectionID, 2, 1, "channel-2"))

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.CurrentTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.CurrentTarget)) == 2
    }, 5*time.Second, 1*time.Second)
}

func (suite *TargetObserverSuite) TearDownSuite() {
    suite.kv.Close()
    suite.observer.Stop()
}

func TestTargetManager(t *testing.T) {
    suite.Run(t, new(TargetObserverSuite))
}
@ -27,12 +27,6 @@ import (
    "syscall"
    "time"

    "github.com/milvus-io/milvus/internal/metrics"
    "github.com/samber/lo"
    clientv3 "go.etcd.io/etcd/client/v3"
    "go.uber.org/zap"
    "golang.org/x/sync/errgroup"

    "github.com/milvus-io/milvus-proto/go-api/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/milvuspb"
    "github.com/milvus-io/milvus/internal/allocator"

@ -40,7 +34,7 @@ import (
    "github.com/milvus-io/milvus/internal/kv"
    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/proto/querypb"
    "github.com/milvus-io/milvus/internal/metrics"
    "github.com/milvus-io/milvus/internal/querycoordv2/balance"
    "github.com/milvus-io/milvus/internal/querycoordv2/checkers"
    "github.com/milvus-io/milvus/internal/querycoordv2/dist"

@ -57,6 +51,9 @@ import (
    "github.com/milvus-io/milvus/internal/util/sessionutil"
    "github.com/milvus-io/milvus/internal/util/tsoutil"
    "github.com/milvus-io/milvus/internal/util/typeutil"
    clientv3 "go.etcd.io/etcd/client/v3"
    "go.uber.org/zap"
    "golang.org/x/sync/errgroup"
)

var (

@ -105,7 +102,7 @@ type Server struct {
    // Observers
    collectionObserver *observers.CollectionObserver
    leaderObserver *observers.LeaderObserver
    handoffObserver *observers.HandoffObserver
    targetObserver *observers.TargetObserver

    balancer balance.Balance

@ -266,30 +263,23 @@ func (s *Server) initMeta() error {
        ChannelDistManager: meta.NewChannelDistManager(),
        LeaderViewManager: meta.NewLeaderViewManager(),
    }
    s.targetMgr = meta.NewTargetManager()
    s.broker = meta.NewCoordinatorBroker(
        s.dataCoord,
        s.rootCoord,
        s.indexCoord,
    )
    s.targetMgr = meta.NewTargetManager(s.broker, s.meta)

    return nil
}

func (s *Server) initObserver() {
    log.Info("init observers")
    s.handoffObserver = observers.NewHandoffObserver(
        s.store,
        s.meta,
        s.dist,
        s.targetMgr,
        s.broker,
    )
    s.collectionObserver = observers.NewCollectionObserver(
        s.dist,
        s.meta,
        s.targetMgr,
        s.broker,
        s.handoffObserver,
    )
    s.leaderObserver = observers.NewLeaderObserver(
        s.dist,

@ -297,12 +287,15 @@ func (s *Server) initObserver() {
        s.targetMgr,
        s.cluster,
    )
    s.targetObserver = observers.NewTargetObserver(
        s.meta,
        s.targetMgr,
        s.dist,
        s.broker,
    )
}

func (s *Server) afterStart() {
    now := time.Now()
    Params.QueryCoordCfg.CreatedTime = now
    Params.QueryCoordCfg.UpdatedTime = now
}

func (s *Server) Start() error {

@ -322,12 +315,6 @@ func (s *Server) Start() error {
    s.wg.Add(1)
    go s.watchNodes(revision)

    // handoff master start before recover collection, to clean all outdated handoff event.
    if err := s.handoffObserver.Start(s.ctx); err != nil {
        log.Error("start handoff observer failed, exit...", zap.Error(err))
        panic(err.Error())
    }

    log.Info("start recovering dist and target")
    err = s.recover()
    if err != nil {

@ -349,6 +336,7 @@ func (s *Server) Start() error {
    log.Info("start observers...")
    s.collectionObserver.Start(s.ctx)
    s.leaderObserver.Start(s.ctx)
    s.targetObserver.Start(s.ctx)

    if s.enableActiveStandBy {
        s.activateFunc = func() {

@ -406,8 +394,8 @@ func (s *Server) Stop() error {
    if s.leaderObserver != nil {
        s.leaderObserver.Stop()
    }
    if s.handoffObserver != nil {
        s.handoffObserver.Stop()
    if s.targetObserver != nil {
        s.targetObserver.Stop()
    }

    s.wg.Wait()

@ -515,31 +503,13 @@ func (s *Server) recover() error {
}

func (s *Server) recoverCollectionTargets(ctx context.Context, collection int64) error {
    var (
        partitions []int64
        err error
    )
    if s.meta.GetLoadType(collection) == querypb.LoadType_LoadCollection {
        partitions, err = s.broker.GetPartitions(ctx, collection)
        if err != nil {
            msg := "failed to get partitions from RootCoord"
            log.Error(msg, zap.Error(err))
            return utils.WrapError(msg, err)
        }
    } else {
        partitions = lo.Map(s.meta.GetPartitionsByCollection(collection), func(partition *meta.Partition, _ int) int64 {
            return partition.GetPartitionID()
        })
    err := s.targetMgr.UpdateCollectionNextTarget(collection)
    if err != nil {
        msg := "failed to update next target for collection"
        log.Error(msg, zap.Error(err))
        return utils.WrapError(msg, err)
    }

    s.handoffObserver.Register(collection)
    return utils.RegisterTargets(
        ctx,
        s.targetMgr,
        s.broker,
        collection,
        partitions,
    )
    return nil
}

func (s *Server) watchNodes(revision int64) {

@ -626,23 +596,12 @@ func (s *Server) handleNodeDown(node int64) {
    // are missed, it will recover for a while.
    channels := s.dist.ChannelDistManager.GetByNode(node)
    for _, channel := range channels {
        partitions, err := utils.GetPartitions(s.meta.CollectionManager,
            s.broker,
            channel.GetCollectionID())
        err := s.targetMgr.UpdateCollectionNextTarget(channel.GetCollectionID())
        if err != nil {
            log.Warn("failed to refresh targets of collection",
                zap.Int64("collectionID", channel.GetCollectionID()),
                zap.Error(err))
        }
        err = utils.RegisterTargets(s.ctx,
            s.targetMgr,
            s.broker,
            channel.GetCollectionID(),
            partitions)
        if err != nil {
            log.Warn("failed to refresh targets of collection",
                zap.Int64("collectionID", channel.GetCollectionID()),
            msg := "failed to update next targets for collection"
            log.Error(msg,
                zap.Error(err))
            continue
        }
    }
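The server hunks above drop the handoff observer wiring and route both recovery and node-down handling through the target manager: when a node fails, the server simply refreshes the next target of every collection whose channels that node hosted. A hedged, self-contained sketch of that flow; channelDist and targetManager here are illustrative stand-ins, not the real Milvus types:

package main

import (
    "fmt"
    "log"
)

type channel struct {
    collectionID int64
    name         string
}

type channelDist interface {
    GetByNode(node int64) []channel
}

type targetManager interface {
    UpdateCollectionNextTarget(collectionID int64) error
}

// handleNodeDown refreshes the next target of every collection the failed
// node was serving; a failed refresh is logged and skipped, since a periodic
// target observer can retry later.
func handleNodeDown(node int64, dist channelDist, targets targetManager) {
    for _, ch := range dist.GetByNode(node) {
        if err := targets.UpdateCollectionNextTarget(ch.collectionID); err != nil {
            log.Printf("failed to update next target for collection %d: %v", ch.collectionID, err)
            continue
        }
        fmt.Printf("refreshed next target for collection %d after node %d went down\n", ch.collectionID, node)
    }
}

type staticDist map[int64][]channel

func (d staticDist) GetByNode(node int64) []channel { return d[node] }

type noopTargets struct{}

func (noopTargets) UpdateCollectionNextTarget(int64) error { return nil }

func main() {
    dist := staticDist{7: {{collectionID: 100, name: "dml-channel-0"}}}
    handleNodeDown(7, dist, noopTargets{})
}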
@ -34,7 +34,6 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/dist"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/mocks"
    "github.com/milvus-io/milvus/internal/querycoordv2/observers"
    "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/task"
    "github.com/milvus-io/milvus/internal/util/dependency"

@ -272,11 +271,11 @@ func (suite *ServerSuite) loadAll() {
func (suite *ServerSuite) assertLoaded(collection int64) {
    suite.True(suite.server.meta.Exist(collection))
    for _, channel := range suite.channels[collection] {
        suite.NotNil(suite.server.targetMgr.GetDmChannel(channel))
        suite.NotNil(suite.server.targetMgr.GetDmChannel(collection, channel, meta.NextTarget))
    }
    for _, partitions := range suite.segments[collection] {
        for _, segment := range partitions {
            suite.NotNil(suite.server.targetMgr.GetSegment(segment))
            suite.NotNil(suite.server.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
        }
    }
}

@ -346,6 +345,7 @@ func (suite *ServerSuite) updateCollectionStatus(collectionID int64, status quer
func (suite *ServerSuite) hackServer() {
    suite.broker = meta.NewMockBroker(suite.T())
    suite.server.broker = suite.broker
    suite.server.targetMgr = meta.NewTargetManager(suite.broker, suite.server.meta)
    suite.server.taskScheduler = task.NewScheduler(
        suite.server.ctx,
        suite.server.meta,

@ -355,13 +355,6 @@ func (suite *ServerSuite) hackServer() {
        suite.server.cluster,
        suite.server.nodeMgr,
    )
    suite.server.handoffObserver = observers.NewHandoffObserver(
        suite.server.store,
        suite.server.meta,
        suite.server.dist,
        suite.server.targetMgr,
        suite.server.broker,
    )
    suite.server.distController = dist.NewDistController(
        suite.server.cluster,
        suite.server.nodeMgr,
@ -22,10 +22,6 @@ import (
    "fmt"
    "sync"

    "github.com/milvus-io/milvus/internal/util/errorutil"

    "golang.org/x/sync/errgroup"

    "github.com/milvus-io/milvus-proto/go-api/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/milvuspb"
    "github.com/milvus-io/milvus/internal/log"

@ -35,12 +31,14 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/job"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/errorutil"
    "github.com/milvus-io/milvus/internal/util/metricsinfo"
    "github.com/milvus-io/milvus/internal/util/timerecord"
    "github.com/milvus-io/milvus/internal/util/typeutil"
    "github.com/samber/lo"
    "go.uber.org/multierr"
    "go.uber.org/zap"
    "golang.org/x/sync/errgroup"
)

var (

@ -208,7 +206,6 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
        s.targetMgr,
        s.broker,
        s.nodeMgr,
        s.handoffObserver,
    )
    s.jobScheduler.Add(loadJob)
    err := loadJob.Wait()

@ -245,7 +242,6 @@ func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
        s.dist,
        s.meta,
        s.targetMgr,
        s.handoffObserver,
    )
    s.jobScheduler.Add(releaseJob)
    err := releaseJob.Wait()

@ -288,7 +284,6 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
        s.targetMgr,
        s.broker,
        s.nodeMgr,
        s.handoffObserver,
    )
    s.jobScheduler.Add(loadJob)
    err := loadJob.Wait()

@ -332,7 +327,6 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
        s.dist,
        s.meta,
        s.targetMgr,
        s.handoffObserver,
    )
    s.jobScheduler.Add(releaseJob)
    err := releaseJob.Wait()

@ -661,7 +655,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
        return resp, nil
    }

    channels := s.targetMgr.GetDmChannelsByCollection(req.GetCollectionID())
    channels := s.targetMgr.GetDmChannelsByCollection(req.GetCollectionID(), meta.CurrentTarget)
    if len(channels) == 0 {
        msg := "failed to get channels"
        log.Warn(msg, zap.Error(meta.ErrCollectionNotFound))

@ -669,7 +663,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
        return resp, nil
    }

    currentTargets := s.targetMgr.GetSegmentsByCollection(req.GetCollectionID())
    currentTargets := s.targetMgr.GetHistoricalSegmentsByCollection(req.GetCollectionID(), meta.CurrentTarget)
    for _, channel := range channels {
        log := log.With(zap.String("channel", channel.GetChannelName()))

@ -32,7 +32,6 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/balance"
    "github.com/milvus-io/milvus/internal/querycoordv2/job"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/observers"
    "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/session"
    "github.com/milvus-io/milvus/internal/querycoordv2/task"

@ -59,18 +58,17 @@ type ServiceSuite struct {
    nodes []int64

    // Dependencies
    kv kv.MetaKv
    store meta.Store
    dist *meta.DistributionManager
    meta *meta.Meta
    targetMgr *meta.TargetManager
    broker *meta.MockBroker
    cluster *session.MockCluster
    nodeMgr *session.NodeManager
    jobScheduler *job.Scheduler
    taskScheduler *task.MockScheduler
    handoffObserver *observers.HandoffObserver
    balancer balance.Balance
    kv kv.MetaKv
    store meta.Store
    dist *meta.DistributionManager
    meta *meta.Meta
    targetMgr *meta.TargetManager
    broker *meta.MockBroker
    cluster *session.MockCluster
    nodeMgr *session.NodeManager
    jobScheduler *job.Scheduler
    taskScheduler *task.MockScheduler
    balancer balance.Balance

    // Test object
    server *Server

@ -126,8 +124,8 @@ func (suite *ServiceSuite) SetupTest() {
    suite.store = meta.NewMetaStore(suite.kv)
    suite.dist = meta.NewDistributionManager()
    suite.meta = meta.NewMeta(params.RandomIncrementIDAllocator(), suite.store)
    suite.targetMgr = meta.NewTargetManager()
    suite.broker = meta.NewMockBroker(suite.T())
    suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
    suite.nodeMgr = session.NewNodeManager()
    for _, node := range suite.nodes {
        suite.nodeMgr.Add(session.NewNodeInfo(node, "localhost"))

@ -136,13 +134,6 @@ func (suite *ServiceSuite) SetupTest() {
    suite.jobScheduler = job.NewScheduler()
    suite.taskScheduler = task.NewMockScheduler(suite.T())
    suite.jobScheduler.Start(context.Background())
    suite.handoffObserver = observers.NewHandoffObserver(
        suite.store,
        suite.meta,
        suite.dist,
        suite.targetMgr,
        suite.broker,
    )
    suite.balancer = balance.NewRowCountBasedBalancer(
        suite.taskScheduler,
        suite.nodeMgr,

@ -165,7 +156,6 @@ func (suite *ServiceSuite) SetupTest() {
        jobScheduler: suite.jobScheduler,
        taskScheduler: suite.taskScheduler,
        balancer: suite.balancer,
        handoffObserver: suite.handoffObserver,
    }
    suite.server.UpdateStateCode(commonpb.StateCode_Healthy)
}

@ -972,7 +962,6 @@ func (suite *ServiceSuite) loadAll() {
        suite.targetMgr,
        suite.broker,
        suite.nodeMgr,
        suite.handoffObserver,
    )
    suite.jobScheduler.Add(job)
    err := job.Wait()

@ -980,6 +969,7 @@ func (suite *ServiceSuite) loadAll() {
    suite.EqualValues(suite.replicaNumber[collection], suite.meta.GetReplicaNumber(collection))
    suite.True(suite.meta.Exist(collection))
    suite.NotNil(suite.meta.GetCollection(collection))
    suite.targetMgr.UpdateCollectionCurrentTarget(collection)
    } else {
        req := &querypb.LoadPartitionsRequest{
            CollectionID: collection,

@ -994,7 +984,6 @@ func (suite *ServiceSuite) loadAll() {
        suite.targetMgr,
        suite.broker,
        suite.nodeMgr,
        suite.handoffObserver,
    )
    suite.jobScheduler.Add(job)
    err := job.Wait()

@ -1002,6 +991,7 @@ func (suite *ServiceSuite) loadAll() {
    suite.EqualValues(suite.replicaNumber[collection], suite.meta.GetReplicaNumber(collection))
    suite.True(suite.meta.Exist(collection))
    suite.NotNil(suite.meta.GetPartitionsByCollection(collection))
    suite.targetMgr.UpdateCollectionCurrentTarget(collection)
    }
    }
}

@ -1009,11 +999,11 @@ func (suite *ServiceSuite) loadAll() {
func (suite *ServiceSuite) assertLoaded(collection int64) {
    suite.True(suite.meta.Exist(collection))
    for _, channel := range suite.channels[collection] {
        suite.NotNil(suite.targetMgr.GetDmChannel(channel))
        suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.NextTarget))
    }
    for _, partitions := range suite.segments[collection] {
        for _, segment := range partitions {
            suite.NotNil(suite.targetMgr.GetSegment(segment))
            suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
        }
    }
}

@ -1021,7 +1011,7 @@ func (suite *ServiceSuite) assertLoaded(collection int64) {
func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ...int64) {
    suite.True(suite.meta.Exist(collection))
    for _, channel := range suite.channels[collection] {
        suite.NotNil(suite.targetMgr.GetDmChannel(channel))
        suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
    }
    partitionSet := typeutil.NewUniqueSet(partitions...)
    for partition, segments := range suite.segments[collection] {

@ -1029,7 +1019,7 @@ func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ..
        continue
    }
    for _, segment := range segments {
        suite.NotNil(suite.targetMgr.GetSegment(segment))
        suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
    }
    }
}

@ -1037,11 +1027,12 @@ func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ..
func (suite *ServiceSuite) assertReleased(collection int64) {
    suite.False(suite.meta.Exist(collection))
    for _, channel := range suite.channels[collection] {
        suite.Nil(suite.targetMgr.GetDmChannel(channel))
        suite.Nil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
    }
    for _, partitions := range suite.segments[collection] {
        for _, segment := range partitions {
            suite.Nil(suite.targetMgr.GetSegment(segment))
            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
        }
    }
}
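The updated assertions above always pass a target scope (meta.NextTarget or meta.CurrentTarget) instead of querying a single flat target. A hedged toy sketch of that two-scope bookkeeping; scopedTarget and its promote step are an illustration of the idea, not the real meta.TargetManager:

package main

import "fmt"

type targetScope int

const (
    nextTarget targetScope = iota
    currentTarget
)

type scopedTarget struct {
    segments map[targetScope]map[int64]struct{}
}

func newScopedTarget() *scopedTarget {
    return &scopedTarget{segments: map[targetScope]map[int64]struct{}{
        nextTarget:    {},
        currentTarget: {},
    }}
}

// addToNext registers segments into the next target, as a load job would.
func (t *scopedTarget) addToNext(segmentIDs ...int64) {
    for _, id := range segmentIDs {
        t.segments[nextTarget][id] = struct{}{}
    }
}

// promote makes the next target the current one, loosely mimicking
// UpdateCollectionCurrentTarget (assumption, heavily simplified).
func (t *scopedTarget) promote() {
    t.segments[currentTarget] = t.segments[nextTarget]
    t.segments[nextTarget] = map[int64]struct{}{}
}

// contains is the scoped lookup the new assertions rely on.
func (t *scopedTarget) contains(segmentID int64, scope targetScope) bool {
    _, ok := t.segments[scope][segmentID]
    return ok
}

func main() {
    tgt := newScopedTarget()
    tgt.addToNext(11, 12)
    fmt.Println(tgt.contains(11, nextTarget), tgt.contains(11, currentTarget)) // true false
    tgt.promote()
    fmt.Println(tgt.contains(11, currentTarget)) // true
}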
@ -18,6 +18,7 @@ package task

import (
    "context"
    "errors"
    "sync"
    "time"

@ -387,7 +388,12 @@ func (ex *Executor) subDmChannel(task *ChannelTask, step int) error {
        partitions...,
    )

    dmChannel := ex.targetMgr.GetDmChannel(action.ChannelName())
    dmChannel := ex.targetMgr.GetDmChannel(task.CollectionID(), action.ChannelName(), meta.NextTarget)
    if dmChannel == nil {
        msg := "channel does not exist in next target, skip it"
        log.Warn(msg, zap.String("channelName", action.ChannelName()))
        return errors.New(msg)
    }
    req := packSubDmChannelRequest(task, action, schema, loadMeta, dmChannel)
    err = fillSubDmChannelRequest(ctx, req, ex.broker)
    if err != nil {

@ -23,6 +23,9 @@ import (
    "runtime"
    "sync"

    "go.uber.org/atomic"
    "go.uber.org/zap"

    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/metrics"
    "github.com/milvus-io/milvus/internal/proto/querypb"

@ -31,8 +34,6 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/funcutil"
    . "github.com/milvus-io/milvus/internal/util/typeutil"
    "go.uber.org/atomic"
    "go.uber.org/zap"
)

const (

@ -443,7 +444,7 @@ func (scheduler *taskScheduler) GetNodeSegmentCntDelta(nodeID int64) int {
        continue
    }
    segmentAction := action.(*SegmentAction)
    segment := scheduler.targetMgr.GetSegment(segmentAction.SegmentID())
    segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), segmentAction.SegmentID(), meta.NextTarget)
    if action.Type() == ActionTypeGrow {
        delta += int(segment.GetNumOfRows())
    } else {

@ -527,7 +528,7 @@ func (scheduler *taskScheduler) isRelated(task Task, node int64) bool {
        return true
    }
    if task, ok := task.(*SegmentTask); ok {
        segment := scheduler.targetMgr.GetSegment(task.SegmentID())
        segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
        if segment == nil {
            continue
        }

@ -732,7 +733,7 @@ func (scheduler *taskScheduler) checkSegmentTaskStale(task *SegmentTask) bool {
    for _, action := range task.Actions() {
        switch action.Type() {
        case ActionTypeGrow:
            segment := scheduler.targetMgr.GetSegment(task.SegmentID())
            segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
            if segment == nil {
                log.Warn("task stale due tu the segment to load not exists in targets",
                    zap.Int64("segment", task.segmentID))

@ -766,7 +767,7 @@ func (scheduler *taskScheduler) checkChannelTaskStale(task *ChannelTask) bool {
    for _, action := range task.Actions() {
        switch action.Type() {
        case ActionTypeGrow:
            if !scheduler.targetMgr.ContainDmChannel(task.Channel()) {
            if scheduler.targetMgr.GetDmChannel(task.collectionID, task.Channel(), meta.NextTarget) == nil {
                log.Warn("the task is stale, the channel to subscribe not exists in targets",
                    zap.String("channel", task.Channel()))
                return true

@ -35,7 +35,7 @@ import (
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/etcd"
    "github.com/milvus-io/milvus/internal/util/typeutil"
    "github.com/stretchr/testify/mock"
    mock "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"
)

@ -132,8 +132,8 @@ func (suite *TaskSuite) SetupTest() {
    suite.store = meta.NewMetaStore(suite.kv)
    suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store)
    suite.dist = meta.NewDistributionManager()
    suite.target = meta.NewTargetManager()
    suite.broker = meta.NewMockBroker(suite.T())
    suite.target = meta.NewTargetManager(suite.broker, suite.meta)
    suite.nodeMgr = session.NewNodeManager()
    suite.cluster = session.NewMockCluster(suite.T())

@ -253,12 +253,13 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {

    // Test subscribe channel task
    tasks := []Task{}
    dmChannels := make([]*datapb.VchannelInfo, 0)
    for _, channel := range suite.subChannels {
        suite.target.AddDmChannel(meta.DmChannelFromVChannel(&datapb.VchannelInfo{
        dmChannels = append(dmChannels, &datapb.VchannelInfo{
            CollectionID: suite.collection,
            ChannelName: channel,
            UnflushedSegmentIds: []int64{suite.growingSegments[channel]},
        }))
        })
        task, err := NewChannelTask(
            ctx,
            timeout,

@ -272,6 +273,8 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    suite.AssertTaskNum(0, len(suite.subChannels), len(suite.subChannels), 0)

    // Process tasks

@ -313,11 +316,12 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {

    // Test unsubscribe channel task
    tasks := []Task{}
    dmChannels := make([]*datapb.VchannelInfo, 0)
    for _, channel := range suite.unsubChannels {
        suite.target.AddDmChannel(meta.DmChannelFromVChannel(&datapb.VchannelInfo{
        dmChannels = append(dmChannels, &datapb.VchannelInfo{
            CollectionID: suite.collection,
            ChannelName: channel,
        }))
        })
        task, err := NewChannelTask(
            ctx,
            timeout,

@ -332,6 +336,9 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))

    // Only first channel exists
    suite.dist.LeaderViewManager.Update(targetNode, &meta.LeaderView{
        ID: targetNode,

@ -395,11 +402,10 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
        ChannelName: channel.ChannelName,
    }))
    tasks := []Task{}
    segments := make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.loadSegments {
        suite.target.AddSegment(&datapb.SegmentInfo{
            ID: segment,
            CollectionID: suite.collection,
            PartitionID: partition,
        segments = append(segments, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.ChannelName,
        })
        task, err := NewSegmentTask(

@ -415,6 +421,8 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segments, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    segmentsNum := len(suite.loadSegments)
    suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@ -520,11 +528,10 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() {
        ChannelName: channel.ChannelName,
    }))
    tasks := []Task{}
    segmentInfos := make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.loadSegments {
        suite.target.AddSegment(&datapb.SegmentInfo{
            ID: segment,
            CollectionID: suite.collection,
            PartitionID: partition,
        segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.ChannelName,
        })
        task, err := NewSegmentTask(

@ -540,6 +547,8 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    segmentsNum := len(suite.loadSegments)
    suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@ -655,9 +664,14 @@ func (suite *TaskSuite) TestReleaseGrowingSegmentTask() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }

    growings := map[int64]*meta.Segment{}
    for _, segment := range suite.releaseSegments[1:] {
        growings[segment] = utils.CreateTestSegment(suite.collection, 1, segment, targetNode, 1, "")
    }
    suite.dist.LeaderViewManager.Update(targetNode, &meta.LeaderView{
        ID: targetNode,
        GrowingSegments: typeutil.NewUniqueSet(suite.releaseSegments[1:]...),
        GrowingSegments: growings,
    })

    segmentsNum := len(suite.releaseSegments)

@ -730,13 +744,12 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
    }
    tasks := []Task{}
    segments := make([]*meta.Segment, 0)
    segmentInfos := make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.moveSegments {
        segments = append(segments,
            utils.CreateTestSegment(suite.collection, partition, segment, sourceNode, 1, channel.ChannelName))
        suite.target.AddSegment(&datapb.SegmentInfo{
            ID: segment,
            CollectionID: suite.collection,
            PartitionID: partition,
        segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.ChannelName,
        })
        view.Segments[segment] = &querypb.SegmentDist{NodeID: sourceNode, Version: 0}

@ -755,6 +768,8 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    suite.dist.SegmentDistManager.Update(sourceNode, segments...)
    suite.dist.LeaderViewManager.Update(leader, view)
    segmentsNum := len(suite.moveSegments)

@ -822,12 +837,11 @@ func (suite *TaskSuite) TestTaskCanceled() {
        ChannelName: channel.ChannelName,
    }))
    tasks := []Task{}
    segmentInfos := []*datapb.SegmentBinlogs{}
    for _, segment := range suite.loadSegments {
        suite.target.AddSegment(&datapb.SegmentInfo{
            ID: segment,
            CollectionID: suite.collection,
            PartitionID: partition,
            InsertChannel: channel.ChannelName,
        segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.GetChannelName(),
        })
        task, err := NewSegmentTask(
            ctx,

@ -844,6 +858,8 @@ func (suite *TaskSuite) TestTaskCanceled() {
    }
    segmentsNum := len(suite.loadSegments)
    suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, partition).Return(nil, segmentInfos, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, partition)

    // Process tasks
    suite.dispatchAndWait(targetNode)

@ -898,19 +914,17 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
    suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil)

    // Test load segment task
    suite.meta.ReplicaManager.Put(
        createReplica(suite.collection, targetNode))
    suite.meta.ReplicaManager.Put(createReplica(suite.collection, targetNode))
    suite.dist.ChannelDistManager.Update(targetNode, meta.DmChannelFromVChannel(&datapb.VchannelInfo{
        CollectionID: suite.collection,
        ChannelName: channel.ChannelName,
    }))
    tasks := []Task{}
    segmentInfos := make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.loadSegments {
        suite.target.AddSegment(&datapb.SegmentInfo{
            ID: segment,
            CollectionID: suite.collection,
            PartitionID: partition,
            InsertChannel: channel.ChannelName,
        segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.GetChannelName(),
        })
        task, err := NewSegmentTask(
            ctx,

@ -925,6 +939,8 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    segmentsNum := len(suite.loadSegments)
    suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@ -947,7 +963,15 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
        view.Segments[segment] = &querypb.SegmentDist{NodeID: targetNode, Version: 0}
    }
    suite.dist.LeaderViewManager.Update(targetNode, view)
    suite.target.RemoveSegment(suite.loadSegments[0])
    segmentInfos = make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.loadSegments[1:] {
        segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.GetChannelName(),
        })
    }
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(2)).Return(nil, segmentInfos, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(2))
    suite.dispatchAndWait(targetNode)
    suite.AssertTaskNum(0, 0, 0, 0)

@ -1132,10 +1156,10 @@ func (suite *TaskSuite) TestNoExecutor() {
        ChannelName: channel.ChannelName,
    }))
    tasks := []Task{}
    segments := make([]*datapb.SegmentInfo, 0)
    segments := make([]*datapb.SegmentBinlogs, 0)
    for _, segment := range suite.loadSegments {
        segments = append(segments, &datapb.SegmentInfo{
            ID: segment,
        segments = append(segments, &datapb.SegmentBinlogs{
            SegmentID: segment,
            InsertChannel: channel.ChannelName,
        })
        task, err := NewSegmentTask(

@ -1151,7 +1175,8 @@ func (suite *TaskSuite) TestNoExecutor() {
        err = suite.scheduler.Add(task)
        suite.NoError(err)
    }
    suite.target.AddSegment(segments...)
    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segments, nil)
    suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
    segmentsNum := len(suite.loadSegments)
    suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@@ -21,12 +21,10 @@ import (
"fmt"
"math/rand"

"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/samber/lo"

"github.com/milvus-io/milvus/internal/querycoordv2/meta"
"github.com/milvus-io/milvus/internal/querycoordv2/session"
"github.com/samber/lo"
"go.uber.org/zap"
)

func GetReplicaNodesInfo(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager, replicaID int64) []*session.NodeInfo {

@@ -125,61 +123,3 @@ func SpawnReplicas(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager
AssignNodesToReplicas(nodeMgr, replicas...)
return replicas, replicaMgr.Put(replicas...)
}

// RegisterTargets fetch channels and segments of given collection(partitions) from DataCoord,
// and then registers them on Target Manager
func RegisterTargets(ctx context.Context,
targetMgr *meta.TargetManager,
broker meta.Broker,
collection int64,
partitions []int64,
) error {
channels, segments, err := FetchTargets(ctx, targetMgr, broker, collection, partitions)
if err != nil {
return err
}

targetMgr.AddDmChannel(channels...)
targetMgr.AddSegment(segments...)
return nil
}

func FetchTargets(ctx context.Context,
targetMgr *meta.TargetManager,
broker meta.Broker,
collection int64,
partitions []int64,
) ([]*meta.DmChannel, []*datapb.SegmentInfo, error) {
channels := make(map[string][]*datapb.VchannelInfo)
segments := make([]*datapb.SegmentInfo, 0)

for _, partitionID := range partitions {
log.Info("get recovery info...",
zap.Int64("collectionID", collection),
zap.Int64("partitionID", partitionID))
vChannelInfos, binlogs, err := broker.GetRecoveryInfo(ctx, collection, partitionID)
if err != nil {
return nil, nil, err
}

// Register segments
for _, segmentBinlogs := range binlogs {
segments = append(segments, SegmentBinlogs2SegmentInfo(
collection,
partitionID,
segmentBinlogs))
}

for _, info := range vChannelInfos {
channelName := info.GetChannelName()
channels[channelName] = append(channels[channelName], info)
}
}
// Merge and register channels
dmChannels := make([]*meta.DmChannel, 0, len(channels))
for _, channels := range channels {
dmChannel := MergeDmChannelInfo(channels)
dmChannels = append(dmChannels, dmChannel)
}
return dmChannels, segments, nil
}
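
For orientation: the removed FetchTargets helper above gathers recovery info partition by partition, groups the returned VchannelInfos by channel name, and then merges each group into a single DmChannel. Below is a minimal, self-contained sketch of just that group-then-merge step; vchannelInfo and mergeChannelInfo are simplified stand-ins, not the real datapb/meta types nor the actual MergeDmChannelInfo logic.

package main

import "fmt"

// vchannelInfo is a simplified stand-in for datapb.VchannelInfo.
type vchannelInfo struct {
    ChannelName string
    SeekTs      uint64
}

// mergeChannelInfo is a stand-in for MergeDmChannelInfo; here it only keeps
// the smallest seek timestamp seen for a channel (illustrative, not the real
// merge rules).
func mergeChannelInfo(infos []vchannelInfo) vchannelInfo {
    merged := infos[0]
    for _, info := range infos[1:] {
        if info.SeekTs < merged.SeekTs {
            merged.SeekTs = info.SeekTs
        }
    }
    return merged
}

func main() {
    // Pretend these came back from GetRecoveryInfo for two partitions.
    recovered := []vchannelInfo{
        {ChannelName: "dml-ch-0", SeekTs: 120},
        {ChannelName: "dml-ch-1", SeekTs: 80},
        {ChannelName: "dml-ch-0", SeekTs: 95},
    }

    // Group the infos by channel name, as FetchTargets did across partitions.
    grouped := make(map[string][]vchannelInfo)
    for _, info := range recovered {
        grouped[info.ChannelName] = append(grouped[info.ChannelName], info)
    }

    // Merge each group into a single entry per channel.
    for name, infos := range grouped {
        fmt.Printf("%s -> seek at ts %d\n", name, mergeChannelInfo(infos).SeekTs)
    }
}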

@@ -23,7 +23,7 @@ import (
"github.com/milvus-io/milvus/internal/util/typeutil"
)

func CreateTestLeaderView(id, collection int64, channel string, segments map[int64]int64, growings []int64) *meta.LeaderView {
func CreateTestLeaderView(id, collection int64, channel string, segments map[int64]int64, growings map[int64]*meta.Segment) *meta.LeaderView {
segmentVersions := make(map[int64]*querypb.SegmentDist)
for segment, node := range segments {
segmentVersions[segment] = &querypb.SegmentDist{

@@ -36,7 +36,7 @@ func CreateTestLeaderView(id, collection int64, channel string, segments map[int
CollectionID: collection,
Channel: channel,
Segments: segmentVersions,
GrowingSegments: typeutil.NewUniqueSet(growings...),
GrowingSegments: growings,
}
}

@@ -71,6 +71,15 @@ func CreateTestCollection(collection int64, replica int32) *meta.Collection {
}
}

func CreateTestPartition(collection int64, partitionID int64) *meta.Partition {
return &meta.Partition{
PartitionLoadInfo: &querypb.PartitionLoadInfo{
CollectionID: collection,
PartitionID: partitionID,
},
}
}

func CreateTestSegmentInfo(collection, partition, segment int64, channel string) *datapb.SegmentInfo {
return &datapb.SegmentInfo{
ID: segment,

@@ -199,7 +199,7 @@ func (s *DataSyncServiceSuite) TestRemoveEmptyFlowgraphByChannel() {
channelName := fmt.Sprintf("%s_%d_1", Params.CommonCfg.RootCoordDml, defaultCollectionID)
deltaChannelName, err := funcutil.ConvertChannelName(channelName, Params.CommonCfg.RootCoordDml, Params.CommonCfg.RootCoordDelta)
s.Require().NoError(err)
err = s.dsService.metaReplica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, channelName, defaultSegmentVersion, segmentTypeSealed)
err = s.dsService.metaReplica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, channelName, defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeSealed)
s.Require().NoError(err)

_, err = s.dsService.addFlowGraphsForDeltaChannels(defaultCollectionID, []string{deltaChannelName}, map[string]string{deltaChannelName: deltaChannelName})

@@ -42,6 +42,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -65,6 +66,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -101,6 +103,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeGrowing)
assert.NoError(t, err)

@@ -123,6 +126,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -203,6 +207,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -229,6 +234,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -256,6 +262,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -282,6 +289,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeSealed)
assert.NoError(t, err)

@@ -34,6 +34,7 @@ import (
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/segcorepb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/flowgraph"

@@ -134,8 +135,16 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
panic(err)
}
if !has {
log.Info("Add growing segment", zap.Int64("collectionID", insertMsg.CollectionID), zap.Int64("segmentID", insertMsg.SegmentID))
err = iNode.metaReplica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID, insertMsg.ShardName, 0, segmentTypeGrowing)
log.Info("Add growing segment",
zap.Int64("collectionID", insertMsg.CollectionID),
zap.Int64("segmentID", insertMsg.SegmentID),
zap.Uint64("startPosition", insertMsg.BeginTs()),
)
startPosition := &internalpb.MsgPosition{
ChannelName: insertMsg.ShardName,
Timestamp: insertMsg.BeginTs(),
}
err = iNode.metaReplica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID, insertMsg.ShardName, 0, startPosition, segmentTypeGrowing)
if err != nil {
// error occurs when collection or partition cannot be found, collection and partition should be created before
err = fmt.Errorf("insertNode addSegment failed, err = %s", err)
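
The insertNode change above records a growing segment's start position the first time the segment shows up in the insert stream, built from the message's shard name and begin timestamp. A minimal stand-alone sketch of that first-seen bookkeeping; msgPosition and insertMsg are simplified stand-ins for internalpb.MsgPosition and the flow-graph insert message.

package main

import "fmt"

// msgPosition is a simplified stand-in for internalpb.MsgPosition.
type msgPosition struct {
    ChannelName string
    Timestamp   uint64
}

// insertMsg is a simplified stand-in for the flow-graph insert message.
type insertMsg struct {
    SegmentID int64
    ShardName string
    BeginTs   uint64
}

func main() {
    // startPositions mimics what the meta replica keeps per growing segment.
    startPositions := make(map[int64]*msgPosition)

    stream := []insertMsg{
        {SegmentID: 1, ShardName: "dml-ch-0", BeginTs: 100},
        {SegmentID: 1, ShardName: "dml-ch-0", BeginTs: 130},
        {SegmentID: 2, ShardName: "dml-ch-0", BeginTs: 140},
    }

    for _, msg := range stream {
        // Only the first message of a new growing segment defines its start
        // position; later inserts into the same segment do not overwrite it.
        if _, ok := startPositions[msg.SegmentID]; !ok {
            startPositions[msg.SegmentID] = &msgPosition{
                ChannelName: msg.ShardName,
                Timestamp:   msg.BeginTs,
            }
        }
    }

    for id, pos := range startPositions {
        fmt.Printf("segment %d starts at %s@%d\n", id, pos.ChannelName, pos.Timestamp)
    }
}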

@@ -45,6 +45,7 @@ func getInsertNode() (*insertNode, error) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeGrowing)
if err != nil {
return nil, err

@@ -1287,9 +1287,13 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
sealedSegments := node.metaReplica.getSealedSegments()
shardClusters := node.ShardClusterService.GetShardClusters()

channelGrowingsMap := make(map[string][]int64)
channelGrowingsMap := make(map[string]map[int64]*internalpb.MsgPosition)
for _, s := range growingSegments {
channelGrowingsMap[s.vChannelID] = append(channelGrowingsMap[s.vChannelID], s.ID())
if _, ok := channelGrowingsMap[s.vChannelID]; !ok {
channelGrowingsMap[s.vChannelID] = make(map[int64]*internalpb.MsgPosition)
}

channelGrowingsMap[s.vChannelID][s.ID()] = s.startPosition
}

segmentVersionInfos := make([]*querypb.SegmentVersionInfo, 0, len(sealedSegments))

@@ -1319,10 +1323,10 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
}
}
view := &querypb.LeaderView{
Collection: sc.collectionID,
Channel: sc.vchannelName,
SegmentDist: mapping,
GrowingSegmentIDs: channelGrowingsMap[sc.vchannelName],
Collection: sc.collectionID,
Channel: sc.vchannelName,
SegmentDist: mapping,
GrowingSegments: channelGrowingsMap[sc.vchannelName],
}
leaderViews = append(leaderViews, view)
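
GetDataDistribution now reports growing segments per vchannel as a map from segment ID to start position instead of a flat ID slice. A small self-contained sketch of that nested grouping; position and growingSegment are simplified stand-ins for the real internalpb/querynode types.

package main

import "fmt"

// position is a simplified stand-in for internalpb.MsgPosition.
type position struct{ Timestamp uint64 }

// growingSegment is a simplified stand-in for a query node growing segment.
type growingSegment struct {
    ID            int64
    VChannel      string
    StartPosition *position
}

func main() {
    segments := []growingSegment{
        {ID: 10, VChannel: "dml-ch-0", StartPosition: &position{Timestamp: 100}},
        {ID: 11, VChannel: "dml-ch-0", StartPosition: &position{Timestamp: 120}},
        {ID: 20, VChannel: "dml-ch-1", StartPosition: &position{Timestamp: 90}},
    }

    // Group growing segments per vchannel, keeping each segment's start
    // position instead of only its ID (the shape the new LeaderView carries).
    byChannel := make(map[string]map[int64]*position)
    for _, s := range segments {
        if _, ok := byChannel[s.VChannel]; !ok {
            byChannel[s.VChannel] = make(map[int64]*position)
        }
        byChannel[s.VChannel][s.ID] = s.StartPosition
    }

    for ch, growings := range byChannel {
        fmt.Printf("%s: %d growing segment(s)\n", ch, len(growings))
    }
}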

@@ -29,8 +29,6 @@ import (
"strconv"
"sync"

"go.uber.org/zap"

"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"

@@ -40,6 +38,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/samber/lo"
"go.uber.org/zap"
)

var (

@@ -103,7 +102,7 @@ type ReplicaInterface interface {

// segment
// addSegment add a new segment to collectionReplica
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, segType segmentType) error
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, seekPosition *internalpb.MsgPosition, segType segmentType) error
// setSegment adds a segment to collectionReplica
setSegment(segment *Segment) error
// removeSegment removes a segment from collectionReplica

@@ -563,7 +562,7 @@ func (replica *metaReplica) getSegmentIDsPrivate(partitionID UniqueID, segType s

// ----------------------------------------------------------------------------------------------------- segment
// addSegment add a new segment to collectionReplica
func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, segType segmentType) error {
func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, seekPosition *internalpb.MsgPosition, segType segmentType) error {
replica.mu.Lock()
defer replica.mu.Unlock()

@@ -574,7 +573,7 @@ func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID,
collection.mu.Lock()
defer collection.mu.Unlock()

seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version)
seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version, seekPosition)
if err != nil {
return err
}

@@ -145,7 +145,7 @@ func TestMetaReplica_segment(t *testing.T) {

const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
assert.NoError(t, err)
targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
assert.NoError(t, err)

@@ -160,7 +160,7 @@ func TestMetaReplica_segment(t *testing.T) {

const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
assert.NoError(t, err)
targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
assert.NoError(t, err)

@@ -176,7 +176,7 @@ func TestMetaReplica_segment(t *testing.T) {

const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
assert.NoError(t, err)
targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
assert.NoError(t, err)

@@ -195,10 +195,10 @@ func TestMetaReplica_segment(t *testing.T) {
assert.NoError(t, err)
defer replica.freeAll()

err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
assert.NoError(t, err)

err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
assert.Error(t, err)
})

@@ -208,7 +208,7 @@ func TestMetaReplica_segment(t *testing.T) {
defer replica.freeAll()

invalidType := commonpb.SegmentState_NotExist
err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, invalidType)
err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, invalidType)
assert.Error(t, err)
_, err = replica.getSegmentByID(defaultSegmentID, invalidType)
assert.Error(t, err)

@@ -245,12 +245,12 @@ func TestMetaReplica_segment(t *testing.T) {
},
}

segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
err = replica.setSegment(segment1)
assert.NoError(t, err)

segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion)
segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
segment2.setIndexedFieldInfo(fieldID, indexInfo)
err = replica.setSegment(segment2)

@@ -277,22 +277,22 @@ func TestMetaReplica_segment(t *testing.T) {
replica.addPartition(defaultCollectionID, defaultPartitionID)
replica.addPartition(defaultCollectionID, defaultPartitionID+1)

segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion)
segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
err = replica.setSegment(segment1)
assert.NoError(t, err)

segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion)
segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
err = replica.setSegment(segment2)
assert.NoError(t, err)

segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion)
segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
err = replica.setSegment(segment3)
assert.NoError(t, err)

segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion)
segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)
err = replica.setSegment(segment4)
assert.NoError(t, err)

@@ -344,13 +344,13 @@ func TestMetaReplica_BlackList(t *testing.T) {
replica.addPartition(defaultCollectionID, defaultPartitionID)
replica.addPartition(defaultCollectionID, defaultPartitionID+1)

segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion)
segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)

segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion)
segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)

segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion)
segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.NoError(t, err)

replica.addSegmentsLoadingList([]UniqueID{1, 2, 3})

@@ -93,6 +93,12 @@ const (
defaultChannelName = "default-channel"
)

var defaultSegmentStartPosition = &internalpb.MsgPosition{
ChannelName: defaultChannelName,
MsgID: []byte{},
Timestamp: 0,
}

const (
defaultMsgLength = 100
defaultDelLength = 10

@@ -1241,7 +1247,9 @@ func genSealedSegment(schema *schemapb.CollectionSchema,
collectionID,
vChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
if err != nil {
return nil, err
}

@@ -1324,6 +1332,7 @@ func genSimpleReplicaWithGrowingSegment() (ReplicaInterface, error) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeGrowing)
if err != nil {
return nil, err

@@ -61,7 +61,7 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
err = node.metaReplica.addPartition(collection.ID(), defaultPartitionID)
assert.NoError(t, err)

err = node.metaReplica.addSegment(segmentID, defaultPartitionID, collectionID, "", defaultSegmentVersion, segmentTypeSealed)
err = node.metaReplica.addSegment(segmentID, defaultPartitionID, collectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeSealed)
assert.NoError(t, err)
}

@@ -50,6 +50,7 @@ import (
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/segcorepb"
"github.com/milvus-io/milvus/internal/storage"

@@ -77,10 +78,11 @@ type Segment struct {
mut sync.RWMutex // protects segmentPtr
segmentPtr C.CSegmentInterface

segmentID UniqueID
partitionID UniqueID
collectionID UniqueID
version UniqueID
segmentID UniqueID
partitionID UniqueID
collectionID UniqueID
version UniqueID
startPosition *internalpb.MsgPosition // for growing segment release

vChannelID Channel
lastMemSize int64

@@ -168,7 +170,9 @@ func newSegment(collection *Collection,
collectionID UniqueID,
vChannelID Channel,
segType segmentType,
version UniqueID) (*Segment, error) {
version UniqueID,
startPosition *internalpb.MsgPosition,
) (*Segment, error) {
/*
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);

@@ -203,6 +207,7 @@ func newSegment(collection *Collection,
partitionID: partitionID,
collectionID: collectionID,
version: version,
startPosition: startPosition,
vChannelID: vChannelID,
indexedFieldInfos: typeutil.NewConcurrentMap[int64, *IndexedFieldInfo](),
recentlyModified: atomic.NewBool(false),
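
The Segment struct and newSegment constructor above now carry a startPosition, which the field comment says is kept for growing-segment release. A cut-down sketch of the resulting shape, using stand-in types rather than the real querynode Segment.

package main

import "fmt"

// msgPosition is a simplified stand-in for internalpb.MsgPosition.
type msgPosition struct {
    ChannelName string
    Timestamp   uint64
}

// segment is a cut-down stand-in for querynode's Segment, keeping only the
// identity fields and the newly added start position.
type segment struct {
    segmentID     int64
    collectionID  int64
    vChannelID    string
    startPosition *msgPosition // recorded so growing-segment release can refer to it
}

// newSegment mirrors the extended constructor: callers now pass the start
// position alongside the other identity fields.
func newSegment(segmentID, collectionID int64, vChannelID string, startPosition *msgPosition) *segment {
    return &segment{
        segmentID:     segmentID,
        collectionID:  collectionID,
        vChannelID:    vChannelID,
        startPosition: startPosition,
    }
}

func main() {
    seg := newSegment(1, 100, "dml-ch-0", &msgPosition{ChannelName: "dml-ch-0", Timestamp: 42})
    fmt.Printf("segment %d on %s starts at ts=%d\n", seg.segmentID, seg.vChannelID, seg.startPosition.Timestamp)
}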

@@ -151,7 +151,7 @@ func (loader *segmentLoader) LoadSegment(ctx context.Context, req *querypb.LoadS
return nil, err
}

segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion())
segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion(), info.StartPosition)
if err != nil {
log.Error("load segment failed when create new segment",
zap.Int64("partitionID", partitionID),

@@ -229,7 +229,9 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)

binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)

@@ -383,7 +385,9 @@ func TestSegmentLoader_invalid(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)

binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)

@@ -422,7 +426,9 @@ func TestSegmentLoader_invalid(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)

err = loader.loadFieldIndexData(ctx, segment, &querypb.FieldIndexInfo{

@@ -462,7 +468,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
assert.NoError(t, err)

segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Nil(t, err)

insertData, err := genInsertData(defaultMsgLength, collection.schema)

@@ -491,7 +497,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
assert.NoError(t, err)

segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Nil(t, err)

insertData, err := genInsertData(defaultMsgLength, collection.schema)

@@ -47,7 +47,7 @@ func TestSegment_newSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Nil(t, err)
assert.Equal(t, segmentID, segment.segmentID)
deleteSegment(segment)

@@ -57,7 +57,7 @@ func TestSegment_newSegment(t *testing.T) {
_, err = newSegment(collection,
defaultSegmentID,
defaultPartitionID,
collectionID, "", 100, defaultSegmentVersion)
collectionID, "", 100, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Error(t, err)
})
}

@@ -71,7 +71,7 @@ func TestSegment_deleteSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -95,7 +95,7 @@ func TestSegment_getRowCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -137,7 +137,7 @@ func TestSegment_retrieve(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -221,7 +221,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -270,7 +270,7 @@ func TestSegment_getMemSize(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -305,7 +305,7 @@ func TestSegment_segmentInsert(t *testing.T) {
collection := newCollection(collectionID, schema)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -349,7 +349,7 @@ func TestSegment_segmentDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -443,7 +443,7 @@ func TestSegment_segmentPreInsert(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -462,7 +462,7 @@ func TestSegment_segmentPreDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion)
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
assert.Equal(t, segmentID, segment.segmentID)
assert.Nil(t, err)

@@ -507,7 +507,9 @@ func TestSegment_segmentLoadDeletedRecord(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)
ids := []int64{1, 2, 3}
pks := make([]primaryKey, 0)

@@ -584,7 +586,9 @@ func TestSegment_BasicMetrics(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)

t.Run("test id binlog row size", func(t *testing.T) {

@@ -631,7 +635,9 @@ func TestSegment_fillIndexedFieldsData(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
defaultSegmentVersion)
defaultSegmentVersion,
defaultSegmentStartPosition,
)
assert.Nil(t, err)

vecCM, err := genVectorChunkManager(ctx, collection)

@@ -980,6 +986,7 @@ func TestUpdateBloomFilter(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeGrowing)
assert.NoError(t, err)
seg, err := replica.getSegmentByID(defaultSegmentID, segmentTypeGrowing)

@@ -1005,6 +1012,7 @@ func TestUpdateBloomFilter(t *testing.T) {
defaultCollectionID,
defaultDMLChannel,
defaultSegmentVersion,
defaultSegmentStartPosition,
segmentTypeGrowing)
assert.NoError(t, err)
seg, err := replica.getSegmentByID(defaultSegmentID, segmentTypeGrowing)

@@ -731,7 +731,9 @@ type queryCoordConfig struct {
LoadTimeoutSeconds time.Duration
CheckHandoffInterval time.Duration
EnableActiveStandby bool
RefreshTargetsIntervalSeconds time.Duration

NextTargetSurviveTime time.Duration
UpdateNextTargetInterval time.Duration
}

func (p *queryCoordConfig) init(base *BaseTable) {

@@ -759,7 +761,8 @@ func (p *queryCoordConfig) init(base *BaseTable) {
p.initLoadTimeoutSeconds()
p.initCheckHandoffInterval()
p.initEnableActiveStandby()
p.initRefreshTargetsIntervalSeconds()
p.initNextTargetSurviveTime()
p.initUpdateNextTargetInterval()
}

func (p *queryCoordConfig) initTaskRetryNum() {

@@ -895,13 +898,22 @@ func (p *queryCoordConfig) GetNodeID() UniqueID {
return 0
}

func (p *queryCoordConfig) initRefreshTargetsIntervalSeconds() {
interval := p.Base.LoadWithDefault("queryCoord.refreshTargetsIntervalSeconds", "300")
refreshInterval, err := strconv.ParseInt(interval, 10, 64)
func (p *queryCoordConfig) initNextTargetSurviveTime() {
interval := p.Base.LoadWithDefault("queryCoord.NextTargetSurviveTime", "300")
nextTargetSurviveTime, err := strconv.ParseInt(interval, 10, 64)
if err != nil {
panic(err)
}
p.RefreshTargetsIntervalSeconds = time.Duration(refreshInterval) * time.Second
p.NextTargetSurviveTime = time.Duration(nextTargetSurviveTime) * time.Second
}

func (p *queryCoordConfig) initUpdateNextTargetInterval() {
interval := p.Base.LoadWithDefault("queryCoord.UpdateNextTargetInterval", "30")
updateNextTargetInterval, err := strconv.ParseInt(interval, 10, 64)
if err != nil {
panic(err)
}
p.UpdateNextTargetInterval = time.Duration(updateNextTargetInterval) * time.Second
}

// /////////////////////////////////////////////////////////////////////////////
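
The two new settings follow the usual paramtable pattern: load a string with a default, parse it as a number of seconds, and convert to time.Duration. A minimal sketch of that pattern outside the paramtable machinery; loadWithDefault and the cfg map are stand-ins for BaseTable.LoadWithDefault, and the "300"/"30" defaults are the ones shown above.

package main

import (
    "fmt"
    "strconv"
    "time"
)

// loadWithDefault is a stand-in for BaseTable.LoadWithDefault: it returns the
// configured value, or the given default when the key is unset.
func loadWithDefault(cfg map[string]string, key, def string) string {
    if v, ok := cfg[key]; ok && v != "" {
        return v
    }
    return def
}

func main() {
    cfg := map[string]string{
        // "queryCoord.NextTargetSurviveTime" is left unset on purpose.
        "queryCoord.UpdateNextTargetInterval": "60",
    }

    surviveStr := loadWithDefault(cfg, "queryCoord.NextTargetSurviveTime", "300")
    surviveSec, err := strconv.ParseInt(surviveStr, 10, 64)
    if err != nil {
        panic(err)
    }
    nextTargetSurviveTime := time.Duration(surviveSec) * time.Second

    intervalStr := loadWithDefault(cfg, "queryCoord.UpdateNextTargetInterval", "30")
    intervalSec, err := strconv.ParseInt(intervalStr, 10, 64)
    if err != nil {
        panic(err)
    }
    updateNextTargetInterval := time.Duration(intervalSec) * time.Second

    fmt.Println(nextTargetSurviveTime, updateNextTargetInterval) // 5m0s 1m0s
}

Here NextTargetSurviveTime falls back to its 300-second default, while the configured "60" overrides the 30-second default for the update interval.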