mirror of https://github.com/milvus-io/milvus.git
Refactor dmChannelWatchInfo to queryCoord meta (#13543)
Signed-off-by: xige-16 <xi.ge@zilliz.com>
Branch: pull/13682/head
parent 398a11cda0
commit 11c8461c11
@@ -255,9 +255,8 @@ enum LoadType {
 message DmChannelWatchInfo {
   int64 collectionID = 1;
-  repeated string channelIDs = 2; // will deprecate
-  string dmChannel = 3;
-  int64 nodeID_loaded = 4;
+  string dmChannel = 2;
+  int64 nodeID_loaded = 3;
 }

 message QueryChannelInfo {
@@ -295,11 +294,10 @@ message CollectionInfo {
   int64 collectionID = 1;
   repeated int64 partitionIDs = 2;
   repeated PartitionStates partition_states = 3;
-  repeated DmChannelWatchInfo channel_infos = 4;
-  LoadType load_type = 5;
-  schema.CollectionSchema schema = 6;
-  repeated int64 released_partitionIDs = 7;
-  int64 inMemory_percentage = 8;
+  LoadType load_type = 4;
+  schema.CollectionSchema schema = 5;
+  repeated int64 released_partitionIDs = 6;
+  int64 inMemory_percentage = 7;
 }

 //---- synchronize messages proto between QueryCoord and QueryNode -----
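The two hunks above move the watch state out of CollectionInfo: DmChannelWatchInfo becomes a standalone record per dm channel, and its surviving fields are renumbered (dmChannel 3 to 2, nodeID_loaded 4 to 3). Renumbering protobuf fields breaks wire compatibility with previously persisted bytes; that is workable here only because the records also move to a fresh etcd prefix in the meta changes below rather than being decoded from the old layout. A minimal sketch of building and marshaling one such record with the generated querypb package; the IDs and channel name are illustrative values, and the key format is inferred from the reload loop added later in this commit. The hunks that follow are, apparently, the corresponding regenerated query_coord.pb.go.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"

    "github.com/milvus-io/milvus/internal/proto/querypb"
)

func main() {
    // One standalone watch record per dm channel, instead of a channel list
    // nested inside CollectionInfo.
    info := &querypb.DmChannelWatchInfo{
        CollectionID: 1000,                     // illustrative
        DmChannel:    "by-dev-rootcoord-dml_0", // illustrative
        NodeIDLoaded: 2,                        // illustrative
    }
    blob, err := proto.Marshal(info)
    if err != nil {
        panic(err)
    }
    // Keyed by channel name, e.g. "queryCoord-dmChannelWatchInfo/<channel>",
    // matching the dmChannelMetaPrefix constant introduced in meta.go below.
    key := fmt.Sprintf("queryCoord-dmChannelWatchInfo/%s", info.DmChannel)
    fmt.Println(key, len(blob))
}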
@@ -1669,9 +1669,8 @@ func (m *LoadBalanceRequest) GetSealedSegmentIDs() []int64 {
 type DmChannelWatchInfo struct {
 	CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
-	ChannelIDs []string `protobuf:"bytes,2,rep,name=channelIDs,proto3" json:"channelIDs,omitempty"`
-	DmChannel string `protobuf:"bytes,3,opt,name=dmChannel,proto3" json:"dmChannel,omitempty"`
-	NodeIDLoaded int64 `protobuf:"varint,4,opt,name=nodeID_loaded,json=nodeIDLoaded,proto3" json:"nodeID_loaded,omitempty"`
+	DmChannel string `protobuf:"bytes,2,opt,name=dmChannel,proto3" json:"dmChannel,omitempty"`
+	NodeIDLoaded int64 `protobuf:"varint,3,opt,name=nodeID_loaded,json=nodeIDLoaded,proto3" json:"nodeID_loaded,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -1709,13 +1708,6 @@ func (m *DmChannelWatchInfo) GetCollectionID() int64 {
 	return 0
 }

-func (m *DmChannelWatchInfo) GetChannelIDs() []string {
-	if m != nil {
-		return m.ChannelIDs
-	}
-	return nil
-}
-
 func (m *DmChannelWatchInfo) GetDmChannel() string {
 	if m != nil {
 		return m.DmChannel
@@ -2003,11 +1995,10 @@ type CollectionInfo struct {
 	CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
 	PartitionIDs []int64 `protobuf:"varint,2,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
 	PartitionStates []*PartitionStates `protobuf:"bytes,3,rep,name=partition_states,json=partitionStates,proto3" json:"partition_states,omitempty"`
-	ChannelInfos []*DmChannelWatchInfo `protobuf:"bytes,4,rep,name=channel_infos,json=channelInfos,proto3" json:"channel_infos,omitempty"`
-	LoadType LoadType `protobuf:"varint,5,opt,name=load_type,json=loadType,proto3,enum=milvus.proto.query.LoadType" json:"load_type,omitempty"`
-	Schema *schemapb.CollectionSchema `protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"`
-	ReleasedPartitionIDs []int64 `protobuf:"varint,7,rep,packed,name=released_partitionIDs,json=releasedPartitionIDs,proto3" json:"released_partitionIDs,omitempty"`
-	InMemoryPercentage int64 `protobuf:"varint,8,opt,name=inMemory_percentage,json=inMemoryPercentage,proto3" json:"inMemory_percentage,omitempty"`
+	LoadType LoadType `protobuf:"varint,4,opt,name=load_type,json=loadType,proto3,enum=milvus.proto.query.LoadType" json:"load_type,omitempty"`
+	Schema *schemapb.CollectionSchema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
+	ReleasedPartitionIDs []int64 `protobuf:"varint,6,rep,packed,name=released_partitionIDs,json=releasedPartitionIDs,proto3" json:"released_partitionIDs,omitempty"`
+	InMemoryPercentage int64 `protobuf:"varint,7,opt,name=inMemory_percentage,json=inMemoryPercentage,proto3" json:"inMemory_percentage,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -2059,13 +2050,6 @@ func (m *CollectionInfo) GetPartitionStates() []*PartitionStates {
 	return nil
 }

-func (m *CollectionInfo) GetChannelInfos() []*DmChannelWatchInfo {
-	if m != nil {
-		return m.ChannelInfos
-	}
-	return nil
-}
-
 func (m *CollectionInfo) GetLoadType() LoadType {
 	if m != nil {
 		return m.LoadType
@@ -2244,147 +2228,145 @@ func init() {
 func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }

 var fileDescriptor_aab7cc9a69ed26e8 = []byte{
-	// 2241 bytes of a gzipped FileDescriptorProto
+	// 2209 bytes of a gzipped FileDescriptorProto
 	// (old and regenerated gzipped descriptor byte dumps omitted; they carry
 	// no reviewable content beyond the size change above)
 }
@@ -22,6 +22,8 @@ import (
 	"sort"
 	"time"

+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 )
@@ -31,51 +33,54 @@ func defaultChannelAllocatePolicy() ChannelAllocatePolicy {
 }

 // ChannelAllocatePolicy helper function definition to allocate dmChannel to queryNode
-type ChannelAllocatePolicy func(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64) error
+type ChannelAllocatePolicy func(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64) error

-func shuffleChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64) error {
+func shuffleChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64) error {
 	if len(reqs) == 0 {
 		return nil
 	}
 	for {
-		availableNodes, err := cluster.onlineNodes()
-		if err != nil {
-			log.Debug(err.Error())
+		onlineNodeIDs := cluster.onlineNodeIDs()
+		if len(onlineNodeIDs) == 0 {
+			err := errors.New("no online QueryNode to allocate")
+			log.Error("shuffleChannelsToQueryNode failed", zap.Error(err))
 			if !wait {
 				return err
 			}
-			time.Sleep(1 * time.Second)
+			time.Sleep(shuffleWaitInterval)
 			continue
 		}
-		for _, id := range excludeNodeIDs {
-			delete(availableNodes, id)
-		}

+		var availableNodeIDs []int64
 		nodeID2NumChannels := make(map[int64]int)
-		for nodeID := range availableNodes {
-			numChannels, err := cluster.getNumDmChannels(nodeID)
-			if err != nil {
-				delete(availableNodes, nodeID)
+		for _, nodeID := range onlineNodeIDs {
+			// nodeID in excludeNodeIDs
+			if nodeIncluded(nodeID, excludeNodeIDs) {
 				continue
 			}
-			nodeID2NumChannels[nodeID] = numChannels
+			watchedChannelInfos := metaCache.getDmChannelInfosByNodeID(nodeID)
+			nodeID2NumChannels[nodeID] = len(watchedChannelInfos)
+			availableNodeIDs = append(availableNodeIDs, nodeID)
 		}

-		if len(availableNodes) > 0 {
-			nodeIDSlice := make([]int64, 0, len(availableNodes))
-			for nodeID := range availableNodes {
-				nodeIDSlice = append(nodeIDSlice, nodeID)
-			}
-
+		if len(availableNodeIDs) > 0 {
+			log.Debug("shuffleChannelsToQueryNode: shuffle channel to available QueryNode", zap.Int64s("available nodeIDs", availableNodeIDs))
 			for _, req := range reqs {
-				sort.Slice(nodeIDSlice, func(i, j int) bool {
-					return nodeID2NumChannels[nodeIDSlice[i]] < nodeID2NumChannels[nodeIDSlice[j]]
+				sort.Slice(availableNodeIDs, func(i, j int) bool {
+					return nodeID2NumChannels[availableNodeIDs[i]] < nodeID2NumChannels[availableNodeIDs[j]]
 				})
-				req.NodeID = nodeIDSlice[0]
-				nodeID2NumChannels[nodeIDSlice[0]]++
+				selectedNodeID := availableNodeIDs[0]
+				req.NodeID = selectedNodeID
+				nodeID2NumChannels[selectedNodeID]++
 			}
 			return nil
 		}

 		if !wait {
-			return errors.New("no queryNode to allocate")
+			err := errors.New("no available queryNode to allocate")
+			log.Error("shuffleChannelsToQueryNode failed", zap.Int64s("online nodeIDs", onlineNodeIDs), zap.Int64s("exclude nodeIDs", excludeNodeIDs), zap.Error(err))
+			return err
 		}
+		time.Sleep(shuffleWaitInterval)
 	}
 }
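The reworked allocator above no longer asks the cluster for per-node channel counts (getNumDmChannels is deleted further down); it derives each node's load from queryCoord meta via metaCache.getDmChannelInfosByNodeID and re-sorts the candidates before every assignment, so each request lands on the currently least-loaded node. A self-contained sketch of that selection step; the node IDs, counts, and channel names are made up for illustration:

package main

import (
    "fmt"
    "sort"
)

func main() {
    // In the real code these counts come from
    // len(metaCache.getDmChannelInfosByNodeID(nodeID)); here they are fixed.
    nodeID2NumChannels := map[int64]int{1: 3, 2: 0, 3: 2}
    availableNodeIDs := []int64{1, 2, 3}

    for _, channel := range []string{"dml_0", "dml_1", "dml_2"} {
        // Re-sort before each assignment so the head of the slice is always
        // the node currently watching the fewest channels.
        sort.Slice(availableNodeIDs, func(i, j int) bool {
            return nodeID2NumChannels[availableNodeIDs[i]] < nodeID2NumChannels[availableNodeIDs[j]]
        })
        selected := availableNodeIDs[0]
        nodeID2NumChannels[selected]++
        fmt.Printf("assign %s to node %d\n", channel, selected)
    }
    // With these counts, dml_0 and dml_1 go to node 2; dml_2 goes to node 2
    // or node 3 (they are tied, and sort.Slice is not stable).
}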
@@ -69,7 +69,7 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
 	}
 	reqs := []*querypb.WatchDmChannelsRequest{firstReq, secondReq}

-	err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, false, nil)
+	err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil)
 	assert.NotNil(t, err)

 	node, err := startQueryNodeServer(baseCtx)

@@ -78,7 +78,8 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
 	nodeID := node.queryNodeID
 	cluster.registerNode(baseCtx, nodeSession, nodeID, disConnect)
 	waitQueryNodeOnline(cluster, nodeID)
-	err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, false, nil)
+
+	err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil)
 	assert.Nil(t, err)

 	assert.Equal(t, nodeID, firstReq.NodeID)
@@ -25,7 +25,6 @@ import (
 	"strconv"
 	"sync"

-	"github.com/golang/protobuf/proto"
 	"go.uber.org/zap"

 	"github.com/milvus-io/milvus/internal/kv"
@@ -42,27 +41,22 @@ import (
 )

 const (
-	queryNodeMetaPrefix = "queryCoord-queryNodeMeta"
 	queryNodeInfoPrefix = "queryCoord-queryNodeInfo"
 )

 // Cluster manages all query node connections and grpc requests
 type Cluster interface {
 	reloadFromKV() error
-	getComponentInfos(ctx context.Context) ([]*internalpb.ComponentInfo, error)
+	getComponentInfos(ctx context.Context) []*internalpb.ComponentInfo

 	loadSegments(ctx context.Context, nodeID int64, in *querypb.LoadSegmentsRequest) error
 	releaseSegments(ctx context.Context, nodeID int64, in *querypb.ReleaseSegmentsRequest) error
-	getNumSegments(nodeID int64) (int, error)

 	watchDmChannels(ctx context.Context, nodeID int64, in *querypb.WatchDmChannelsRequest) error
 	watchDeltaChannels(ctx context.Context, nodeID int64, in *querypb.WatchDeltaChannelsRequest) error
 	//TODO:: removeDmChannel
-	getNumDmChannels(nodeID int64) (int, error)

 	hasWatchedQueryChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool
 	hasWatchedDeltaChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool
-	getCollectionInfosByID(ctx context.Context, nodeID int64) []*querypb.CollectionInfo
 	addQueryChannel(ctx context.Context, nodeID int64, in *querypb.AddQueryChannelRequest) error
 	removeQueryChannel(ctx context.Context, nodeID int64, in *querypb.RemoveQueryChannelRequest) error
 	releaseCollection(ctx context.Context, nodeID int64, in *querypb.ReleaseCollectionRequest) error
@@ -75,9 +69,9 @@ type Cluster interface {
 	getNodeInfoByID(nodeID int64) (Node, error)
 	removeNodeInfo(nodeID int64) error
 	stopNode(nodeID int64)
-	onlineNodes() (map[int64]Node, error)
+	onlineNodeIDs() []int64
 	isOnline(nodeID int64) (bool, error)
-	offlineNodes() (map[int64]Node, error)
+	offlineNodeIDs() []int64
 	hasNode(nodeID int64) bool

 	allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error
@@ -204,27 +198,6 @@ func (c *queryNodeCluster) reloadFromKV() error {
 		}
 	}

-	// load collection meta of queryNode from etcd
-	for _, nodeID := range toLoadMetaNodeIDs {
-		infoPrefix := fmt.Sprintf("%s/%d", queryNodeMetaPrefix, nodeID)
-		_, collectionValues, err := c.client.LoadWithPrefix(infoPrefix)
-		if err != nil {
-			return err
-		}
-		for _, value := range collectionValues {
-			collectionInfo := &querypb.CollectionInfo{}
-			err = proto.Unmarshal([]byte(value), collectionInfo)
-			if err != nil {
-				return err
-			}
-			err = c.nodes[nodeID].setCollectionInfo(collectionInfo)
-			if err != nil {
-				log.Warn("reloadFromKV: failed to add queryNode meta to cluster", zap.Int64("nodeID", nodeID), zap.String("error info", err.Error()))
-				return err
-			}
-			log.Debug("reloadFromKV: reload collection info from etcd", zap.Any("info", collectionInfo))
-		}
-	}
 	return nil
 }
@@ -232,21 +205,16 @@ func (c *queryNodeCluster) getSessionVersion() int64 {
 	return c.sessionVersion
 }

-func (c *queryNodeCluster) getComponentInfos(ctx context.Context) ([]*internalpb.ComponentInfo, error) {
+func (c *queryNodeCluster) getComponentInfos(ctx context.Context) []*internalpb.ComponentInfo {
 	c.RLock()
 	defer c.RUnlock()
-	subComponentInfos := make([]*internalpb.ComponentInfo, 0)
-	nodes, err := c.getOnlineNodes()
-	if err != nil {
-		log.Debug("getComponentInfos: failed get on service nodes", zap.String("error info", err.Error()))
-		return nil, err
-	}
-	for _, node := range nodes {
+	var subComponentInfos []*internalpb.ComponentInfo
+	for _, node := range c.nodes {
 		componentState := node.getComponentInfo(ctx)
 		subComponentInfos = append(subComponentInfos, componentState)
 	}

-	return subComponentInfos, nil
+	return subComponentInfos
 }

 func (c *queryNodeCluster) loadSegments(ctx context.Context, nodeID int64, in *querypb.LoadSegmentsRequest) error {
@@ -308,15 +276,18 @@ func (c *queryNodeCluster) watchDmChannels(ctx context.Context, nodeID int64, in
 		log.Debug("watchDmChannels: queryNode watch dm channel error", zap.String("error", err.Error()))
 		return err
 	}
-	channels := make([]string, 0)
-	for _, info := range in.Infos {
-		channels = append(channels, info.ChannelName)
+	dmChannelWatchInfo := make([]*querypb.DmChannelWatchInfo, len(in.Infos))
+	for index, info := range in.Infos {
+		dmChannelWatchInfo[index] = &querypb.DmChannelWatchInfo{
+			CollectionID: info.CollectionID,
+			DmChannel:    info.ChannelName,
+			NodeIDLoaded: nodeID,
+		}
 	}

-	collectionID := in.CollectionID
-	err = c.clusterMeta.addDmChannel(collectionID, nodeID, channels)
+	err = c.clusterMeta.setDmChannelInfos(dmChannelWatchInfo)
 	if err != nil {
-		log.Debug("watchDmChannels: queryNode watch dm channel error", zap.String("error", err.Error()))
+		log.Debug("watchDmChannels: update dmChannelWatchInfos to meta failed", zap.String("error", err.Error()))
 		return err
 	}
@@ -339,11 +310,6 @@ func (c *queryNodeCluster) watchDeltaChannels(ctx context.Context, nodeID int64,
 		log.Debug("watchDeltaChannels: queryNode watch delta channel error", zap.String("error", err.Error()))
 		return err
 	}
-	err = c.clusterMeta.setDeltaChannel(in.CollectionID, in.Infos)
-	if err != nil {
-		log.Debug("watchDeltaChannels: queryNode watch delta channel error", zap.String("error", err.Error()))
-		return err
-	}

 	return nil
 }
@@ -419,11 +385,7 @@ func (c *queryNodeCluster) releaseCollection(ctx context.Context, nodeID int64,
 		log.Debug("releaseCollection: queryNode release collection error", zap.String("error", err.Error()))
 		return err
 	}
-	err = c.clusterMeta.releaseCollection(in.CollectionID)
-	if err != nil {
-		log.Debug("releaseCollection: meta release collection error", zap.String("error", err.Error()))
-		return err
-	}

 	return nil
 }
@@ -445,13 +407,6 @@ func (c *queryNodeCluster) releasePartitions(ctx context.Context, nodeID int64,
 		return err
 	}

-	for _, partitionID := range in.PartitionIDs {
-		err = c.clusterMeta.releasePartition(in.CollectionID, partitionID)
-		if err != nil {
-			log.Debug("releasePartitions: meta release partitions error", zap.String("error", err.Error()))
-			return err
-		}
-	}
 	return nil
 }
@@ -579,49 +534,6 @@ func (c *queryNodeCluster) getMetrics(ctx context.Context, in *milvuspb.GetMetri
 	return ret
 }

-func (c *queryNodeCluster) getNumDmChannels(nodeID int64) (int, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if _, ok := c.nodes[nodeID]; !ok {
-		return 0, fmt.Errorf("getNumDmChannels: can't find query node by nodeID, nodeID = %d", nodeID)
-	}
-
-	numChannel := 0
-	collectionInfos := c.clusterMeta.showCollections()
-	for _, info := range collectionInfos {
-		for _, channelInfo := range info.ChannelInfos {
-			if channelInfo.NodeIDLoaded == nodeID {
-				numChannel++
-			}
-		}
-	}
-	return numChannel, nil
-}
-
-func (c *queryNodeCluster) getNumSegments(nodeID int64) (int, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if _, ok := c.nodes[nodeID]; !ok {
-		return 0, fmt.Errorf("getNumSegments: can't find query node by nodeID, nodeID = %d", nodeID)
-	}
-
-	numSegment := 0
-	segmentInfos := make([]*querypb.SegmentInfo, 0)
-	collectionInfos := c.clusterMeta.showCollections()
-	for _, info := range collectionInfos {
-		res := c.clusterMeta.showSegmentInfos(info.CollectionID, nil)
-		segmentInfos = append(segmentInfos, res...)
-	}
-	for _, info := range segmentInfos {
-		if info.NodeID == nodeID {
-			numSegment++
-		}
-	}
-	return numSegment, nil
-}
-
 func (c *queryNodeCluster) registerNode(ctx context.Context, session *sessionutil.Session, id UniqueID, state nodeState) error {
 	c.Lock()
 	defer c.Unlock()
@@ -678,14 +590,8 @@ func (c *queryNodeCluster) removeNodeInfo(nodeID int64) error {
 		return err
 	}

-	if _, ok := c.nodes[nodeID]; ok {
-		err = c.nodes[nodeID].clearNodeInfo()
-		if err != nil {
-			return err
-		}
-		delete(c.nodes, nodeID)
-		log.Debug("removeNodeInfo: delete nodeInfo in cluster MetaReplica and etcd", zap.Int64("nodeID", nodeID))
-	}
+	delete(c.nodes, nodeID)
+	log.Debug("removeNodeInfo: delete nodeInfo in cluster MetaReplica", zap.Int64("nodeID", nodeID))

 	return nil
 }
@@ -700,32 +606,32 @@ func (c *queryNodeCluster) stopNode(nodeID int64) {
 	}
 }

-func (c *queryNodeCluster) onlineNodes() (map[int64]Node, error) {
+func (c *queryNodeCluster) onlineNodeIDs() []int64 {
 	c.RLock()
 	defer c.RUnlock()

-	return c.getOnlineNodes()
-}
-
-func (c *queryNodeCluster) getOnlineNodes() (map[int64]Node, error) {
-	nodes := make(map[int64]Node)
+	var onlineNodeIDs []int64
 	for nodeID, node := range c.nodes {
 		if node.isOnline() {
-			nodes[nodeID] = node
+			onlineNodeIDs = append(onlineNodeIDs, nodeID)
 		}
 	}
-	if len(nodes) == 0 {
-		return nil, errors.New("getOnlineNodes: no queryNode is alive")
-	}

-	return nodes, nil
+	return onlineNodeIDs
 }

-func (c *queryNodeCluster) offlineNodes() (map[int64]Node, error) {
+func (c *queryNodeCluster) offlineNodeIDs() []int64 {
 	c.RLock()
 	defer c.RUnlock()

-	return c.getOfflineNodes()
+	var offlineNodeIDs []int64
+	for nodeID, node := range c.nodes {
+		if node.isOffline() {
+			offlineNodeIDs = append(offlineNodeIDs, nodeID)
+		}
+	}
+
+	return offlineNodeIDs
 }

 func (c *queryNodeCluster) hasNode(nodeID int64) bool {
@@ -739,20 +645,6 @@ func (c *queryNodeCluster) hasNode(nodeID int64) bool {
 	return false
 }

-func (c *queryNodeCluster) getOfflineNodes() (map[int64]Node, error) {
-	nodes := make(map[int64]Node)
-	for nodeID, node := range c.nodes {
-		if node.isOffline() {
-			nodes[nodeID] = node
-		}
-	}
-	if len(nodes) == 0 {
-		return nil, errors.New("getOfflineNodes: no queryNode is offline")
-	}
-
-	return nodes, nil
-}
-
 func (c *queryNodeCluster) isOnline(nodeID int64) (bool, error) {
 	c.RLock()
 	defer c.RUnlock()
@@ -783,22 +675,12 @@ func (c *queryNodeCluster) isOnline(nodeID int64) (bool, error) {
 // }
 //}

-func (c *queryNodeCluster) getCollectionInfosByID(ctx context.Context, nodeID int64) []*querypb.CollectionInfo {
-	c.RLock()
-	defer c.RUnlock()
-	if node, ok := c.nodes[nodeID]; ok {
-		return node.showCollections()
-	}
-
-	return nil
-}
-
 func (c *queryNodeCluster) allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
-	return c.segmentAllocator(ctx, reqs, c, wait, excludeNodeIDs, includeNodeIDs)
+	return c.segmentAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs, includeNodeIDs)
 }

 func (c *queryNodeCluster) allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64) error {
-	return c.channelAllocator(ctx, reqs, c, wait, excludeNodeIDs)
+	return c.channelAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs)
 }

 func (c *queryNodeCluster) estimateSegmentsSize(segments *querypb.LoadSegmentsRequest) (int64, error) {
@@ -25,7 +25,6 @@ import (
 	"strconv"
 	"testing"

-	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 	"go.uber.org/zap"
@@ -445,22 +444,11 @@ func TestReloadClusterFromKV(t *testing.T) {
 	sessionKey := fmt.Sprintf("%s/%d", queryNodeInfoPrefix, 100)
 	kvs[sessionKey] = string(sessionBlob)

-	collectionInfo := &querypb.CollectionInfo{
-		CollectionID: defaultCollectionID,
-	}
-	collectionBlobs, err := proto.Marshal(collectionInfo)
-	assert.Nil(t, err)
-	nodeKey := fmt.Sprintf("%s/%d", queryNodeMetaPrefix, 100)
-	kvs[nodeKey] = string(collectionBlobs)
-
 	err = kv.MultiSave(kvs)
 	assert.Nil(t, err)

 	cluster.reloadFromKV()

 	assert.Equal(t, 1, len(cluster.nodes))
-	collection := cluster.getCollectionInfosByID(context.Background(), 100)
-	assert.Equal(t, defaultCollectionID, collection[0].CollectionID)

 	err = removeAllSession()
 	assert.Nil(t, err)
@@ -512,8 +500,8 @@ func TestGrpcRequest(t *testing.T) {
 	waitQueryNodeOnline(cluster, nodeID)

 	t.Run("Test GetComponentInfos", func(t *testing.T) {
-		_, err := cluster.getComponentInfos(baseCtx)
-		assert.Nil(t, err)
+		infos := cluster.getComponentInfos(baseCtx)
+		assert.Equal(t, 1, len(infos))
 	})

 	t.Run("Test LoadSegments", func(t *testing.T) {
@@ -523,9 +511,10 @@ func TestGrpcRequest(t *testing.T) {
 		CollectionID: defaultCollectionID,
 	}
 	loadSegmentReq := &querypb.LoadSegmentsRequest{
-		DstNodeID: nodeID,
-		Infos:     []*querypb.SegmentLoadInfo{segmentLoadInfo},
-		Schema:    genCollectionSchema(defaultCollectionID, false),
+		DstNodeID:    nodeID,
+		Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
+		Schema:       genCollectionSchema(defaultCollectionID, false),
+		CollectionID: defaultCollectionID,
 	}
 	err := cluster.loadSegments(baseCtx, nodeID, loadSegmentReq)
 	assert.Nil(t, err)
@@ -678,8 +667,9 @@ func TestEstimateSegmentSize(t *testing.T) {
 	}

 	loadReq := &querypb.LoadSegmentsRequest{
-		Schema: schema,
-		Infos:  []*querypb.SegmentLoadInfo{loadInfo},
+		Schema:       schema,
+		Infos:        []*querypb.SegmentLoadInfo{loadInfo},
+		CollectionID: defaultCollectionID,
 	}

 	size, err := estimateSegmentsSize(loadReq, dataKV)
@@ -641,6 +641,7 @@ func (qc *QueryCoord) ReleasePartitions(ctx context.Context, req *querypb.Releas
 			baseTask:                 baseTask,
 			ReleasePartitionsRequest: req,
 			cluster:                  qc.cluster,
+			meta:                     qc.meta,
 		}
 	}
 	err := qc.scheduler.Enqueue(releaseTask)
@@ -60,8 +60,7 @@ func TestReloadFromKV(t *testing.T) {
 	err = kv.Save(key, string(value))
 	assert.Nil(t, err)

-	meta.addCollection(defaultCollectionID, genCollectionSchema(defaultCollectionID, false))
-	meta.setLoadType(defaultCollectionID, querypb.LoadType_LoadPartition)
+	meta.addCollection(defaultCollectionID, querypb.LoadType_LoadPartition, genCollectionSchema(defaultCollectionID, false))

 	t.Run("Test_PartitionNotExist", func(t *testing.T) {
 		indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil, nil, nil)
@@ -129,8 +128,7 @@ func TestCheckIndexLoop(t *testing.T) {
 		childCancel()
 		indexChecker.wg.Wait()
 	})
-	meta.addCollection(defaultCollectionID, genCollectionSchema(defaultCollectionID, false))
-	meta.setLoadType(defaultCollectionID, querypb.LoadType_loadCollection)
+	meta.addCollection(defaultCollectionID, querypb.LoadType_loadCollection, genCollectionSchema(defaultCollectionID, false))
 	t.Run("Test_GetIndexInfo", func(t *testing.T) {
 		childCtx, childCancel := context.WithCancel(context.Background())
 		indexChecker, err := newIndexChecker(childCtx, kv, meta, nil, nil, rootCoord, indexCoord, nil)
@@ -42,6 +42,7 @@ import (
 const (
 	collectionMetaPrefix          = "queryCoord-collectionMeta"
+	dmChannelMetaPrefix           = "queryCoord-dmChannelWatchInfo"
 	queryChannelMetaPrefix        = "queryCoord-queryChannel"
 	deltaChannelMetaPrefix        = "queryCoord-deltaChannel"
 	globalQuerySeekPositionPrefix = "queryCoord-globalQuerySeekPosition"
@@ -58,26 +59,23 @@ type Meta interface {
 	showCollections() []*querypb.CollectionInfo
 	hasCollection(collectionID UniqueID) bool
 	getCollectionInfoByID(collectionID UniqueID) (*querypb.CollectionInfo, error)
-	addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error
+	addCollection(collectionID UniqueID, loadType querypb.LoadType, schema *schemapb.CollectionSchema) error
 	releaseCollection(collectionID UniqueID) error

-	addPartition(collectionID UniqueID, partitionID UniqueID) error
+	addPartitions(collectionID UniqueID, partitionIDs []UniqueID) error
 	showPartitions(collectionID UniqueID) ([]*querypb.PartitionStates, error)
 	hasPartition(collectionID UniqueID, partitionID UniqueID) bool
 	hasReleasePartition(collectionID UniqueID, partitionID UniqueID) bool
-	releasePartition(collectionID UniqueID, partitionID UniqueID) error
+	releasePartitions(collectionID UniqueID, partitionIDs []UniqueID) error

 	deleteSegmentInfoByNodeID(nodeID UniqueID) error
 	setSegmentInfos(segmentInfos map[UniqueID]*querypb.SegmentInfo) error
 	showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo
 	getSegmentInfoByID(segmentID UniqueID) (*querypb.SegmentInfo, error)
 	getSegmentInfosByNode(nodeID int64) []*querypb.SegmentInfo

 	getPartitionStatesByID(collectionID UniqueID, partitionID UniqueID) (*querypb.PartitionStates, error)

-	getDmChannelsByNodeID(collectionID UniqueID, nodeID int64) ([]string, error)
-	addDmChannel(collectionID UniqueID, nodeID int64, channels []string) error
-	removeDmChannel(collectionID UniqueID, nodeID int64, channels []string) error
+	getDmChannelInfosByNodeID(nodeID int64) []*querypb.DmChannelWatchInfo
+	setDmChannelInfos(channelInfos []*querypb.DmChannelWatchInfo) error

 	getDeltaChannelsByCollectionID(collectionID UniqueID) ([]*datapb.VchannelInfo, error)
 	setDeltaChannel(collectionID UniqueID, info []*datapb.VchannelInfo) error
@ -86,7 +84,6 @@ type Meta interface {
|
|||
getQueryStreamByID(collectionID UniqueID) (msgstream.MsgStream, error)
|
||||
|
||||
setLoadType(collectionID UniqueID, loadType querypb.LoadType) error
|
||||
getLoadType(collectionID UniqueID) (querypb.LoadType, error)
|
||||
setLoadPercentage(collectionID UniqueID, partitionID UniqueID, percentage int64, loadType querypb.LoadType) error
|
||||
//printMeta()
|
||||
saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2SealedSegmentChangeInfos, error)
|
||||
|
@ -111,6 +108,8 @@ type MetaReplica struct {
|
|||
channelMu sync.RWMutex
|
||||
deltaChannelInfos map[UniqueID][]*datapb.VchannelInfo
|
||||
deltaChannelMu sync.RWMutex
|
||||
dmChannelInfos map[string]*querypb.DmChannelWatchInfo
|
||||
dmChannelMu sync.RWMutex
|
||||
queryStreams map[UniqueID]msgstream.MsgStream
|
||||
streamMu sync.RWMutex
|
||||
|
||||
|
@ -124,6 +123,7 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory msgstream.Factory, idAll
|
|||
segmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
|
||||
queryChannelInfos := make(map[UniqueID]*querypb.QueryChannelInfo)
|
||||
deltaChannelInfos := make(map[UniqueID][]*datapb.VchannelInfo)
|
||||
dmChannelInfos := make(map[string]*querypb.DmChannelWatchInfo)
|
||||
queryMsgStream := make(map[UniqueID]msgstream.MsgStream)
|
||||
position := &internalpb.MsgPosition{}
|
||||
|
||||
|
@ -138,6 +138,7 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory msgstream.Factory, idAll
|
|||
segmentInfos: segmentInfos,
|
||||
queryChannelInfos: queryChannelInfos,
|
||||
deltaChannelInfos: deltaChannelInfos,
|
||||
dmChannelInfos: dmChannelInfos,
|
||||
queryStreams: queryMsgStream,
|
||||
globalSeekPosition: position,
|
||||
}
|
||||
|
@ -221,6 +222,20 @@ func (m *MetaReplica) reloadFromKV() error {
|
|||
m.deltaChannelInfos[collectionID] = append(m.deltaChannelInfos[collectionID], deltaChannelInfo)
|
||||
}
|
||||
|
||||
dmChannelKeys, dmChannelValues, err := m.client.LoadWithPrefix(dmChannelMetaPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for index := range dmChannelKeys {
|
||||
dmChannel := filepath.Base(dmChannelKeys[index])
|
||||
dmChannelWatchInfo := &querypb.DmChannelWatchInfo{}
|
||||
err = proto.Unmarshal([]byte(dmChannelValues[index]), dmChannelWatchInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.dmChannelInfos[dmChannel] = dmChannelWatchInfo
|
||||
}
|
||||
|
||||
globalSeekPosValue, err := m.client.Load(globalQuerySeekPositionPrefix)
|
||||
if err == nil {
|
||||
position := &internalpb.MsgPosition{}
|
||||
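Because each watch-info key ends in the channel name, reloadFromKV can recover the map key with filepath.Base alone. A tiny standalone illustration (the key value is hypothetical, not taken from a running cluster):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// hypothetical watch-info key: <dmChannelMetaPrefix>/<collectionID>/<channelName>
	key := "queryCoord-dmChannelWatchInfo/1001/by-dev-rootcoord-dml_0"
	fmt.Println(filepath.Base(key)) // prints: by-dev-rootcoord-dml_0
}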
@@ -308,17 +323,16 @@ func (m *MetaReplica) hasReleasePartition(collectionID UniqueID, partitionID Uni
return false
}

func (m *MetaReplica) addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error {
func (m *MetaReplica) addCollection(collectionID UniqueID, loadType querypb.LoadType, schema *schemapb.CollectionSchema) error {
hasCollection := m.hasCollection(collectionID)
if !hasCollection {
partitions := make([]UniqueID, 0)
partitionStates := make([]*querypb.PartitionStates, 0)
channels := make([]*querypb.DmChannelWatchInfo, 0)
var partitionIDs []UniqueID
var partitionStates []*querypb.PartitionStates
newCollection := &querypb.CollectionInfo{
CollectionID: collectionID,
PartitionIDs: partitions,
PartitionIDs: partitionIDs,
PartitionStates: partitionStates,
ChannelInfos: channels,
LoadType: loadType,
Schema: schema,
}
err := saveGlobalCollectionInfo(collectionID, newCollection, m.client)

@@ -334,79 +348,122 @@ func (m *MetaReplica) addCollection(collectionID UniqueID, schema *schemapb.Coll
return nil
}

func (m *MetaReplica) addPartition(collectionID UniqueID, partitionID UniqueID) error {
func (m *MetaReplica) addPartitions(collectionID UniqueID, partitionIDs []UniqueID) error {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()

if info, ok := m.collectionInfos[collectionID]; ok {
col := proto.Clone(info).(*querypb.CollectionInfo)
log.Debug("add a partition to MetaReplica...", zap.Int64s("partitionIDs", col.PartitionIDs))
for _, id := range col.PartitionIDs {
if id == partitionID {
return nil
collectionInfo := proto.Clone(info).(*querypb.CollectionInfo)
loadedPartitionID2State := make(map[UniqueID]*querypb.PartitionStates)
for _, partitionID := range partitionIDs {
loadedPartitionID2State[partitionID] = &querypb.PartitionStates{
PartitionID: partitionID,
State: querypb.PartitionState_NotPresent,
}
}
col.PartitionIDs = append(col.PartitionIDs, partitionID)
releasedPartitionIDs := make([]UniqueID, 0)
for _, id := range col.ReleasedPartitionIDs {
if id != partitionID {
releasedPartitionIDs = append(releasedPartitionIDs, id)
}
}
col.ReleasedPartitionIDs = releasedPartitionIDs
col.PartitionStates = append(col.PartitionStates, &querypb.PartitionStates{
PartitionID: partitionID,
State: querypb.PartitionState_NotPresent,
})

log.Debug("add a partition to MetaReplica", zap.Int64s("partitionIDs", col.PartitionIDs))
err := saveGlobalCollectionInfo(collectionID, col, m.client)
for offset, partitionID := range collectionInfo.PartitionIDs {
loadedPartitionID2State[partitionID] = collectionInfo.PartitionStates[offset]
}

newPartitionIDs := make([]UniqueID, 0)
newPartitionStates := make([]*querypb.PartitionStates, 0)
for partitionID, state := range loadedPartitionID2State {
newPartitionIDs = append(newPartitionIDs, partitionID)
newPartitionStates = append(newPartitionStates, state)
}

newReleasedPartitionIDs := make([]UniqueID, 0)
for _, releasedPartitionID := range collectionInfo.ReleasedPartitionIDs {
if _, ok = loadedPartitionID2State[releasedPartitionID]; !ok {
newReleasedPartitionIDs = append(newReleasedPartitionIDs, releasedPartitionID)
}
}

collectionInfo.PartitionIDs = newPartitionIDs
collectionInfo.PartitionStates = newPartitionStates
collectionInfo.ReleasedPartitionIDs = newReleasedPartitionIDs

log.Debug("add a partition to MetaReplica", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", collectionInfo.PartitionIDs))
err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
log.Error("save collectionInfo error", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", collectionInfo.PartitionIDs), zap.Any("error", err.Error()))
return err
}
m.collectionInfos[collectionID] = col
m.collectionInfos[collectionID] = collectionInfo
return nil
}
return errors.New("addPartition: can't find collection when add partition")
return fmt.Errorf("addPartition: can't find collection %d when add partition", collectionID)
}
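The old addPartition appended one ID at a time and bailed out on duplicates; the new addPartitions merges everything through a map keyed by partition ID, which makes repeated loads idempotent and drops re-loaded IDs from the released list in one pass. A stripped-down sketch of the same merge pattern, using bare int64 IDs instead of the querypb types:

package main

import "fmt"

func mergePartitions(existing, toAdd, released []int64) (kept, stillReleased []int64) {
	loaded := make(map[int64]struct{})
	for _, id := range toAdd {
		loaded[id] = struct{}{}
	}
	for _, id := range existing {
		loaded[id] = struct{}{} // duplicates collapse in the map
	}
	for id := range loaded {
		kept = append(kept, id)
	}
	// a partition that is loaded again must leave the released list
	for _, id := range released {
		if _, ok := loaded[id]; !ok {
			stillReleased = append(stillReleased, id)
		}
	}
	return kept, stillReleased
}

func main() {
	kept, rel := mergePartitions([]int64{100}, []int64{100, 101}, []int64{101, 102})
	fmt.Println(kept, rel) // e.g. [100 101] [102] (map iteration order may vary)
}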
func (m *MetaReplica) deleteSegmentInfoByNodeID(nodeID UniqueID) error {
m.segmentMu.Lock()
defer m.segmentMu.Unlock()

segmentIDsToRemove := make([]UniqueID, 0)
for segmentID, info := range m.segmentInfos {
if info.NodeID == nodeID {
segmentIDsToRemove = append(segmentIDsToRemove, segmentID)
}
}

err := multiRemoveSegmentInfo(segmentIDsToRemove, m.client)
func (m *MetaReplica) releaseCollection(collectionID UniqueID) error {
err := removeCollectionMeta(collectionID, m.client)
if err != nil {
log.Error("remove segmentInfo from etcd error", zap.Any("error", err.Error()), zap.Int64s("segmentIDs", segmentIDsToRemove))
log.Warn("remove collectionInfo from etcd failed", zap.Int64("collectionID", collectionID), zap.Any("error", err.Error()))
return err
}
for _, segmentID := range segmentIDsToRemove {
delete(m.segmentInfos, segmentID)

m.collectionMu.Lock()
delete(m.collectionInfos, collectionID)
m.collectionMu.Unlock()

m.deltaChannelMu.Lock()
delete(m.deltaChannelInfos, collectionID)
m.deltaChannelMu.Unlock()

m.dmChannelMu.Lock()
for dmChannel, info := range m.dmChannelInfos {
if info.CollectionID == collectionID {
delete(m.dmChannelInfos, dmChannel)
}
}
m.dmChannelMu.Unlock()

return nil
}

func (m *MetaReplica) setSegmentInfos(segmentInfos map[UniqueID]*querypb.SegmentInfo) error {
m.segmentMu.Lock()
defer m.segmentMu.Unlock()
func (m *MetaReplica) releasePartitions(collectionID UniqueID, releasedPartitionIDs []UniqueID) error {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()
info, ok := m.collectionInfos[collectionID]
if !ok {
return nil
}
collectionInfo := proto.Clone(info).(*querypb.CollectionInfo)

err := multiSaveSegmentInfos(segmentInfos, m.client)
releasedPartitionMap := make(map[UniqueID]struct{})
for _, partitionID := range releasedPartitionIDs {
releasedPartitionMap[partitionID] = struct{}{}
}
for _, partitionID := range collectionInfo.ReleasedPartitionIDs {
releasedPartitionMap[partitionID] = struct{}{}
}

newPartitionIDs := make([]UniqueID, 0)
newPartitionStates := make([]*querypb.PartitionStates, 0)
for offset, partitionID := range collectionInfo.PartitionIDs {
if _, ok = releasedPartitionMap[partitionID]; !ok {
newPartitionIDs = append(newPartitionIDs, partitionID)
newPartitionStates = append(newPartitionStates, collectionInfo.PartitionStates[offset])
}
}

newReleasedPartitionIDs := make([]UniqueID, 0)
for partitionID := range releasedPartitionMap {
newReleasedPartitionIDs = append(newReleasedPartitionIDs, partitionID)
}

collectionInfo.PartitionIDs = newPartitionIDs
collectionInfo.PartitionStates = newPartitionStates
collectionInfo.ReleasedPartitionIDs = newReleasedPartitionIDs

err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.client)
if err != nil {
log.Error("save segmentInfos error", zap.Any("segmentInfos", segmentInfos), zap.Error(err))
log.Error("releasePartition: remove partition infos error", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", releasedPartitionIDs), zap.Any("error", err.Error()))
return err
}

for segmentID, info := range segmentInfos {
m.segmentInfos[segmentID] = info
}
m.collectionInfos[collectionID] = collectionInfo

return nil
}

@@ -418,7 +475,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
// generate segment change info according to the segment info to be updated
col2SegmentChangeInfos := make(col2SealedSegmentChangeInfos)

segmentsCompactionFrom := make([]UniqueID, 0)
segmentsCompactionFrom := make([]*querypb.SegmentInfo, 0)
// get segmentInfos to colSegmentInfos
for collectionID, onlineInfos := range saves {
segmentsChangeInfo := &querypb.SealedSegmentsChangeInfo{

@@ -453,7 +510,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
OfflineNodeID: compactionSegmentInfo.NodeID,
OfflineSegments: []*querypb.SegmentInfo{compactionSegmentInfo},
})
segmentsCompactionFrom = append(segmentsCompactionFrom, compactionSegmentID)
segmentsCompactionFrom = append(segmentsCompactionFrom, compactionSegmentInfo)
} else {
return nil, fmt.Errorf("saveGlobalSealedSegInfos: the compacted segment %d has not been loaded into memory", compactionSegmentID)
}

@@ -514,7 +571,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
if err != nil {
return col2SegmentChangeInfos, err
}
segmentKey := fmt.Sprintf("%s/%d", util.SegmentMetaPrefix, info.SegmentID)
segmentKey := fmt.Sprintf("%s/%d/%d/%d", util.SegmentMetaPrefix, info.CollectionID, info.PartitionID, info.SegmentID)
segmentInfoKvs[segmentKey] = string(segmentInfoBytes)
}
}
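Segment metadata keys now embed the collection and partition IDs, so a whole collection's segments can be dropped with a single prefix removal instead of one delete per segment. Illustratively (the actual string value of util.SegmentMetaPrefix is not shown in this diff, so a placeholder is used, and the IDs are hypothetical):

package main

import "fmt"

const segmentMetaPrefix = "queryCoord-segmentMeta" // placeholder for util.SegmentMetaPrefix

func main() {
	collectionID, partitionID, segmentID := int64(1001), int64(2001), int64(3001) // hypothetical IDs

	oldKey := fmt.Sprintf("%s/%d", segmentMetaPrefix, segmentID)
	newKey := fmt.Sprintf("%s/%d/%d/%d", segmentMetaPrefix, collectionID, partitionID, segmentID)

	fmt.Println(oldKey) // queryCoord-segmentMeta/3001
	fmt.Println(newKey) // queryCoord-segmentMeta/1001/2001/3001
	// removing prefix "queryCoord-segmentMeta/1001" now clears every segment of collection 1001
}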
@@ -526,8 +583,8 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
}

// remove compacted segment info from etcd
for _, segmentID := range segmentsCompactionFrom {
segmentKey := fmt.Sprintf("%s/%d", util.SegmentMetaPrefix, segmentID)
for _, segmentInfo := range segmentsCompactionFrom {
segmentKey := fmt.Sprintf("%s/%d/%d/%d", util.SegmentMetaPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err := m.client.Remove(segmentKey)
if err != nil {
panic(err)

@@ -575,8 +632,8 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
m.segmentInfos[segmentID] = info
}
}
for _, segmentID := range segmentsCompactionFrom {
delete(m.segmentInfos, segmentID)
for _, segmentInfo := range segmentsCompactionFrom {
delete(m.segmentInfos, segmentInfo.SegmentID)
}
m.segmentMu.Unlock()

@@ -650,7 +707,7 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio

// remove meta from etcd
for _, info := range removes {
segmentKey := fmt.Sprintf("%s/%d", util.SegmentMetaPrefix, info.SegmentID)
segmentKey := fmt.Sprintf("%s/%d/%d/%d", util.SegmentMetaPrefix, info.CollectionID, info.PartitionID, info.SegmentID)
err = m.client.Remove(segmentKey)
if err != nil {
panic(err)

@@ -818,153 +875,36 @@ func (m *MetaReplica) getPartitionStatesByID(collectionID UniqueID, partitionID
return nil, errors.New("getPartitionStateByID: can't find collectionID in collectionInfo")
}

func (m *MetaReplica) releaseCollection(collectionID UniqueID) error {
err := removeGlobalCollectionInfo(collectionID, m.client)
func (m *MetaReplica) getDmChannelInfosByNodeID(nodeID int64) []*querypb.DmChannelWatchInfo {
m.dmChannelMu.RLock()
defer m.dmChannelMu.RUnlock()

var watchedDmChannelWatchInfo []*querypb.DmChannelWatchInfo
for _, channelInfo := range m.dmChannelInfos {
if channelInfo.NodeIDLoaded == nodeID {
watchedDmChannelWatchInfo = append(watchedDmChannelWatchInfo, proto.Clone(channelInfo).(*querypb.DmChannelWatchInfo))
}
}

return watchedDmChannelWatchInfo
}

func (m *MetaReplica) setDmChannelInfos(dmChannelWatchInfos []*querypb.DmChannelWatchInfo) error {
m.dmChannelMu.Lock()
defer m.dmChannelMu.Unlock()

err := saveDmChannelWatchInfos(dmChannelWatchInfos, m.client)
if err != nil {
log.Warn("remove collectionInfo from etcd failed", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
log.Error("save dmChannelWatchInfo error", zap.Any("error", err.Error()))
return err
}

m.collectionMu.Lock()
delete(m.collectionInfos, collectionID)
m.collectionMu.Unlock()
for _, channelInfo := range dmChannelWatchInfos {
m.dmChannelInfos[channelInfo.DmChannel] = channelInfo
}

return nil
}

func (m *MetaReplica) releasePartition(collectionID UniqueID, partitionID UniqueID) error {
info, err := m.getCollectionInfoByID(collectionID)
if err == nil {
newPartitionIDs := make([]UniqueID, 0)
newPartitionStates := make([]*querypb.PartitionStates, 0)
for offset, id := range info.PartitionIDs {
if id != partitionID {
newPartitionIDs = append(newPartitionIDs, id)
newPartitionStates = append(newPartitionStates, info.PartitionStates[offset])
}
}
info.PartitionIDs = newPartitionIDs
info.PartitionStates = newPartitionStates

releasedPartitionIDs := make([]UniqueID, 0)
for _, id := range info.ReleasedPartitionIDs {
if id != partitionID {
releasedPartitionIDs = append(releasedPartitionIDs, id)
}
}
releasedPartitionIDs = append(releasedPartitionIDs, partitionID)
info.ReleasedPartitionIDs = releasedPartitionIDs

// If a user loaded collectionA, then released a partitionB which belongs to collectionA,
// and then loaded collectionA again, and we didn't set the inMemoryPercentage to 0 when releasing
// partitionB, the second loading of collectionA would return directly because
// the inMemoryPercentage in the ShowCollection response would still be the old value -- 100.
// So when releasing a partition, inMemoryPercentage should be set to 0.
info.InMemoryPercentage = 0

err = saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("releasePartition: save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID))
return err
}

m.collectionMu.Lock()
m.collectionInfos[collectionID] = info
m.collectionMu.Unlock()

return nil
}

return err
}
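The comment in the removed releasePartition above records the invariant that motivated it: a stale 100% in-memory percentage makes the next load a no-op. A minimal sketch of that interaction, with a plain struct standing in for querypb.CollectionInfo and a hypothetical, simplified load path:

package main

import "fmt"

// collectionInfo stands in for querypb.CollectionInfo in this sketch.
type collectionInfo struct{ inMemoryPercentage int64 }

// load is a hypothetical load path: it shows why a release must reset
// the percentage, or the next load returns immediately.
func load(info *collectionInfo) string {
	if info.inMemoryPercentage == 100 {
		return "skipped: already reported fully loaded"
	}
	info.inMemoryPercentage = 100 // pretend the load completed
	return "loaded"
}

func main() {
	info := &collectionInfo{inMemoryPercentage: 100} // stale value after a partition release
	fmt.Println(load(info))                          // skipped: already reported fully loaded
	info.inMemoryPercentage = 0                      // what releasePartition now does
	fmt.Println(load(info))                          // loaded
}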
func (m *MetaReplica) getDmChannelsByNodeID(collectionID UniqueID, nodeID int64) ([]string, error) {
m.collectionMu.RLock()
defer m.collectionMu.RUnlock()

if info, ok := m.collectionInfos[collectionID]; ok {
channels := make([]string, 0)
for _, channelInfo := range info.ChannelInfos {
if channelInfo.NodeIDLoaded == nodeID {
channels = append(channels, channelInfo.ChannelIDs...)
}
}
return channels, nil
}

return nil, errors.New("getDmChannelsByNodeID: can't find collection in collectionInfos")
}

func (m *MetaReplica) addDmChannel(collectionID UniqueID, nodeID int64, channels []string) error {
//before add channel, should ensure toAddedChannels not in MetaReplica
info, err := m.getCollectionInfoByID(collectionID)
if err == nil {
findNodeID := false
for _, channelInfo := range info.ChannelInfos {
if channelInfo.NodeIDLoaded == nodeID {
findNodeID = true
channelInfo.ChannelIDs = append(channelInfo.ChannelIDs, channels...)
}
}
if !findNodeID {
newChannelInfo := &querypb.DmChannelWatchInfo{
NodeIDLoaded: nodeID,
ChannelIDs: channels,
}
info.ChannelInfos = append(info.ChannelInfos, newChannelInfo)
}

err = saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
m.collectionMu.Lock()
m.collectionInfos[collectionID] = info
m.collectionMu.Unlock()
return nil
}

return errors.New("addDmChannels: can't find collection in collectionInfos")
}

func (m *MetaReplica) removeDmChannel(collectionID UniqueID, nodeID int64, channels []string) error {
info, err := m.getCollectionInfoByID(collectionID)
if err == nil {
for _, channelInfo := range info.ChannelInfos {
if channelInfo.NodeIDLoaded == nodeID {
newChannelIDs := make([]string, 0)
for _, channelID := range channelInfo.ChannelIDs {
findChannel := false
for _, channel := range channels {
if channelID == channel {
findChannel = true
}
}
if !findChannel {
newChannelIDs = append(newChannelIDs, channelID)
}
}
channelInfo.ChannelIDs = newChannelIDs
}
}

err := saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}

m.collectionMu.Lock()
m.collectionInfos[collectionID] = info
m.collectionMu.Unlock()

return nil
}

return errors.New("addDmChannels: can't find collection in collectionInfos")
}

func createQueryChannel(collectionID UniqueID) *querypb.QueryChannelInfo {
searchPrefix := Params.SearchChannelPrefix
searchResultPrefix := Params.SearchResultChannelPrefix
@@ -994,7 +934,7 @@ func (m *MetaReplica) getDeltaChannelsByCollectionID(collectionID UniqueID) ([]*
return infos, nil
}

return nil, fmt.Errorf("delta channel not exist in meta")
return nil, fmt.Errorf("delta channel not exist in meta, collectionID = %d", collectionID)
}

func (m *MetaReplica) setDeltaChannel(collectionID UniqueID, infos []*datapb.VchannelInfo) error {

@@ -1008,10 +948,10 @@ func (m *MetaReplica) setDeltaChannel(collectionID UniqueID, infos []*datapb.Vch

err := saveDeltaChannelInfo(collectionID, infos, m.client)
if err != nil {
log.Error("save delta channel info error", zap.Error(err))
log.Error("save delta channel info error", zap.Int64("collectionID", collectionID), zap.Error(err))
return err
}
log.Debug("save delta channel infos to meta", zap.Any("collectionID", collectionID), zap.Any("infos", infos))
log.Debug("save delta channel infos to meta", zap.Any("collectionID", collectionID))
m.deltaChannelInfos[collectionID] = infos
return nil
}

@@ -1029,7 +969,7 @@ func (m *MetaReplica) getQueryChannelInfoByID(collectionID UniqueID) (*querypb.Q
// all collections use the same query channel
colIDForAssignChannel := UniqueID(0)
info := createQueryChannel(colIDForAssignChannel)
err := saveQueryChannelInfo(collectionID, info, m.client)
err := saveQueryChannelInfo(info, m.client)
if err != nil {
log.Error("getQueryChannel: save channel to etcd error", zap.Error(err))
return nil, err

@@ -1071,41 +1011,34 @@ func (m *MetaReplica) getQueryStreamByID(collectionID UniqueID) (msgstream.MsgSt
}

func (m *MetaReplica) setLoadType(collectionID UniqueID, loadType querypb.LoadType) error {
info, err := m.getCollectionInfoByID(collectionID)
if err == nil {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()

if _, ok := m.collectionInfos[collectionID]; ok {
info := proto.Clone(m.collectionInfos[collectionID]).(*querypb.CollectionInfo)
info.LoadType = loadType
err := saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
m.collectionMu.Lock()
m.collectionInfos[collectionID] = info
m.collectionMu.Unlock()

m.collectionInfos[collectionID] = info
return nil
}

return errors.New("setLoadType: can't find collection in collectionInfos")
}

func (m *MetaReplica) getLoadType(collectionID UniqueID) (querypb.LoadType, error) {
m.collectionMu.RLock()
defer m.collectionMu.RUnlock()

if info, ok := m.collectionInfos[collectionID]; ok {
return info.LoadType, nil
}

return 0, errors.New("getLoadType: can't find collection in collectionInfos")
}

func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID UniqueID, percentage int64, loadType querypb.LoadType) error {
info, err := m.getCollectionInfoByID(collectionID)
if err != nil {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()

if _, ok := m.collectionInfos[collectionID]; !ok {
return errors.New("setLoadPercentage: can't find collection in collectionInfos")
}

info := proto.Clone(m.collectionInfos[collectionID]).(*querypb.CollectionInfo)
if loadType == querypb.LoadType_loadCollection {
info.InMemoryPercentage = percentage
for _, partitionState := range info.PartitionStates {

@@ -1144,10 +1077,7 @@ func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID Uniqu
}
}

m.collectionMu.Lock()
m.collectionInfos[collectionID] = info
m.collectionMu.Unlock()

return nil
}
@@ -1177,42 +1107,13 @@ func saveGlobalCollectionInfo(collectionID UniqueID, info *querypb.CollectionInf
return kv.Save(key, string(infoBytes))
}

func removeGlobalCollectionInfo(collectionID UniqueID, kv kv.MetaKv) error {
key := fmt.Sprintf("%s/%d", collectionMetaPrefix, collectionID)
return kv.Remove(key)
}

func multiSaveSegmentInfos(segmentInfos map[UniqueID]*querypb.SegmentInfo, kv kv.MetaKv) error {
kvs := make(map[string]string)
for segmentID, info := range segmentInfos {
infoBytes, err := proto.Marshal(info)
if err != nil {
return err
}
key := fmt.Sprintf("%s/%d", util.SegmentMetaPrefix, segmentID)
kvs[key] = string(infoBytes)
}

return kv.MultiSave(kvs)
}

func multiRemoveSegmentInfo(segmentIDs []UniqueID, kv kv.MetaKv) error {
keys := make([]string, 0)
for _, segmentID := range segmentIDs {
key := fmt.Sprintf("%s/%d", util.SegmentMetaPrefix, segmentID)
keys = append(keys, key)
}

return kv.MultiRemove(keys)
}

func saveQueryChannelInfo(collectionID UniqueID, info *querypb.QueryChannelInfo, kv kv.MetaKv) error {
func saveQueryChannelInfo(info *querypb.QueryChannelInfo, kv kv.MetaKv) error {
infoBytes, err := proto.Marshal(info)
if err != nil {
return err
}

key := fmt.Sprintf("%s/%d", queryChannelMetaPrefix, collectionID)
key := fmt.Sprintf("%s/%d", queryChannelMetaPrefix, info.CollectionID)
return kv.Save(key, string(infoBytes))
}

@@ -1229,3 +1130,29 @@ func saveDeltaChannelInfo(collectionID UniqueID, infos []*datapb.VchannelInfo, k
}
return kv.MultiSave(kvs)
}

func saveDmChannelWatchInfos(infos []*querypb.DmChannelWatchInfo, kv kv.MetaKv) error {
kvs := make(map[string]string)
for _, info := range infos {
infoBytes, err := proto.Marshal(info)
if err != nil {
return err
}

key := fmt.Sprintf("%s/%d/%s", dmChannelMetaPrefix, info.CollectionID, info.DmChannel)
kvs[key] = string(infoBytes)
}
return kv.MultiSave(kvs)
}

func removeCollectionMeta(collectionID UniqueID, kv kv.MetaKv) error {
var prefixes []string
collectionInfosPrefix := fmt.Sprintf("%s/%d", collectionMetaPrefix, collectionID)
prefixes = append(prefixes, collectionInfosPrefix)
dmChannelInfosPrefix := fmt.Sprintf("%s/%d", dmChannelMetaPrefix, collectionID)
prefixes = append(prefixes, dmChannelInfosPrefix)
deltaChannelInfosPrefix := fmt.Sprintf("%s/%d", deltaChannelMetaPrefix, collectionID)
prefixes = append(prefixes, deltaChannelInfosPrefix)

return kv.MultiRemoveWithPrefix(prefixes)
}
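Because the collection, dm-channel, and delta-channel keys all start with <prefix>/<collectionID>, removeCollectionMeta can sweep a collection's entire footprint in a single multi-remove call. For illustration, these are the prefixes it hands over for a hypothetical collection 1001:

package main

import "fmt"

const (
	collectionMetaPrefix   = "queryCoord-collectionMeta"
	dmChannelMetaPrefix    = "queryCoord-dmChannelWatchInfo"
	deltaChannelMetaPrefix = "queryCoord-deltaChannel"
)

func main() {
	collectionID := int64(1001) // hypothetical collection ID
	// everything stored under these prefixes disappears with the release
	for _, p := range []string{collectionMetaPrefix, dmChannelMetaPrefix, deltaChannelMetaPrefix} {
		fmt.Printf("%s/%d\n", p, collectionID)
	}
}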
@@ -68,23 +68,23 @@ func TestReplica_Release(t *testing.T) {
assert.Nil(t, err)
meta, err := newMeta(context.Background(), etcdKV, nil, nil)
assert.Nil(t, err)
err = meta.addCollection(1, nil)
err = meta.addCollection(1, querypb.LoadType_loadCollection, nil)
require.NoError(t, err)

collections := meta.showCollections()
assert.Equal(t, 1, len(collections))

err = meta.addPartition(1, 100)
err = meta.addPartitions(1, []UniqueID{100})
assert.NoError(t, err)
partitions, err := meta.showPartitions(1)
assert.NoError(t, err)
assert.Equal(t, 1, len(partitions))

meta.releasePartition(1, 100)
meta.releasePartitions(1, []UniqueID{100})
partitions, err = meta.showPartitions(1)
assert.NoError(t, err)
assert.Equal(t, 0, len(partitions))
meta.releasePartition(1, 100)
meta.releasePartitions(1, []UniqueID{100})

meta.releaseCollection(1)
collections = meta.showCollections()

@@ -96,15 +96,24 @@ func TestMetaFunc(t *testing.T) {
refreshParams()
kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
assert.Nil(t, err)

nodeID := defaultQueryNodeID
segmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
segmentInfos[defaultSegmentID] = &querypb.SegmentInfo{
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentID: defaultSegmentID,
NodeID: nodeID,
}
meta := &MetaReplica{
client: kv,
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
segmentInfos: map[UniqueID]*querypb.SegmentInfo{},
segmentInfos: segmentInfos,
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
globalSeekPosition: &internalpb.MsgPosition{},
}

nodeID := int64(100)
dmChannels := []string{"testDm1", "testDm2"}

t.Run("Test ShowPartitionFail", func(t *testing.T) {

@@ -129,7 +138,7 @@ func TestMetaFunc(t *testing.T) {
})

t.Run("Test GetSegmentInfoByIDFail", func(t *testing.T) {
res, err := meta.getSegmentInfoByID(defaultSegmentID)
res, err := meta.getSegmentInfoByID(defaultSegmentID + 100)
assert.NotNil(t, err)
assert.Nil(t, res)
})

@@ -152,17 +161,6 @@ func TestMetaFunc(t *testing.T) {
assert.NotNil(t, err)
})

t.Run("Test GetDmChannelsByNodeIDFail", func(t *testing.T) {
res, err := meta.getDmChannelsByNodeID(defaultCollectionID, nodeID)
assert.NotNil(t, err)
assert.Nil(t, res)
})

t.Run("Test AddDmChannelFail", func(t *testing.T) {
err := meta.addDmChannel(defaultCollectionID, nodeID, dmChannels)
assert.NotNil(t, err)
})

t.Run("Test SetLoadTypeFail", func(t *testing.T) {
err := meta.setLoadType(defaultCollectionID, querypb.LoadType_loadCollection)
assert.NotNil(t, err)

@@ -175,7 +173,7 @@ func TestMetaFunc(t *testing.T) {

t.Run("Test AddCollection", func(t *testing.T) {
schema := genCollectionSchema(defaultCollectionID, false)
err := meta.addCollection(defaultCollectionID, schema)
err := meta.addCollection(defaultCollectionID, querypb.LoadType_loadCollection, schema)
assert.Nil(t, err)
})

@@ -185,7 +183,7 @@ func TestMetaFunc(t *testing.T) {
})

t.Run("Test AddPartition", func(t *testing.T) {
err := meta.addPartition(defaultCollectionID, defaultPartitionID)
err := meta.addPartitions(defaultCollectionID, []UniqueID{defaultPartitionID})
assert.Nil(t, err)
})
@@ -218,27 +216,21 @@ func TestMetaFunc(t *testing.T) {
})

t.Run("Test AddDmChannel", func(t *testing.T) {
err := meta.addDmChannel(defaultCollectionID, nodeID, dmChannels)
var dmChannelWatchInfos []*querypb.DmChannelWatchInfo
for _, channel := range dmChannels {
dmChannelWatchInfos = append(dmChannelWatchInfos, &querypb.DmChannelWatchInfo{
CollectionID: defaultCollectionID,
DmChannel: channel,
NodeIDLoaded: nodeID,
})
}
err = meta.setDmChannelInfos(dmChannelWatchInfos)
assert.Nil(t, err)
})

t.Run("Test GetDmChannelsByNodeID", func(t *testing.T) {
channels, err := meta.getDmChannelsByNodeID(defaultCollectionID, nodeID)
assert.Nil(t, err)
assert.Equal(t, 2, len(channels))
})

t.Run("Test SetSegmentInfo", func(t *testing.T) {
info := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
PartitionID: defaultPartitionID,
CollectionID: defaultCollectionID,
NodeID: nodeID,
}
segmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
segmentInfos[defaultSegmentID] = info
err := meta.setSegmentInfos(segmentInfos)
assert.Nil(t, err)
channelInfos := meta.getDmChannelInfosByNodeID(nodeID)
assert.Equal(t, 2, len(channelInfos))
})

t.Run("Test ShowSegmentInfo", func(t *testing.T) {

@@ -250,6 +242,7 @@ func TestMetaFunc(t *testing.T) {
t.Run("Test GetSegmentInfoByNode", func(t *testing.T) {
infos := meta.getSegmentInfosByNode(nodeID)
assert.Equal(t, 1, len(infos))
assert.Equal(t, defaultSegmentID, infos[0].SegmentID)
})

t.Run("Test getQueryChannel", func(t *testing.T) {

@@ -283,23 +276,8 @@ func TestMetaFunc(t *testing.T) {
assert.Equal(t, int64(100), info.InMemoryPercentage)
})

t.Run("Test RemoveDmChannel", func(t *testing.T) {
err := meta.removeDmChannel(defaultCollectionID, nodeID, dmChannels)
assert.Nil(t, err)
channels, err := meta.getDmChannelsByNodeID(defaultCollectionID, nodeID)
assert.Nil(t, err)
assert.Equal(t, 0, len(channels))
})

t.Run("Test DeleteSegmentInfoByNodeID", func(t *testing.T) {
err := meta.deleteSegmentInfoByNodeID(nodeID)
assert.Nil(t, err)
_, err = meta.getSegmentInfoByID(defaultSegmentID)
assert.NotNil(t, err)
})

t.Run("Test ReleasePartition", func(t *testing.T) {
err := meta.releasePartition(defaultCollectionID, defaultPartitionID)
err := meta.releasePartitions(defaultCollectionID, []UniqueID{defaultPartitionID})
assert.Nil(t, err)
})

@@ -318,6 +296,7 @@ func TestReloadMetaFromKV(t *testing.T) {
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
segmentInfos: map[UniqueID]*querypb.SegmentInfo{},
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
deltaChannelInfos: map[UniqueID][]*datapb.VchannelInfo{},
}

@@ -358,6 +337,15 @@ func TestReloadMetaFromKV(t *testing.T) {
kvs[key] = string(infoBytes)
}

dmChannelInfo := &querypb.DmChannelWatchInfo{
CollectionID: defaultCollectionID,
DmChannel: "dm-channel1",
}
dmChannelInfoBlob, err := proto.Marshal(dmChannelInfo)
assert.Nil(t, err)
key := fmt.Sprintf("%s/%d/%s", dmChannelMetaPrefix, dmChannelInfo.CollectionID, dmChannelInfo.DmChannel)
kvs[key] = string(dmChannelInfoBlob)

err = kv.MultiSave(kvs)
assert.Nil(t, err)

@@ -43,7 +43,6 @@ type queryNodeClientMock struct {
}

func newQueryNodeTest(ctx context.Context, address string, id UniqueID, kv *etcdkv.EtcdKV) (Node, error) {
collectionInfo := make(map[UniqueID]*querypb.CollectionInfo)
watchedChannels := make(map[UniqueID]*querypb.QueryChannelInfo)
watchedDeltaChannels := make(map[UniqueID][]*datapb.VchannelInfo)
childCtx, cancel := context.WithCancel(ctx)

@@ -59,7 +58,6 @@ func newQueryNodeTest(ctx context.Context, address string, id UniqueID, kv *etcd
address: address,
client: client,
kvClient: kv,
collectionInfos: collectionInfo,
watchedQueryChannels: watchedChannels,
watchedDeltaChannels: watchedDeltaChannels,
}

@@ -315,12 +315,8 @@ func (qc *QueryCoord) watchNodeLoop() {
defer qc.loopWg.Done()
log.Debug("QueryCoord start watch node loop")

offlineNodes, err := qc.cluster.offlineNodes()
if err == nil {
offlineNodeIDs := make([]int64, 0)
for id := range offlineNodes {
offlineNodeIDs = append(offlineNodeIDs, id)
}
offlineNodeIDs := qc.cluster.offlineNodeIDs()
if len(offlineNodeIDs) != 0 {
loadBalanceSegment := &querypb.LoadBalanceRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadBalanceSegments,

@@ -456,9 +452,9 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
case <-ctx.Done():
return
case <-timer.C:
onlineNodes, err := qc.cluster.onlineNodes()
if err != nil {
log.Warn("loadBalanceSegmentLoop: there are no online QueryNode to balance")
onlineNodeIDs := qc.cluster.onlineNodeIDs()
if len(onlineNodeIDs) == 0 {
log.Error("loadBalanceSegmentLoop: there are no online QueryNode to balance")
continue
}
// get mem info of online nodes from cluster

@@ -466,12 +462,11 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
nodeID2MemUsage := make(map[int64]uint64)
nodeID2TotalMem := make(map[int64]uint64)
nodeID2SegmentInfos := make(map[int64]map[UniqueID]*querypb.SegmentInfo)
onlineNodeIDs := make([]int64, 0)
for nodeID := range onlineNodes {
var availableNodeIDs []int64
for _, nodeID := range onlineNodeIDs {
nodeInfo, err := qc.cluster.getNodeInfoByID(nodeID)
if err != nil {
log.Warn("loadBalanceSegmentLoop: get node info from QueryNode failed", zap.Int64("nodeID", nodeID), zap.Error(err))
delete(onlineNodes, nodeID)
continue
}

@@ -482,7 +477,6 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
leastInfo, err := qc.cluster.getSegmentInfoByID(ctx, segmentInfo.SegmentID)
if err != nil {
log.Warn("loadBalanceSegmentLoop: get segment info from QueryNode failed", zap.Int64("nodeID", nodeID), zap.Error(err))
delete(onlineNodes, nodeID)
updateSegmentInfoDone = false
break
}

@@ -492,13 +486,13 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
nodeID2MemUsageRate[nodeID] = nodeInfo.(*queryNode).memUsageRate
nodeID2MemUsage[nodeID] = nodeInfo.(*queryNode).memUsage
nodeID2TotalMem[nodeID] = nodeInfo.(*queryNode).totalMem
onlineNodeIDs = append(onlineNodeIDs, nodeID)
availableNodeIDs = append(availableNodeIDs, nodeID)
nodeID2SegmentInfos[nodeID] = leastSegmentInfos
}
}
log.Debug("loadBalanceSegmentLoop: memory usage rate of all online QueryNode", zap.Any("mem rate", nodeID2MemUsageRate))
if len(onlineNodeIDs) <= 1 {
log.Warn("loadBalanceSegmentLoop: there are too few online query nodes to balance", zap.Int64s("onlineNodeIDs", onlineNodeIDs))
if len(availableNodeIDs) <= 1 {
log.Warn("loadBalanceSegmentLoop: there are too few available query nodes to balance", zap.Int64s("onlineNodeIDs", onlineNodeIDs), zap.Int64s("availableNodeIDs", availableNodeIDs))
continue
}

@@ -506,14 +500,13 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
memoryInsufficient := false
loadBalanceTasks := make([]*loadBalanceTask, 0)
for {
var selectedSegmentInfo *querypb.SegmentInfo
sort.Slice(onlineNodeIDs, func(i, j int) bool {
return nodeID2MemUsageRate[onlineNodeIDs[i]] > nodeID2MemUsageRate[onlineNodeIDs[j]]
sort.Slice(availableNodeIDs, func(i, j int) bool {
return nodeID2MemUsageRate[availableNodeIDs[i]] > nodeID2MemUsageRate[availableNodeIDs[j]]
})

// the memoryUsageRate of the sourceNode is higher than that of the other query nodes
sourceNodeID := onlineNodeIDs[0]
dstNodeID := onlineNodeIDs[len(onlineNodeIDs)-1]
sourceNodeID := availableNodeIDs[0]
dstNodeID := availableNodeIDs[len(availableNodeIDs)-1]
memUsageRateDiff := nodeID2MemUsageRate[sourceNodeID] - nodeID2MemUsageRate[dstNodeID]
// if the memoryUsageRate of the source node is greater than 90%, and the max memUsageDiff is greater than 30%,
// then migrate the segments on the source node to other query nodes
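The selection policy above is simply: sort the candidates by memory usage rate and pair the two ends. A standalone sketch with hypothetical rates and threshold values (the real thresholds come from Params; only MemoryUsageMaxDifferencePercentage is visible in this diff, the 90% figure is the one named in the comments):

package main

import (
	"fmt"
	"sort"
)

func main() {
	nodeID2MemUsageRate := map[int64]float64{1: 0.95, 2: 0.40, 3: 0.55} // hypothetical rates
	availableNodeIDs := []int64{1, 2, 3}

	// sort descending by memory usage rate: the hottest node comes first
	sort.Slice(availableNodeIDs, func(i, j int) bool {
		return nodeID2MemUsageRate[availableNodeIDs[i]] > nodeID2MemUsageRate[availableNodeIDs[j]]
	})
	sourceNodeID := availableNodeIDs[0]
	dstNodeID := availableNodeIDs[len(availableNodeIDs)-1]

	// assumed stand-ins for the Params-configured thresholds
	const overloadedRate, maxRateDiff = 0.90, 0.30
	diff := nodeID2MemUsageRate[sourceNodeID] - nodeID2MemUsageRate[dstNodeID]
	if nodeID2MemUsageRate[sourceNodeID] > overloadedRate && diff > maxRateDiff {
		fmt.Printf("balance: move a segment from node %d to node %d\n", sourceNodeID, dstNodeID)
	}
}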
@@ -521,8 +514,14 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
memUsageRateDiff > Params.MemoryUsageMaxDifferencePercentage {
segmentInfos := nodeID2SegmentInfos[sourceNodeID]
// select the segment that needs balance on the source node
selectedSegmentInfo, err = chooseSegmentToBalance(sourceNodeID, dstNodeID, segmentInfos, nodeID2MemUsage, nodeID2TotalMem, nodeID2MemUsageRate)
if err == nil && selectedSegmentInfo != nil {
selectedSegmentInfo, err := chooseSegmentToBalance(sourceNodeID, dstNodeID, segmentInfos, nodeID2MemUsage, nodeID2TotalMem, nodeID2MemUsageRate)
if err != nil {
// not enough memory on the query nodes to balance, so notify the proxy to stop inserts
memoryInsufficient = true
break
}
// a segment was selected successfully, so keep traversing to see whether other segments can be balanced
if selectedSegmentInfo != nil {
req := &querypb.LoadBalanceRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadBalanceSegments,

@@ -550,22 +549,20 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
delete(nodeID2SegmentInfos[sourceNodeID], selectedSegmentInfo.SegmentID)
nodeID2SegmentInfos[dstNodeID][selectedSegmentInfo.SegmentID] = selectedSegmentInfo
continue
} else {
// moving any segment will not improve the balance status
break
}
} else {
// all query nodes' memoryUsageRate is less than 90%, and the max memUsageDiff is less than 30%
break
}
if err != nil {
// not enough memory on the query nodes to balance, so notify the proxy to stop inserts
memoryInsufficient = true
}
// if memoryInsufficient == false,
// all query nodes' memoryUsageRate is less than 90%, and the max memUsageDiff is less than 30%,
// so this balance loop is done
break
}
if !memoryInsufficient {
for _, t := range loadBalanceTasks {
qc.scheduler.Enqueue(t)
log.Debug("loadBalanceSegmentLoop: enqueue a loadBalance task", zap.Any("task", t))
err = t.waitToFinish()
err := t.waitToFinish()
if err != nil {
// if it failed, wait for the next balance loop;
// it may be that the collection/partition of the balanced segment has been released

@@ -168,7 +168,7 @@ func TestWatchNodeLoop(t *testing.T) {
}
collectionBlobs, err := proto.Marshal(collectionInfo)
assert.Nil(t, err)
nodeKey := fmt.Sprintf("%s/%d", queryNodeMetaPrefix, 100)
nodeKey := fmt.Sprintf("%s/%d", collectionMetaPrefix, 100)
kvs[nodeKey] = string(collectionBlobs)

err = kv.MultiSave(kvs)

@@ -178,9 +178,9 @@ func TestWatchNodeLoop(t *testing.T) {
assert.Nil(t, err)

for {
offlineNodes, err := queryCoord.cluster.offlineNodes()
if err == nil {
log.Warn("find offline Nodes", zap.Any("node map", offlineNodes))
offlineNodeIDs := queryCoord.cluster.offlineNodeIDs()
if len(offlineNodeIDs) != 0 {
log.Warn("find offline Nodes", zap.Int64s("offlineNodeIDs", offlineNodeIDs))
break
}
// if the session id does not exist, queryCoord already handled it and removed it

@@ -224,14 +224,14 @@ func TestWatchNodeLoop(t *testing.T) {

nodeID := queryNode1.queryNodeID
waitQueryNodeOnline(queryCoord.cluster, nodeID)
nodes, err := queryCoord.cluster.onlineNodes()
assert.Nil(t, err)
onlineNodeIDs := queryCoord.cluster.onlineNodeIDs()
assert.Equal(t, 1, len(onlineNodeIDs))

queryNode1.stop()
err = removeNodeSession(nodeID)
assert.Nil(t, err)

waitAllQueryNodeOffline(queryCoord.cluster, nodes)
waitAllQueryNodeOffline(queryCoord.cluster, onlineNodeIDs)

queryCoord.Stop()
err = removeAllSession()

@@ -310,6 +310,7 @@ func TestHandoffSegmentLoop(t *testing.T) {
err = queryCoord.scheduler.Enqueue(loadCollectionTask)
assert.Nil(t, err)
waitTaskFinalState(loadCollectionTask, taskExpired)
queryCoord.meta.setLoadType(defaultCollectionID, querypb.LoadType_loadCollection)

t.Run("Test handoffGrowingSegment", func(t *testing.T) {
infos := queryCoord.meta.showSegmentInfos(defaultCollectionID, nil)

@@ -22,7 +22,6 @@ import (
"fmt"
"sync"

"github.com/golang/protobuf/proto"
"go.uber.org/zap"

nodeclient "github.com/milvus-io/milvus/internal/distributed/querynode/client"

@@ -33,7 +32,6 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
)

@@ -43,14 +41,8 @@ type Node interface {
start() error
stop()
getNodeInfo() (Node, error)
clearNodeInfo() error

addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error
setCollectionInfo(info *querypb.CollectionInfo) error
showCollections() []*querypb.CollectionInfo
releaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) error

addPartition(collectionID UniqueID, partitionID UniqueID) error
releasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) error

watchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) error

@@ -85,7 +77,6 @@ type queryNode struct {
kvClient *etcdkv.EtcdKV

sync.RWMutex
collectionInfos map[UniqueID]*querypb.CollectionInfo
watchedQueryChannels map[UniqueID]*querypb.QueryChannelInfo
watchedDeltaChannels map[UniqueID][]*datapb.VchannelInfo
state nodeState

@@ -98,7 +89,6 @@ type queryNode struct {
}

func newQueryNode(ctx context.Context, address string, id UniqueID, kv *etcdkv.EtcdKV) (Node, error) {
collectionInfo := make(map[UniqueID]*querypb.CollectionInfo)
watchedChannels := make(map[UniqueID]*querypb.QueryChannelInfo)
watchedDeltaChannels := make(map[UniqueID][]*datapb.VchannelInfo)
childCtx, cancel := context.WithCancel(ctx)

@@ -114,7 +104,6 @@ func newQueryNode(ctx context.Context, address string, id UniqueID, kv *etcdkv.E
address: address,
client: client,
kvClient: kv,
collectionInfos: collectionInfo,
watchedQueryChannels: watchedChannels,
watchedDeltaChannels: watchedDeltaChannels,
state: disConnect,

@@ -152,185 +141,6 @@ func (qn *queryNode) stop() {
qn.cancel()
}

func (qn *queryNode) addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error {
qn.Lock()
defer qn.Unlock()

if _, ok := qn.collectionInfos[collectionID]; !ok {
partitions := make([]UniqueID, 0)
channels := make([]*querypb.DmChannelWatchInfo, 0)
newCollection := &querypb.CollectionInfo{
CollectionID: collectionID,
PartitionIDs: partitions,
ChannelInfos: channels,
Schema: schema,
}
qn.collectionInfos[collectionID] = newCollection
err := saveNodeCollectionInfo(collectionID, newCollection, qn.id, qn.kvClient)
if err != nil {
log.Error("addCollection: save collectionInfo error", zap.Int64("nodeID", qn.id), zap.Int64("collectionID", collectionID), zap.Any("error", err.Error()))
return err
}
log.Debug("queryNode addCollection", zap.Int64("nodeID", qn.id), zap.Any("collectionInfo", newCollection))
}

return nil
}

func (qn *queryNode) setCollectionInfo(info *querypb.CollectionInfo) error {
qn.Lock()
defer qn.Unlock()

qn.collectionInfos[info.CollectionID] = info
err := saveNodeCollectionInfo(info.CollectionID, info, qn.id, qn.kvClient)
if err != nil {
log.Error("setCollectionInfo: save collectionInfo error", zap.Int64("nodeID", qn.id), zap.Int64("collectionID", info.CollectionID), zap.Any("error", err.Error()))
return err
}
return nil
}

func (qn *queryNode) showCollections() []*querypb.CollectionInfo {
qn.RLock()
defer qn.RUnlock()

results := make([]*querypb.CollectionInfo, 0)
for _, info := range qn.collectionInfos {
results = append(results, proto.Clone(info).(*querypb.CollectionInfo))
}

return results
}

func (qn *queryNode) addPartition(collectionID UniqueID, partitionID UniqueID) error {
qn.Lock()
defer qn.Unlock()
if col, ok := qn.collectionInfos[collectionID]; ok {
for _, id := range col.PartitionIDs {
if id == partitionID {
return nil
}
}
col.PartitionIDs = append(col.PartitionIDs, partitionID)
err := saveNodeCollectionInfo(collectionID, col, qn.id, qn.kvClient)
if err != nil {
log.Error("AddPartition: save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
}
log.Debug("queryNode add partition", zap.Int64("nodeID", qn.id), zap.Any("collectionInfo", col))
return nil
}
return errors.New("AddPartition: can't find collection when add partition")
}

func (qn *queryNode) releaseCollectionInfo(collectionID UniqueID) error {
qn.Lock()
defer qn.Unlock()
if _, ok := qn.collectionInfos[collectionID]; ok {
err := removeNodeCollectionInfo(collectionID, qn.id, qn.kvClient)
if err != nil {
log.Error("ReleaseCollectionInfo: remove collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
delete(qn.collectionInfos, collectionID)
}

delete(qn.watchedQueryChannels, collectionID)
return nil
}

func (qn *queryNode) releasePartitionsInfo(collectionID UniqueID, partitionIDs []UniqueID) error {
qn.Lock()
defer qn.Unlock()

if info, ok := qn.collectionInfos[collectionID]; ok {
newPartitionIDs := make([]UniqueID, 0)
for _, id := range info.PartitionIDs {
match := false
for _, partitionID := range partitionIDs {
if id == partitionID {
match = true
break
}
}
if !match {
newPartitionIDs = append(newPartitionIDs, id)
}
}
info.PartitionIDs = newPartitionIDs
err := saveNodeCollectionInfo(collectionID, info, qn.id, qn.kvClient)
if err != nil {
log.Error("ReleasePartitionsInfo: remove collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
log.Debug("queryNode release partition info", zap.Int64("nodeID", qn.id), zap.Any("info", info))
}

return nil
}

func (qn *queryNode) addDmChannel(collectionID UniqueID, channels []string) error {
qn.Lock()
defer qn.Unlock()

//before add channel, should ensure toAddedChannels not in MetaReplica
if info, ok := qn.collectionInfos[collectionID]; ok {
findNodeID := false
for _, channelInfo := range info.ChannelInfos {
if channelInfo.NodeIDLoaded == qn.id {
findNodeID = true
channelInfo.ChannelIDs = append(channelInfo.ChannelIDs, channels...)
}
}
if !findNodeID {
newChannelInfo := &querypb.DmChannelWatchInfo{
NodeIDLoaded: qn.id,
ChannelIDs: channels,
}
info.ChannelInfos = append(info.ChannelInfos, newChannelInfo)
}
err := saveNodeCollectionInfo(collectionID, info, qn.id, qn.kvClient)
if err != nil {
log.Error("AddDmChannel: save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
return nil
}

return errors.New("AddDmChannels: can't find collection in watchedQueryChannel")
}

//func (qn *queryNode) removeDmChannel(collectionID UniqueID, channels []string) error {
|
||||
// qn.Lock()
|
||||
// defer qn.Unlock()
|
||||
//
|
||||
// if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
// for _, channelInfo := range info.ChannelInfos {
|
||||
// if channelInfo.NodeIDLoaded == qn.id {
|
||||
// newChannelIDs := make([]string, 0)
|
||||
// for _, channelID := range channelInfo.ChannelIDs {
|
||||
// findChannel := false
|
||||
// for _, channel := range channels {
|
||||
// if channelID == channel {
|
||||
// findChannel = true
|
||||
// }
|
||||
// }
|
||||
// if !findChannel {
|
||||
// newChannelIDs = append(newChannelIDs, channelID)
|
||||
// }
|
||||
// }
|
||||
// channelInfo.ChannelIDs = newChannelIDs
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// err := saveNodeCollectionInfo(collectionID, info, qn.id, qn.kvClient)
|
||||
// if err != nil {
|
||||
// log.Error("RemoveDmChannel: save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return errors.New("RemoveDmChannel: can't find collection in watchedQueryChannel")
|
||||
//}
|
||||
|
||||
func (qn *queryNode) hasWatchedQueryChannel(collectionID UniqueID) bool {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
@ -383,14 +193,6 @@ func (qn *queryNode) removeQueryChannelInfo(collectionID UniqueID) {
|
|||
delete(qn.watchedQueryChannels, collectionID)
|
||||
}
|
||||
|
||||
func (qn *queryNode) clearNodeInfo() error {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
// delete query node meta and all the collection info
|
||||
key := fmt.Sprintf("%s/%d", queryNodeMetaPrefix, qn.id)
|
||||
return qn.kvClient.RemoveWithPrefix(key)
|
||||
}
|
||||
|
||||
func (qn *queryNode) setState(state nodeState) {
|
||||
qn.stateLock.Lock()
|
||||
defer qn.stateLock.Unlock()
|
||||
|
@ -432,16 +234,8 @@ func (qn *queryNode) watchDmChannels(ctx context.Context, in *querypb.WatchDmCha
|
|||
if status.ErrorCode != commonpb.ErrorCode_Success {
|
||||
return errors.New(status.Reason)
|
||||
}
|
||||
channels := make([]string, 0)
|
||||
for _, info := range in.Infos {
|
||||
channels = append(channels, info.ChannelName)
|
||||
}
|
||||
err = qn.addCollection(in.CollectionID, in.Schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = qn.addDmChannel(in.CollectionID, channels)
|
||||
return err
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (qn *queryNode) watchDeltaChannels(ctx context.Context, in *querypb.WatchDeltaChannelsRequest) error {
|
||||
|
@ -513,10 +307,10 @@ func (qn *queryNode) releaseCollection(ctx context.Context, in *querypb.ReleaseC
|
|||
return errors.New(status.Reason)
|
||||
}
|
||||
|
||||
err = qn.releaseCollectionInfo(in.CollectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
qn.Lock()
|
||||
delete(qn.watchedDeltaChannels, in.CollectionID)
|
||||
delete(qn.watchedQueryChannels, in.CollectionID)
|
||||
qn.Unlock()
|
||||
|
||||
return nil
|
||||
}

@@ -533,10 +327,6 @@ func (qn *queryNode) releasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) error {
if status.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(status.Reason)
}
err = qn.releasePartitionsInfo(in.CollectionID, in.PartitionIDs)
if err != nil {
return err
}

return nil
}

@@ -597,16 +387,6 @@ func (qn *queryNode) loadSegments(ctx context.Context, in *querypb.LoadSegmentsRequest) error {
return errors.New(status.Reason)
}

for _, info := range in.Infos {
err = qn.addCollection(info.CollectionID, in.Schema)
if err != nil {
return err
}
err = qn.addPartition(info.CollectionID, info.PartitionID)
if err != nil {
return err
}
}
return nil
}

@@ -665,21 +445,3 @@ func (qn *queryNode) getNodeInfo() (Node, error) {
cpuUsage: qn.cpuUsage,
}, nil
}

//****************************************************//

func saveNodeCollectionInfo(collectionID UniqueID, info *querypb.CollectionInfo, nodeID int64, kv *etcdkv.EtcdKV) error {
infoBytes, err := proto.Marshal(info)
if err != nil {
log.Error("QueryNode::saveNodeCollectionInfo ", zap.Error(err))
return err
}

key := fmt.Sprintf("%s/%d/%d", queryNodeMetaPrefix, nodeID, collectionID)
return kv.Save(key, string(infoBytes))
}

func removeNodeCollectionInfo(collectionID UniqueID, nodeID int64, kv *etcdkv.EtcdKV) error {
key := fmt.Sprintf("%s/%d/%d", queryNodeMetaPrefix, nodeID, collectionID)
return kv.Remove(key)
}
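
As context for the two helpers above: each node's per-collection metadata is marshaled with proto.Marshal and written under a "<prefix>/<nodeID>/<collectionID>" key, which is what lets clearNodeInfo wipe a whole node with a single prefix removal. The sketch below only illustrates that key layout; the tinyKV interface and the prefix value are assumptions for the example, not the actual Milvus definitions.

package main

import "fmt"

// tinyKV is a hypothetical stand-in for the etcd-backed KV used above.
type tinyKV interface {
	Save(key, value string) error
	Remove(key string) error
	RemoveWithPrefix(prefix string) error
}

// assumed prefix value, for illustration only
const queryNodeMetaPrefix = "queryCoord-queryNodeMeta"

// collectionKey rebuilds the "<prefix>/<nodeID>/<collectionID>" layout that
// saveNodeCollectionInfo and removeNodeCollectionInfo format above.
func collectionKey(nodeID, collectionID int64) string {
	return fmt.Sprintf("%s/%d/%d", queryNodeMetaPrefix, nodeID, collectionID)
}

// clearNode mirrors clearNodeInfo: removing the "<prefix>/<nodeID>" prefix
// drops every collection entry for that node in one call.
func clearNode(kv tinyKV, nodeID int64) error {
	return kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", queryNodeMetaPrefix, nodeID))
}

func main() {
	fmt.Println(collectionKey(7, 1001)) // queryCoord-queryNodeMeta/7/1001
}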

@@ -59,10 +59,10 @@ func removeAllSession() error {
return nil
}

func waitAllQueryNodeOffline(cluster Cluster, nodes map[int64]Node) bool {
func waitAllQueryNodeOffline(cluster Cluster, nodeIDs []int64) bool {
for {
allOffline := true
for nodeID := range nodes {
for _, nodeID := range nodeIDs {
nodeExist := cluster.hasNode(nodeID)
if nodeExist {
allOffline = false

@@ -123,13 +123,13 @@ func TestQueryNode_MultiNode_stop(t *testing.T) {
})
assert.Nil(t, err)
time.Sleep(100 * time.Millisecond)
nodes, err := queryCoord.cluster.onlineNodes()
assert.Nil(t, err)
onlineNodeIDs := queryCoord.cluster.onlineNodeIDs()
assert.NotEqual(t, 0, len(onlineNodeIDs))
queryNode2.stop()
err = removeNodeSession(queryNode2.queryNodeID)
assert.Nil(t, err)

waitAllQueryNodeOffline(queryCoord.cluster, nodes)
waitAllQueryNodeOffline(queryCoord.cluster, onlineNodeIDs)
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)

@@ -168,13 +168,13 @@ func TestQueryNode_MultiNode_reStart(t *testing.T) {
CollectionID: defaultCollectionID,
})
assert.Nil(t, err)
nodes, err := queryCoord.cluster.onlineNodes()
assert.Nil(t, err)
onlineNodeIDs := queryCoord.cluster.onlineNodeIDs()
assert.NotEqual(t, 0, len(onlineNodeIDs))
queryNode3.stop()
err = removeNodeSession(queryNode3.queryNodeID)
assert.Nil(t, err)

waitAllQueryNodeOffline(queryCoord.cluster, nodes)
waitAllQueryNodeOffline(queryCoord.cluster, onlineNodeIDs)
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
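
Both tests rely on the wait-until-offline helper whose body is truncated in the hunk above. A minimal self-contained version of that polling pattern is sketched below; the narrowed cluster interface and the 100ms poll interval are assumptions for the example (it also assumes import "time"), not the test suite's actual definitions.

// cluster is a hypothetical narrow interface: the loop only needs hasNode.
type cluster interface {
	hasNode(nodeID int64) bool
}

// waitOffline blocks until none of the given node IDs are registered.
func waitOffline(c cluster, nodeIDs []int64) bool {
	for {
		allOffline := true
		for _, nodeID := range nodeIDs {
			if c.hasNode(nodeID) {
				allOffline = false // at least one node is still online
				break
			}
		}
		if allOffline {
			return true
		}
		time.Sleep(100 * time.Millisecond) // assumed poll interval
	}
}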

@@ -33,68 +33,71 @@ func defaultSegAllocatePolicy() SegmentAllocatePolicy {
return shuffleSegmentsToQueryNodeV2
}

const shuffleWaitInterval = 1 * time.Second

// SegmentAllocatePolicy helper function definition to allocate Segment to queryNode
type SegmentAllocatePolicy func(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error
type SegmentAllocatePolicy func(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error

// shuffleSegmentsToQueryNode shuffles segments to online nodes
// returned are node ids for each segment, which satisfy:
// len(returnedNodeIds) == len(segmentIDs) && segmentIDs[i] is assigned to returnedNodeIds[i]
func shuffleSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
func shuffleSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
if len(reqs) == 0 {
return nil
}

for {
availableNodes, err := cluster.onlineNodes()
if err != nil {
log.Debug(err.Error())
onlineNodeIDs := cluster.onlineNodeIDs()
if len(onlineNodeIDs) == 0 {
err := errors.New("no online QueryNode to allocate")
log.Error("shuffleSegmentsToQueryNode failed", zap.Error(err))
if !wait {
return err
}
time.Sleep(1 * time.Second)
time.Sleep(shuffleWaitInterval)
continue
}
for _, id := range excludeNodeIDs {
delete(availableNodes, id)
}

nodeID2NumSegemnt := make(map[int64]int)
for nodeID := range availableNodes {
var availableNodeIDs []int64
nodeID2NumSegment := make(map[int64]int)
for _, nodeID := range onlineNodeIDs {
// nodeID not in includeNodeIDs
if len(includeNodeIDs) > 0 && !nodeIncluded(nodeID, includeNodeIDs) {
delete(availableNodes, nodeID)
continue
}
numSegments, err := cluster.getNumSegments(nodeID)
if err != nil {
delete(availableNodes, nodeID)

// nodeID in excludeNodeIDs
if nodeIncluded(nodeID, excludeNodeIDs) {
continue
}
nodeID2NumSegemnt[nodeID] = numSegments
segmentInfos := metaCache.getSegmentInfosByNode(nodeID)
nodeID2NumSegment[nodeID] = len(segmentInfos)
availableNodeIDs = append(availableNodeIDs, nodeID)
}

if len(availableNodes) > 0 {
nodeIDSlice := make([]int64, 0)
for nodeID := range availableNodes {
nodeIDSlice = append(nodeIDSlice, nodeID)
}

if len(availableNodeIDs) > 0 {
log.Debug("shuffleSegmentsToQueryNode: shuffle segment to available QueryNode", zap.Int64s("available nodeIDs", availableNodeIDs))
for _, req := range reqs {
sort.Slice(nodeIDSlice, func(i, j int) bool {
return nodeID2NumSegemnt[nodeIDSlice[i]] < nodeID2NumSegemnt[nodeIDSlice[j]]
sort.Slice(availableNodeIDs, func(i, j int) bool {
return nodeID2NumSegment[availableNodeIDs[i]] < nodeID2NumSegment[availableNodeIDs[j]]
})
req.DstNodeID = nodeIDSlice[0]
nodeID2NumSegemnt[nodeIDSlice[0]]++
selectedNodeID := availableNodeIDs[0]
req.DstNodeID = selectedNodeID
nodeID2NumSegment[selectedNodeID]++
}
return nil
}

if !wait {
return errors.New("no queryNode to allocate")
err := errors.New("no available queryNode to allocate")
log.Error("shuffleSegmentsToQueryNode failed", zap.Int64s("online nodeIDs", onlineNodeIDs), zap.Int64s("exclude nodeIDs", excludeNodeIDs), zap.Int64s("include nodeIDs", includeNodeIDs), zap.Error(err))
return err
}
time.Sleep(shuffleWaitInterval)
}
}
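
The core of the policy above is a least-loaded pick: before each request is assigned, node IDs are re-sorted by their current segment count so the emptiest node comes first, and the pending assignment is counted immediately so later requests see the updated load. A standalone sketch of that selection follows; the types, names, and counts are illustrative only (it assumes import "sort"), not code from this commit.

// pickLeastLoaded assigns numReqs requests greedily: each one goes to the
// node currently holding the fewest segments, counting earlier picks as load.
func pickLeastLoaded(nodeIDs []int64, segmentCount map[int64]int, numReqs int) []int64 {
	assigned := make([]int64, 0, numReqs)
	for i := 0; i < numReqs; i++ {
		sort.Slice(nodeIDs, func(a, b int) bool {
			return segmentCount[nodeIDs[a]] < segmentCount[nodeIDs[b]]
		})
		dst := nodeIDs[0]   // least-loaded node after the sort
		segmentCount[dst]++ // account for the pending assignment at once
		assigned = append(assigned, dst)
	}
	return assigned
}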

func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
// key = offset, value = segmentSize
if len(reqs) == 0 {
return nil

@@ -131,51 +134,52 @@ func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
totalMem := make(map[int64]uint64)
memUsage := make(map[int64]uint64)
memUsageRate := make(map[int64]float64)
availableNodes, err := cluster.onlineNodes()
if err != nil && !wait {
return errors.New("no online queryNode to allocate")
onlineNodeIDs := cluster.onlineNodeIDs()
if len(onlineNodeIDs) == 0 && !wait {
err := errors.New("no online queryNode to allocate")
log.Error("shuffleSegmentsToQueryNode failed", zap.Error(err))
return err
}
for _, id := range excludeNodeIDs {
delete(availableNodes, id)
}
for nodeID := range availableNodes {

var availableNodeIDs []int64
for _, nodeID := range onlineNodeIDs {
// nodeID not in includeNodeIDs
if len(includeNodeIDs) > 0 && !nodeIncluded(nodeID, includeNodeIDs) {
delete(availableNodes, nodeID)
continue
}

// nodeID in excludeNodeIDs
if nodeIncluded(nodeID, excludeNodeIDs) {
continue
}
// collect nodeInfo: used memory and memory usage rate of every query node
nodeInfo, err := cluster.getNodeInfoByID(nodeID)
if err != nil {
log.Debug("shuffleSegmentsToQueryNodeV2: getNodeInfoByID failed", zap.Error(err))
delete(availableNodes, nodeID)
log.Warn("shuffleSegmentsToQueryNodeV2: getNodeInfoByID failed", zap.Error(err))
continue
}
queryNodeInfo := nodeInfo.(*queryNode)
// avoid allocating segments to a node whose memUsageRate is already high
if queryNodeInfo.memUsageRate >= Params.OverloadedMemoryThresholdPercentage {
log.Debug("shuffleSegmentsToQueryNodeV2: queryNode memUsageRate larger than MaxMemUsagePerNode", zap.Int64("nodeID", nodeID), zap.Float64("current rate", queryNodeInfo.memUsageRate))
delete(availableNodes, nodeID)
continue
}

// update totalMem, memUsage, memUsageRate
totalMem[nodeID], memUsage[nodeID], memUsageRate[nodeID] = queryNodeInfo.totalMem, queryNodeInfo.memUsage, queryNodeInfo.memUsageRate
availableNodeIDs = append(availableNodeIDs, nodeID)
}
log.Debug("shuffleSegmentsToQueryNodeV2: num of availableNodes", zap.Int("size", len(availableNodes)))
if len(availableNodes) > 0 {
nodeIDSlice := make([]int64, 0, len(availableNodes))
for nodeID := range availableNodes {
nodeIDSlice = append(nodeIDSlice, nodeID)
}
allocateSegmentsDone := true
if len(availableNodeIDs) > 0 {
log.Debug("shuffleSegmentsToQueryNodeV2: shuffle segment to available QueryNode", zap.Int64s("available nodeIDs", availableNodeIDs))
memoryInsufficient := false
for offset, sizeOfReq := range dataSizePerReq {
// sort nodes by memUsageRate, low to high
sort.Slice(nodeIDSlice, func(i, j int) bool {
return memUsageRate[nodeIDSlice[i]] < memUsageRate[nodeIDSlice[j]]
sort.Slice(availableNodeIDs, func(i, j int) bool {
return memUsageRate[availableNodeIDs[i]] < memUsageRate[availableNodeIDs[j]]
})
findNodeToAllocate := false
// assign the load segment request to the query node with the lowest memUsageRate
for _, nodeID := range nodeIDSlice {
for _, nodeID := range availableNodeIDs {
memUsageAfterLoad := memUsage[nodeID] + uint64(sizeOfReq)
memUsageRateAfterLoad := float64(memUsageAfterLoad) / float64(totalMem[nodeID])
if memUsageRateAfterLoad > Params.OverloadedMemoryThresholdPercentage {

@@ -189,23 +193,33 @@ func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
}
// the load segment request can't be allocated to any query node
if !findNodeToAllocate {
allocateSegmentsDone = false
memoryInsufficient = true
break
}
}

if allocateSegmentsDone {
// shuffle segment success
if !memoryInsufficient {
log.Debug("shuffleSegmentsToQueryNodeV2: shuffle segment to query node success")
return nil
}

// memory insufficient and wait == false
if !wait {
err := errors.New("shuffleSegmentsToQueryNodeV2: insufficient memory of available node")
log.Error("shuffleSegmentsToQueryNode failed", zap.Int64s("online nodeIDs", onlineNodeIDs), zap.Int64s("exclude nodeIDs", excludeNodeIDs), zap.Int64s("include nodeIDs", includeNodeIDs), zap.Error(err))
return err
}
} else {
// no available node to allocate and wait == false
if !wait {
err := errors.New("no available queryNode to allocate")
log.Error("shuffleSegmentsToQueryNode failed", zap.Int64s("online nodeIDs", onlineNodeIDs), zap.Int64s("exclude nodeIDs", excludeNodeIDs), zap.Int64s("include nodeIDs", includeNodeIDs), zap.Error(err))
return err
}
}

if wait {
time.Sleep(1 * time.Second)
continue
} else {
return errors.New("no queryNode to allocate")
}
time.Sleep(shuffleWaitInterval)
}
}
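
The V2 policy's admission test is the projected memory usage rate: a request of a given byte size fits on a node only if (memUsage + size) / totalMem stays at or under the overload threshold. Distilled into a standalone predicate, as a sketch only; the 0.9 threshold is a placeholder assumption, not the actual default for OverloadedMemoryThresholdPercentage.

// fitsOnNode reports whether loading `size` more bytes would keep the
// node's projected memory usage rate within the overload threshold.
func fitsOnNode(memUsage, totalMem, size uint64) bool {
	const overloadedMemoryThreshold = 0.9 // assumed placeholder value
	projected := float64(memUsage+size) / float64(totalMem)
	return projected <= overloadedMemoryThreshold
}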

@@ -89,7 +89,7 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
reqs := []*querypb.LoadSegmentsRequest{firstReq, secondReq}

t.Run("Test shuffleSegmentsWithoutQueryNode", func(t *testing.T) {
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, false, nil, nil)
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil)
assert.NotNil(t, err)
})

@@ -101,7 +101,7 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
waitQueryNodeOnline(cluster, node1ID)

t.Run("Test shuffleSegmentsToQueryNode", func(t *testing.T) {
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, false, nil, nil)
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil)
assert.Nil(t, err)

assert.Equal(t, node1ID, firstReq.DstNodeID)

@@ -117,7 +117,7 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
cluster.stopNode(node1ID)

t.Run("Test shuffleSegmentsToQueryNodeV2", func(t *testing.T) {
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, false, nil, nil)
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, meta, false, nil, nil)
assert.Nil(t, err)

assert.Equal(t, node2ID, firstReq.DstNodeID)

File diff suppressed because it is too large

@@ -324,6 +324,7 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task, error) {
baseTask: baseTask,
ReleasePartitionsRequest: &loadReq,
cluster: scheduler.cluster,
meta: scheduler.meta,
}
newTask = releasePartitionTask
case commonpb.MsgType_LoadSegments:

@@ -83,7 +83,8 @@ func (tt *testTask) execute(ctx context.Context) error {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadSegments,
},
Infos: []*querypb.SegmentLoadInfo{segmentInfo},
CollectionID: defaultCollectionID,
}
loadTask := &loadSegmentTask{
baseTask: &baseTask{

@@ -110,7 +111,8 @@ func (tt *testTask) execute(ctx context.Context) error {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadSegments,
},
DstNodeID: tt.nodeID,
CollectionID: defaultCollectionID,
},
meta: tt.meta,
cluster: tt.cluster,

@@ -30,6 +30,7 @@ import (
)

func genLoadCollectionTask(ctx context.Context, queryCoord *QueryCoord) *loadCollectionTask {
queryCoord.meta.setDeltaChannel(defaultCollectionID, nil)
req := &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,

@@ -51,6 +52,7 @@ func genLoadCollectionTask(ctx context.Context, queryCoord *QueryCoord) *loadCollectionTask {
}

func genLoadPartitionTask(ctx context.Context, queryCoord *QueryCoord) *loadPartitionTask {
queryCoord.meta.setDeltaChannel(defaultCollectionID, nil)
req := &querypb.LoadPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,

@@ -104,6 +106,7 @@ func genReleasePartitionTask(ctx context.Context, queryCoord *QueryCoord) *releasePartitionTask {
baseTask: baseTask,
ReleasePartitionsRequest: req,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}

return releasePartitionTask

@@ -178,10 +181,11 @@ func genWatchDmChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *watchDmChannelTask {
parentTask.addChildTask(watchDmChannelTask)
watchDmChannelTask.setParentTask(parentTask)

queryCoord.meta.addCollection(defaultCollectionID, schema)
queryCoord.meta.addCollection(defaultCollectionID, querypb.LoadType_loadCollection, schema)
return watchDmChannelTask
}
func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *loadSegmentTask {
queryCoord.meta.setDeltaChannel(defaultCollectionID, nil)
schema := genCollectionSchema(defaultCollectionID, false)
segmentInfo := &querypb.SegmentLoadInfo{
SegmentID: defaultSegmentID,

@@ -231,7 +235,7 @@ func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *loadSegmentTask {
parentTask.addChildTask(loadSegmentTask)
loadSegmentTask.setParentTask(parentTask)

queryCoord.meta.addCollection(defaultCollectionID, schema)
queryCoord.meta.addCollection(defaultCollectionID, querypb.LoadType_loadCollection, schema)
return loadSegmentTask
}

@@ -701,14 +705,15 @@ func Test_AssignInternalTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadSegments,
},
DstNodeID: node1.queryNodeID,
Schema: schema,
Infos: []*querypb.SegmentLoadInfo{segmentInfo},
CollectionID: defaultCollectionID,
}
loadSegmentRequests = append(loadSegmentRequests, req)
}

internalTasks, err := assignInternalTask(queryCoord.loopCtx, defaultCollectionID, loadCollectionTask, queryCoord.meta, queryCoord.cluster, loadSegmentRequests, nil, nil, false, nil, nil)
internalTasks, err := assignInternalTask(queryCoord.loopCtx, loadCollectionTask, queryCoord.meta, queryCoord.cluster, loadSegmentRequests, nil, false, nil, nil)
assert.Nil(t, err)

assert.NotEqual(t, 1, len(internalTasks))

@@ -497,8 +497,9 @@ func TestTask_loadSegmentsTask(t *testing.T) {
genLoadEmptySegmentsRequest := func() *querypb.LoadSegmentsRequest {
schema := genSimpleSegCoreSchema()
req := &querypb.LoadSegmentsRequest{
Base: genCommonMsgBase(commonpb.MsgType_LoadSegments),
CollectionID: defaultCollectionID,
Schema: schema,
}
return req
}
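
The point of this last change is that LoadSegmentsRequest now identifies its collection at the request level, so even a request with an empty Infos list is still attributable to a collection. A hypothetical receiver-side guard that relies on that guarantee, sketched here for illustration and not part of this commit (assumes import "errors" and the querypb package):

// validateLoadRequest is an illustrative check: with CollectionID on the
// request itself, an empty Infos slice is still a well-formed request.
func validateLoadRequest(req *querypb.LoadSegmentsRequest) error {
	if req.CollectionID == 0 {
		return errors.New("load request missing collectionID")
	}
	return nil // no segment infos simply means nothing to load
}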