mirror of https://github.com/milvus-io/milvus.git
Simplify flow graph in QueryNode (#13613)
Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
pull/13667/head
parent 63ebf76318
commit 1ca08d9138
@@ -167,7 +167,7 @@ message WatchDmChannelsRequest {
   common.MsgBase base = 1;
   int64 nodeID = 2;
   int64 collectionID = 3;
-  int64 partitionID = 4;
+  repeated int64 partitionIDs = 4;
   repeated data.VchannelInfo infos = 5;
   schema.CollectionSchema schema = 6;
   repeated data.SegmentInfo exclude_infos = 7;

@@ -1116,7 +1116,7 @@ type WatchDmChannelsRequest struct {
   Base         *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
   NodeID       int64             `protobuf:"varint,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
   CollectionID int64             `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
-  PartitionID  int64             `protobuf:"varint,4,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
+  PartitionIDs []int64           `protobuf:"varint,4,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
   Infos        []*datapb.VchannelInfo     `protobuf:"bytes,5,rep,name=infos,proto3" json:"infos,omitempty"`
   Schema       *schemapb.CollectionSchema `protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"`
   ExcludeInfos []*datapb.SegmentInfo      `protobuf:"bytes,7,rep,name=exclude_infos,json=excludeInfos,proto3" json:"exclude_infos,omitempty"`

@@ -1171,11 +1171,11 @@ func (m *WatchDmChannelsRequest) GetCollectionID() int64 {
   return 0
 }
 
-func (m *WatchDmChannelsRequest) GetPartitionID() int64 {
+func (m *WatchDmChannelsRequest) GetPartitionIDs() []int64 {
   if m != nil {
-    return m.PartitionID
+    return m.PartitionIDs
   }
-  return 0
+  return nil
 }
 
 func (m *WatchDmChannelsRequest) GetInfos() []*datapb.VchannelInfo {
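
The regenerated getter above is the only behavioral change in the Go bindings: a missing value now comes back as a nil slice instead of 0. A minimal sketch of a caller after regeneration (assuming the querypb package produced by this commit; the numeric IDs are placeholders):

package main

import (
    "fmt"

    "github.com/milvus-io/milvus/internal/proto/querypb"
)

func main() {
    // PartitionIDs replaces the old scalar PartitionID, so one watch request
    // can now cover several partitions of a collection.
    req := &querypb.WatchDmChannelsRequest{
        CollectionID: 100,
        PartitionIDs: []int64{200, 201},
    }
    fmt.Println(req.GetPartitionIDs()) // [200 201]

    // Generated getters stay nil-safe: a nil request yields a nil slice.
    var empty *querypb.WatchDmChannelsRequest
    fmt.Println(empty.GetPartitionIDs() == nil) // true
}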
|
@@ -2244,148 +2244,148 @@ func init() {
 func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }
 
 var fileDescriptor_aab7cc9a69ed26e8 = []byte{
-  // 2254 bytes of a gzipped FileDescriptorProto
-  [gzipped FileDescriptorProto bytes regenerated by this commit; hex dump omitted]
+  // 2251 bytes of a gzipped FileDescriptorProto
+  [gzipped FileDescriptorProto bytes regenerated by this commit; hex dump omitted]
 }
 
 // Reference imports to suppress errors if they are not otherwise used.

@@ -51,7 +51,7 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
 
   firstReq := &querypb.WatchDmChannelsRequest{
     CollectionID: defaultCollectionID,
-    PartitionID:  defaultPartitionID,
+    PartitionIDs: []UniqueID{defaultPartitionID},
     Infos: []*datapb.VchannelInfo{
       {
         ChannelName: "test1",

@@ -60,7 +60,7 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
   }
   secondReq := &querypb.WatchDmChannelsRequest{
     CollectionID: defaultCollectionID,
-    PartitionID:  defaultPartitionID,
+    PartitionIDs: []UniqueID{defaultPartitionID},
     Infos: []*datapb.VchannelInfo{
       {
         ChannelName: "test2",

@@ -451,7 +451,7 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
   watchRequest := &querypb.WatchDmChannelsRequest{
     Base:         msgBase,
     CollectionID: collectionID,
-    PartitionID:  partitionID,
+    PartitionIDs: toLoadPartitionIDs,
     Infos:        []*datapb.VchannelInfo{info},
     Schema:       lct.Schema,
   }

@@ -795,7 +795,7 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
   watchDmRequest := &querypb.WatchDmChannelsRequest{
     Base:         msgBase,
     CollectionID: collectionID,
-    PartitionID:  partitionID,
+    PartitionIDs: partitionIDs,
     Infos:        []*datapb.VchannelInfo{info},
     Schema:       lpt.Schema,
   }

@@ -1291,7 +1291,7 @@ func (wdt *watchDmChannelTask) reschedule(ctx context.Context) ([]task, error) {
   req := &querypb.WatchDmChannelsRequest{
     Base:         msgBase,
     CollectionID: collectionID,
-    PartitionID:  wdt.PartitionID,
+    PartitionIDs: wdt.PartitionIDs,
     Infos:        []*datapb.VchannelInfo{info},
     Schema:       wdt.Schema,
     ExcludeInfos: wdt.ExcludeInfos,

@@ -1801,7 +1801,7 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
   watchRequest := &querypb.WatchDmChannelsRequest{
     Base:         msgBase,
     CollectionID: collectionID,
-    PartitionID:  partitionID,
+    PartitionIDs: partitionIDs,
     Infos:        []*datapb.VchannelInfo{channelInfo},
     Schema:       schema,
   }

@@ -140,7 +140,7 @@ func genWatchDmChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID i
     },
     NodeID:       nodeID,
     CollectionID: defaultCollectionID,
-    PartitionID:  defaultPartitionID,
+    PartitionIDs: []UniqueID{defaultPartitionID},
     Schema:       schema,
     Infos:        []*datapb.VchannelInfo{vChannelInfo},
   }

@@ -55,14 +55,14 @@ func TestCollection_vChannel(t *testing.T) {
   collectionMeta := genTestCollectionMeta(collectionID, false)
 
   collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
-  collection.addVChannels([]Channel{defaultVChannel})
-  collection.addVChannels([]Channel{defaultVChannel})
+  collection.addVChannels([]Channel{defaultDMLChannel})
+  collection.addVChannels([]Channel{defaultDMLChannel})
   collection.addVChannels([]Channel{"TestCollection_addVChannel_channel"})
 
   channels := collection.getVChannels()
   assert.Equal(t, 2, len(channels))
 
-  collection.removeVChannel(defaultVChannel)
+  collection.removeVChannel(defaultDMLChannel)
   channels = collection.getVChannels()
   assert.Equal(t, 1, len(channels))
 }

@@ -72,14 +72,14 @@ func TestCollection_vDeltaChannel(t *testing.T) {
   collectionMeta := genTestCollectionMeta(collectionID, false)
 
   collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
-  collection.addVDeltaChannels([]Channel{defaultHistoricalVChannel})
-  collection.addVDeltaChannels([]Channel{defaultHistoricalVChannel})
+  collection.addVDeltaChannels([]Channel{defaultDeltaChannel})
+  collection.addVDeltaChannels([]Channel{defaultDeltaChannel})
   collection.addVDeltaChannels([]Channel{"TestCollection_addVDeltaChannel_channel"})
 
   channels := collection.getVDeltaChannels()
   assert.Equal(t, 2, len(channels))
 
-  collection.removeVDeltaChannel(defaultHistoricalVChannel)
+  collection.removeVDeltaChannel(defaultDeltaChannel)
   channels = collection.getVDeltaChannels()
   assert.Equal(t, 1, len(channels))
 }

@@ -18,7 +18,6 @@ package querynode
 
 import (
   "context"
-  "errors"
   "fmt"
   "sync"
 

@@ -28,22 +27,13 @@ import (
   "github.com/milvus-io/milvus/internal/msgstream"
 )
 
-// loadType is load collection or load partition
-type loadType = int32
-
-const (
-  loadTypeCollection loadType = 0
-  loadTypePartition  loadType = 1
-)
-
-// dataSyncService manages a lot of flow graphs for collections and partitions
+// dataSyncService manages a lot of flow graphs
 type dataSyncService struct {
   ctx context.Context
 
-  mu                        sync.Mutex // guards FlowGraphs
-  collectionFlowGraphs      map[UniqueID]map[Channel]*queryNodeFlowGraph // map[collectionID]flowGraphs
-  collectionDeltaFlowGraphs map[UniqueID]map[Channel]*queryNodeFlowGraph
-  partitionFlowGraphs       map[UniqueID]map[Channel]*queryNodeFlowGraph // map[partitionID]flowGraphs
+  mu                     sync.Mutex // guards FlowGraphs
+  dmlChannel2FlowGraph   map[Channel]*queryNodeFlowGraph
+  deltaChannel2FlowGraph map[Channel]*queryNodeFlowGraph
 
   streamingReplica  ReplicaInterface
   historicalReplica ReplicaInterface
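
The struct change above is the heart of the simplification: three nested maps keyed by collection or partition ID give way to two flat maps keyed by the vchannel name, so a lookup no longer needs to know whether a flow graph was created for a collection load or a partition load. A hypothetical helper (not part of this commit) shows how small such a lookup becomes with the new layout:

// hasDMLFlowGraph is an illustrative helper, not in the commit: with the flat
// channel-keyed map, an existence check needs only the channel name.
func (dsService *dataSyncService) hasDMLFlowGraph(channel Channel) bool {
    dsService.mu.Lock()
    defer dsService.mu.Unlock()
    _, ok := dsService.dmlChannel2FlowGraph[channel]
    return ok
}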
|
@@ -51,227 +41,142 @@ type dataSyncService struct {
   msFactory msgstream.Factory
 }
 
-// collection flow graph
-// addCollectionFlowGraph add a collection flowGraph to collectionFlowGraphs
-func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID, vChannels []string) {
+// addFlowGraphsForDMLChannels add flowGraphs to dmlChannel2FlowGraph
+func (dsService *dataSyncService) addFlowGraphsForDMLChannels(collectionID UniqueID, dmlChannels []string) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
-    dsService.collectionFlowGraphs[collectionID] = make(map[Channel]*queryNodeFlowGraph)
-  }
-  for _, vChannel := range vChannels {
-    // collection flow graph doesn't need partition id
-    partitionID := UniqueID(0)
+  for _, channel := range dmlChannels {
+    if _, ok := dsService.dmlChannel2FlowGraph[channel]; ok {
+      log.Warn("dml flow graph has been existed",
+        zap.Any("collectionID", collectionID),
+        zap.Any("channel", channel),
+      )
+      continue
+    }
     newFlowGraph := newQueryNodeFlowGraph(dsService.ctx,
-      loadTypeCollection,
       collectionID,
-      partitionID,
       dsService.streamingReplica,
       dsService.tSafeReplica,
-      vChannel,
+      channel,
       dsService.msFactory)
-    dsService.collectionFlowGraphs[collectionID][vChannel] = newFlowGraph
-    log.Debug("add collection flow graph",
+    dsService.dmlChannel2FlowGraph[channel] = newFlowGraph
+    log.Debug("add DML flow graph",
       zap.Any("collectionID", collectionID),
-      zap.Any("channel", vChannel))
+      zap.Any("channel", channel))
   }
 }
 
-// collection flow graph
-// addCollectionFlowGraphDelta add a collection flowGraph to collectionFlowGraphs
-func (dsService *dataSyncService) addCollectionDeltaFlowGraph(collectionID UniqueID, vChannels []string) {
+// addFlowGraphsForDeltaChannels add flowGraphs to deltaChannel2FlowGraph
+func (dsService *dataSyncService) addFlowGraphsForDeltaChannels(collectionID UniqueID, deltaChannels []string) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionDeltaFlowGraphs[collectionID]; !ok {
-    dsService.collectionDeltaFlowGraphs[collectionID] = make(map[Channel]*queryNodeFlowGraph)
-  }
-  for _, vChannel := range vChannels {
-    // collection flow graph doesn't need partition id
-    partitionID := UniqueID(0)
+  for _, channel := range deltaChannels {
+    if _, ok := dsService.deltaChannel2FlowGraph[channel]; ok {
+      log.Warn("delta flow graph has been existed",
+        zap.Any("collectionID", collectionID),
+        zap.Any("channel", channel),
+      )
+      continue
+    }
     newFlowGraph := newQueryNodeDeltaFlowGraph(dsService.ctx,
       collectionID,
-      partitionID,
       dsService.historicalReplica,
       dsService.tSafeReplica,
-      vChannel,
+      channel,
       dsService.msFactory)
-    dsService.collectionDeltaFlowGraphs[collectionID][vChannel] = newFlowGraph
-    log.Debug("add collection flow graph",
+    dsService.deltaChannel2FlowGraph[channel] = newFlowGraph
+    log.Debug("add delta flow graph",
       zap.Any("collectionID", collectionID),
-      zap.Any("channel", vChannel))
+      zap.Any("channel", channel))
   }
 }
 
-// getCollectionFlowGraphs returns the collection flowGraph by collectionID
-func (dsService *dataSyncService) getCollectionFlowGraphs(collectionID UniqueID, vChannels []string) (map[Channel]*queryNodeFlowGraph, error) {
+// getFlowGraphByDMLChannel returns the DML flowGraph by channel
+func (dsService *dataSyncService) getFlowGraphByDMLChannel(collectionID UniqueID, channel Channel) (*queryNodeFlowGraph, error) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
-    return nil, errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
+  if _, ok := dsService.dmlChannel2FlowGraph[channel]; !ok {
+    return nil, fmt.Errorf("DML flow graph doesn't existed, collectionID = %d", collectionID)
   }
 
-  tmpFGs := make(map[Channel]*queryNodeFlowGraph)
-  for _, channel := range vChannels {
-    if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
-      tmpFGs[channel] = dsService.collectionFlowGraphs[collectionID][channel]
-    }
-  }
-
-  return tmpFGs, nil
+  // TODO: return clone?
+  return dsService.dmlChannel2FlowGraph[channel], nil
 }
 
-// getCollectionDeltaFlowGraphs returns the collection delta flowGraph by collectionID
-func (dsService *dataSyncService) getCollectionDeltaFlowGraphs(collectionID UniqueID, vChannels []string) (map[Channel]*queryNodeFlowGraph, error) {
+// getFlowGraphByDeltaChannel returns the delta flowGraph by channel
+func (dsService *dataSyncService) getFlowGraphByDeltaChannel(collectionID UniqueID, channel Channel) (*queryNodeFlowGraph, error) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionDeltaFlowGraphs[collectionID]; !ok {
-    return nil, errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
+  if _, ok := dsService.deltaChannel2FlowGraph[channel]; !ok {
+    return nil, fmt.Errorf("delta flow graph doesn't existed, collectionID = %d", collectionID)
   }
 
-  tmpFGs := make(map[Channel]*queryNodeFlowGraph)
-  for _, channel := range vChannels {
-    if _, ok := dsService.collectionDeltaFlowGraphs[collectionID][channel]; ok {
-      tmpFGs[channel] = dsService.collectionDeltaFlowGraphs[collectionID][channel]
-    }
-  }
-
-  return tmpFGs, nil
+  // TODO: return clone?
+  return dsService.deltaChannel2FlowGraph[channel], nil
 }
 
-// startCollectionFlowGraph starts the collection flow graph by collectionID
-func (dsService *dataSyncService) startCollectionFlowGraph(collectionID UniqueID, vChannels []string) error {
+// startFlowGraphByDMLChannel starts the DML flow graph by channel
+func (dsService *dataSyncService) startFlowGraphByDMLChannel(collectionID UniqueID, channel Channel) error {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
-    return errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
-  }
-  for _, channel := range vChannels {
-    if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
-      // start flow graph
-      log.Debug("start collection flow graph", zap.Any("channel", channel))
-      dsService.collectionFlowGraphs[collectionID][channel].flowGraph.Start()
-    }
+  if _, ok := dsService.dmlChannel2FlowGraph[channel]; !ok {
+    return fmt.Errorf("DML flow graph doesn't existed, collectionID = %d", collectionID)
   }
+  log.Debug("start DML flow graph",
+    zap.Any("collectionID", collectionID),
+    zap.Any("channel", channel),
+  )
+  dsService.dmlChannel2FlowGraph[channel].flowGraph.Start()
   return nil
 }
 
-// startCollectionDeltaFlowGraph would start the collection delta flow graph by collectionID
-func (dsService *dataSyncService) startCollectionDeltaFlowGraph(collectionID UniqueID, vChannels []string) error {
+// startFlowGraphForDeltaChannel would start the delta flow graph by channel
+func (dsService *dataSyncService) startFlowGraphForDeltaChannel(collectionID UniqueID, channel Channel) error {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionDeltaFlowGraphs[collectionID]; !ok {
-    return errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
-  }
-  for _, channel := range vChannels {
-    if _, ok := dsService.collectionDeltaFlowGraphs[collectionID][channel]; ok {
-      // start flow graph
-      log.Debug("start collection flow graph", zap.Any("channel", channel))
-      dsService.collectionDeltaFlowGraphs[collectionID][channel].flowGraph.Start()
-    }
+  if _, ok := dsService.deltaChannel2FlowGraph[channel]; !ok {
+    return fmt.Errorf("delta flow graph doesn't existed, collectionID = %d", collectionID)
   }
+  log.Debug("start delta flow graph",
+    zap.Any("collectionID", collectionID),
+    zap.Any("channel", channel),
+  )
+  dsService.deltaChannel2FlowGraph[channel].flowGraph.Start()
   return nil
 }
 
-// removeCollectionFlowGraph would remove the collection flow graph by collectionID
-func (dsService *dataSyncService) removeCollectionFlowGraph(collectionID UniqueID) {
+// removeFlowGraphsByDMLChannels would remove the DML flow graphs by channels
+func (dsService *dataSyncService) removeFlowGraphsByDMLChannels(channels []Channel) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionFlowGraphs[collectionID]; ok {
-    for _, nodeFG := range dsService.collectionFlowGraphs[collectionID] {
+  for _, channel := range channels {
+    if _, ok := dsService.dmlChannel2FlowGraph[channel]; ok {
       // close flow graph
-      nodeFG.close()
+      dsService.dmlChannel2FlowGraph[channel].close()
     }
-    dsService.collectionFlowGraphs[collectionID] = nil
+    delete(dsService.dmlChannel2FlowGraph, channel)
   }
-  delete(dsService.collectionFlowGraphs, collectionID)
 }
 
-// removeCollectionDeltaFlowGraph would remove the collection delta flow graph by collectionID
-func (dsService *dataSyncService) removeCollectionDeltaFlowGraph(collectionID UniqueID) {
+// removeFlowGraphsByDeltaChannels would remove the delta flow graphs by channels
+func (dsService *dataSyncService) removeFlowGraphsByDeltaChannels(channels []Channel) {
   dsService.mu.Lock()
   defer dsService.mu.Unlock()
 
-  if _, ok := dsService.collectionDeltaFlowGraphs[collectionID]; ok {
-    for _, nodeFG := range dsService.collectionDeltaFlowGraphs[collectionID] {
+  for _, channel := range channels {
+    if _, ok := dsService.deltaChannel2FlowGraph[channel]; ok {
       // close flow graph
-      nodeFG.close()
+      dsService.deltaChannel2FlowGraph[channel].close()
     }
-    dsService.collectionDeltaFlowGraphs[collectionID] = nil
+    delete(dsService.deltaChannel2FlowGraph, channel)
   }
-  delete(dsService.collectionDeltaFlowGraphs, collectionID)
-}
-
-// partition flow graph
-// addPartitionFlowGraph adds a partition flow graph to dataSyncService
-func (dsService *dataSyncService) addPartitionFlowGraph(collectionID UniqueID, partitionID UniqueID, vChannels []string) {
-  dsService.mu.Lock()
-  defer dsService.mu.Unlock()
-
-  if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
-    dsService.partitionFlowGraphs[partitionID] = make(map[Channel]*queryNodeFlowGraph)
-  }
-  for _, vChannel := range vChannels {
-    newFlowGraph := newQueryNodeFlowGraph(dsService.ctx,
-      loadTypePartition,
-      collectionID,
-      partitionID,
-      dsService.streamingReplica,
-      dsService.tSafeReplica,
-      vChannel,
-      dsService.msFactory)
-    dsService.partitionFlowGraphs[partitionID][vChannel] = newFlowGraph
-  }
-}
-
-// getPartitionFlowGraphs returns the partition flow graph by partitionID
-func (dsService *dataSyncService) getPartitionFlowGraphs(partitionID UniqueID, vChannels []string) (map[Channel]*queryNodeFlowGraph, error) {
-  dsService.mu.Lock()
-  defer dsService.mu.Unlock()
-
-  if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
-    return nil, errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
-  }
-
-  tmpFGs := make(map[Channel]*queryNodeFlowGraph)
-  for _, channel := range vChannels {
-    if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
-      tmpFGs[channel] = dsService.partitionFlowGraphs[partitionID][channel]
-    }
-  }
-
-  return tmpFGs, nil
-}
-
-// startPartitionFlowGraph would start the partition flow graph
-func (dsService *dataSyncService) startPartitionFlowGraph(partitionID UniqueID, vChannels []string) error {
-  dsService.mu.Lock()
-  defer dsService.mu.Unlock()
-
-  if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
-    return errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
-  }
-  for _, channel := range vChannels {
-    if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
-      // start flow graph
-      log.Debug("start partition flow graph", zap.Any("channel", channel))
-      dsService.partitionFlowGraphs[partitionID][channel].flowGraph.Start()
-    }
-  }
-  return nil
-}
-
-// removePartitionFlowGraph removes the partition flow graph from dataSyncService by partitionID
-func (dsService *dataSyncService) removePartitionFlowGraph(partitionID UniqueID) {
-  dsService.mu.Lock()
-  defer dsService.mu.Unlock()
-  delete(dsService.partitionFlowGraphs, partitionID)
 }
-
+ 
 // newDataSyncService returns a new dataSyncService
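
Taken together, the methods above give dataSyncService a uniform per-channel lifecycle: add, get, start, and remove are all keyed by the vchannel name, and the collection ID is kept only for logging and error messages. A hedged usage sketch (a hypothetical caller inside package querynode; the channel slice is a placeholder):

// startDMLChannels is a hypothetical caller sketching the new per-channel API.
func startDMLChannels(dsService *dataSyncService, collectionID UniqueID, channels []Channel) error {
    // One flow graph per DML channel; duplicates are skipped with a warning.
    dsService.addFlowGraphsForDMLChannels(collectionID, channels)
    for _, channel := range channels {
        // Lookup and start are both resolved by the channel name alone.
        if _, err := dsService.getFlowGraphByDMLChannel(collectionID, channel); err != nil {
            return err
        }
        if err := dsService.startFlowGraphByDMLChannel(collectionID, channel); err != nil {
            return err
        }
    }
    return nil
}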
|
@@ -282,35 +187,30 @@ func newDataSyncService(ctx context.Context,
   factory msgstream.Factory) *dataSyncService {
 
   return &dataSyncService{
-    ctx:                       ctx,
-    collectionFlowGraphs:      make(map[UniqueID]map[Channel]*queryNodeFlowGraph),
-    collectionDeltaFlowGraphs: map[int64]map[string]*queryNodeFlowGraph{},
-    partitionFlowGraphs:       make(map[UniqueID]map[Channel]*queryNodeFlowGraph),
-    streamingReplica:          streamingReplica,
-    historicalReplica:         historicalReplica,
-    tSafeReplica:              tSafeReplica,
-    msFactory:                 factory,
+    ctx:                    ctx,
+    dmlChannel2FlowGraph:   make(map[Channel]*queryNodeFlowGraph),
+    deltaChannel2FlowGraph: make(map[Channel]*queryNodeFlowGraph),
+    streamingReplica:       streamingReplica,
+    historicalReplica:      historicalReplica,
+    tSafeReplica:           tSafeReplica,
+    msFactory:              factory,
   }
 }
 
 // close would close and remove all flow graphs in dataSyncService
 func (dsService *dataSyncService) close() {
-  // close collection flow graphs
-  for _, nodeFGs := range dsService.collectionFlowGraphs {
-    for _, nodeFG := range nodeFGs {
-      if nodeFG != nil {
-        nodeFG.flowGraph.Close()
-      }
+  // close DML flow graphs
+  for channel, nodeFG := range dsService.dmlChannel2FlowGraph {
+    if nodeFG != nil {
+      nodeFG.flowGraph.Close()
     }
+    delete(dsService.dmlChannel2FlowGraph, channel)
   }
-  // close partition flow graphs
-  for _, nodeFGs := range dsService.partitionFlowGraphs {
-    for _, nodeFG := range nodeFGs {
-      if nodeFG != nil {
-        nodeFG.flowGraph.Close()
-      }
+  // close delta flow graphs
+  for channel, nodeFG := range dsService.deltaChannel2FlowGraph {
+    if nodeFG != nil {
+      nodeFG.flowGraph.Close()
     }
+    delete(dsService.deltaChannel2FlowGraph, channel)
   }
-  dsService.collectionFlowGraphs = make(map[UniqueID]map[Channel]*queryNodeFlowGraph)
-  dsService.partitionFlowGraphs = make(map[UniqueID]map[Channel]*queryNodeFlowGraph)
 }

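
One design note on the new close(): instead of reallocating the bookkeeping maps at the end, it deletes each entry as the corresponding flow graph is closed (deleting from a map while ranging over it is well defined in Go), so both maps end up empty and the service can be repopulated or rebuilt. A hypothetical sketch of reusing it afterwards, assuming the constructor and fields shown above:

// resetDataSyncService is an illustrative helper, not in the commit: after close()
// drains dmlChannel2FlowGraph and deltaChannel2FlowGraph, a fresh service can be built.
func resetDataSyncService(ctx context.Context, old *dataSyncService, factory msgstream.Factory) *dataSyncService {
    old.close() // closes and removes every DML and delta flow graph
    return newDataSyncService(ctx, old.streamingReplica, old.historicalReplica, old.tSafeReplica, factory)
}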
@@ -18,126 +18,16 @@ package querynode
 
 import (
   "context"
-  "math"
   "testing"
 
   "github.com/stretchr/testify/assert"
-
-  "github.com/milvus-io/milvus/internal/common"
-  "github.com/milvus-io/milvus/internal/msgstream"
-  "github.com/milvus-io/milvus/internal/proto/commonpb"
-  "github.com/milvus-io/milvus/internal/proto/internalpb"
 )
 
-// NOTE: start pulsar before test
-func TestDataSyncService_Start(t *testing.T) {
-  collectionID := UniqueID(0)
-
-  node := newQueryNodeMock()
-  initTestMeta(t, node, 0, 0)
-  // test data generate
-  const msgLength = 10
-  const DIM = 16
-  const N = 10
-
-  var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
-  var rawData []byte
-  for _, ele := range vec {
-    buf := make([]byte, 4)
-    common.Endian.PutUint32(buf, math.Float32bits(ele))
-    rawData = append(rawData, buf...)
-  }
-  bs := make([]byte, 4)
-  common.Endian.PutUint32(bs, 1)
-  rawData = append(rawData, bs...)
-  var records []*commonpb.Blob
-  for i := 0; i < N; i++ {
-    blob := &commonpb.Blob{
-      Value: rawData,
-    }
-    records = append(records, blob)
-  }
-  // messages generate
-  insertMessages := make([]msgstream.TsMsg, 0)
-  for i := 0; i < msgLength; i++ {
-    var msg msgstream.TsMsg = &msgstream.InsertMsg{
-      BaseMsg: msgstream.BaseMsg{
-        HashValues: []uint32{
-          uint32(i), uint32(i),
-        },
-      },
-      InsertRequest: internalpb.InsertRequest{
-        Base: &commonpb.MsgBase{
-          MsgType:   commonpb.MsgType_Insert,
-          MsgID:     0,
-          Timestamp: Timestamp(i + 1000),
-          SourceID:  0,
-        },
-        CollectionID: collectionID,
-        PartitionID:  defaultPartitionID,
-        SegmentID:    UniqueID(0),
-        ShardName:    "0",
-        Timestamps:   []Timestamp{Timestamp(i + 1000), Timestamp(i + 1000)},
-        RowIDs:       []int64{int64(i), int64(i)},
-        RowData: []*commonpb.Blob{
-          {Value: rawData},
-          {Value: rawData},
-        },
-      },
-    }
-    insertMessages = append(insertMessages, msg)
-  }
-
-  // generate timeTick
-  timeTickMsgPack := msgstream.MsgPack{}
-  baseMsg := msgstream.BaseMsg{
-    BeginTimestamp: 0,
-    EndTimestamp:   0,
-    HashValues:     []uint32{0},
-  }
-  timeTickResult := internalpb.TimeTickMsg{
-    Base: &commonpb.MsgBase{
-      MsgType:   commonpb.MsgType_TimeTick,
-      MsgID:     0,
-      Timestamp: math.MaxUint64,
-      SourceID:  0,
-    },
-  }
-  timeTickMsg := &msgstream.TimeTickMsg{
-    BaseMsg:     baseMsg,
-    TimeTickMsg: timeTickResult,
-  }
-  timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
-
-  // pulsar produce
-  const receiveBufSize = 1024
-  pulsarURL := Params.PulsarAddress
-
-  msFactory := msgstream.NewPmsFactory()
-  m := map[string]interface{}{
-    "receiveBufSize": receiveBufSize,
-    "pulsarAddress":  pulsarURL,
-    "pulsarBufSize":  1024}
-  err := msFactory.SetParams(m)
-  assert.Nil(t, err)
-
-  channels := []Channel{"0"}
-  node.dataSyncService.addCollectionFlowGraph(collectionID, channels)
-  err = node.dataSyncService.startCollectionFlowGraph(collectionID, channels)
-  assert.NoError(t, err)
-
-  <-node.queryNodeLoopCtx.Done()
-  node.dataSyncService.close()
-
-  err = node.Stop()
-  assert.NoError(t, err)
-}
-
-func TestDataSyncService_collectionFlowGraphs(t *testing.T) {
+func TestDataSyncService_DMLFlowGraphs(t *testing.T) {
   ctx, cancel := context.WithCancel(context.Background())
   defer cancel()
 
-  streaming, err := genSimpleReplica()
+  streamingReplica, err := genSimpleReplica()
   assert.NoError(t, err)
 
   historicalReplica, err := genSimpleReplica()
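
The deleted TestDataSyncService_Start required a running Pulsar and a full QueryNode mock; the tests that replace it (continued below) drive the flow-graph bookkeeping directly against in-memory replicas. A condensed sketch of that setup (hypothetical helper: genSimpleReplica and newTSafeReplica come from the package's test utilities, and genFactory stands in for the factory construction elided from this diff):

// newTestDataSyncService is a hypothetical condensation of the new test setup:
// no Pulsar, only in-memory replicas and a message-stream factory.
func newTestDataSyncService(ctx context.Context, t *testing.T) *dataSyncService {
    streamingReplica, err := genSimpleReplica()
    assert.NoError(t, err)
    historicalReplica, err := genSimpleReplica()
    assert.NoError(t, err)
    fac, err := genFactory() // stand-in for the elided factory setup
    assert.NoError(t, err)
    return newDataSyncService(ctx, streamingReplica, historicalReplica, newTSafeReplica(), fac)
}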
|
@ -147,44 +37,48 @@ func TestDataSyncService_collectionFlowGraphs(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
tSafe := newTSafeReplica()
|
||||
dataSyncService := newDataSyncService(ctx, streaming, historicalReplica, tSafe, fac)
|
||||
dataSyncService := newDataSyncService(ctx, streamingReplica, historicalReplica, tSafe, fac)
|
||||
assert.NotNil(t, dataSyncService)
|
||||
|
||||
dataSyncService.addCollectionFlowGraph(defaultCollectionID, []Channel{defaultVChannel})
|
||||
dataSyncService.addFlowGraphsForDMLChannels(defaultCollectionID, []Channel{defaultDMLChannel})
|
||||
assert.Len(t, dataSyncService.dmlChannel2FlowGraph, 1)
|
||||
|
||||
fg, err := dataSyncService.getCollectionFlowGraphs(defaultCollectionID, []Channel{defaultVChannel})
|
||||
dataSyncService.addFlowGraphsForDMLChannels(defaultCollectionID, []Channel{defaultDMLChannel})
|
||||
assert.Len(t, dataSyncService.dmlChannel2FlowGraph, 1)
|
||||
|
||||
fg, err := dataSyncService.getFlowGraphByDMLChannel(defaultCollectionID, defaultDMLChannel)
|
||||
assert.NotNil(t, fg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(fg))
|
||||
|
||||
fg, err = dataSyncService.getCollectionFlowGraphs(UniqueID(1000), []Channel{defaultVChannel})
|
||||
fg, err = dataSyncService.getFlowGraphByDMLChannel(defaultCollectionID, "invalid-vChannel")
|
||||
assert.Nil(t, fg)
|
||||
assert.Error(t, err)
|
||||
|
||||
fg, err = dataSyncService.getCollectionFlowGraphs(defaultCollectionID, []Channel{"invalid-vChannel"})
|
||||
assert.NotNil(t, fg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(fg))
|
||||
|
||||
fg, err = dataSyncService.getCollectionFlowGraphs(UniqueID(1000), []Channel{"invalid-vChannel"})
|
||||
assert.Nil(t, fg)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = dataSyncService.startCollectionFlowGraph(defaultCollectionID, []Channel{defaultVChannel})
|
||||
err = dataSyncService.startFlowGraphByDMLChannel(defaultCollectionID, defaultDMLChannel)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dataSyncService.removeCollectionFlowGraph(defaultCollectionID)
|
||||
err = dataSyncService.startFlowGraphByDMLChannel(defaultCollectionID, "invalid-vChannel")
|
||||
assert.Error(t, err)
|
||||
|
||||
fg, err = dataSyncService.getCollectionFlowGraphs(defaultCollectionID, []Channel{defaultVChannel})
|
||||
dataSyncService.removeFlowGraphsByDMLChannels([]Channel{defaultDMLChannel})
|
||||
assert.Len(t, dataSyncService.dmlChannel2FlowGraph, 0)
|
||||
|
||||
fg, err = dataSyncService.getFlowGraphByDMLChannel(defaultCollectionID, defaultDMLChannel)
|
||||
assert.Nil(t, fg)
|
||||
assert.Error(t, err)
|
||||
|
||||
dataSyncService.addFlowGraphsForDMLChannels(defaultCollectionID, []Channel{defaultDMLChannel})
|
||||
assert.Len(t, dataSyncService.dmlChannel2FlowGraph, 1)
|
||||
|
||||
dataSyncService.close()
|
||||
assert.Len(t, dataSyncService.dmlChannel2FlowGraph, 0)
|
||||
}
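The test above exercises the reworked dataSyncService API, which now keys flow graphs by DML channel instead of by collection or partition. As a rough illustration of the add/get/remove semantics being asserted, the following is a minimal, self-contained sketch of a channel-keyed registry; the types and names are placeholders (collection IDs and locking are omitted), not the actual Milvus implementation.

package main

import (
    "errors"
    "fmt"
)

type Channel = string

// flowGraph stands in for the real queryNodeFlowGraph.
type flowGraph struct{ channel Channel }

type registry struct {
    graphs map[Channel]*flowGraph
}

// add is idempotent: re-adding an existing channel keeps the old graph,
// so the map length stays 1 after two identical add calls.
func (r *registry) add(channels []Channel) {
    for _, ch := range channels {
        if _, ok := r.graphs[ch]; !ok {
            r.graphs[ch] = &flowGraph{channel: ch}
        }
    }
}

// get returns an error for an unknown channel, mirroring the
// "invalid-vChannel" assertions in the test above.
func (r *registry) get(ch Channel) (*flowGraph, error) {
    fg, ok := r.graphs[ch]
    if !ok {
        return nil, errors.New("flow graph not found for channel " + ch)
    }
    return fg, nil
}

func (r *registry) remove(channels []Channel) {
    for _, ch := range channels {
        delete(r.graphs, ch)
    }
}

func main() {
    r := &registry{graphs: make(map[Channel]*flowGraph)}
    r.add([]Channel{"dml-0"})
    r.add([]Channel{"dml-0"}) // no duplicate entry
    fg, err := r.get("dml-0")
    fmt.Println(fg.channel, err, len(r.graphs)) // dml-0 <nil> 1
    r.remove([]Channel{"dml-0"})
    _, err = r.get("dml-0")
    fmt.Println(err, len(r.graphs)) // not found error, 0
}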
func TestDataSyncService_partitionFlowGraphs(t *testing.T) {
func TestDataSyncService_DeltaFlowGraphs(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    streaming, err := genSimpleReplica()
    streamingReplica, err := genSimpleReplica()
    assert.NoError(t, err)

    historicalReplica, err := genSimpleReplica()

@@ -194,63 +88,39 @@ func TestDataSyncService_partitionFlowGraphs(t *testing.T) {
    assert.NoError(t, err)

    tSafe := newTSafeReplica()

    dataSyncService := newDataSyncService(ctx, streaming, historicalReplica, tSafe, fac)
    dataSyncService := newDataSyncService(ctx, streamingReplica, historicalReplica, tSafe, fac)
    assert.NotNil(t, dataSyncService)

    dataSyncService.addPartitionFlowGraph(defaultPartitionID, defaultPartitionID, []Channel{defaultVChannel})
    dataSyncService.addFlowGraphsForDeltaChannels(defaultCollectionID, []Channel{defaultDeltaChannel})
    assert.Len(t, dataSyncService.deltaChannel2FlowGraph, 1)

    fg, err := dataSyncService.getPartitionFlowGraphs(defaultPartitionID, []Channel{defaultVChannel})
    dataSyncService.addFlowGraphsForDeltaChannels(defaultCollectionID, []Channel{defaultDeltaChannel})
    assert.Len(t, dataSyncService.deltaChannel2FlowGraph, 1)

    fg, err := dataSyncService.getFlowGraphByDeltaChannel(defaultCollectionID, defaultDeltaChannel)
    assert.NotNil(t, fg)
    assert.NoError(t, err)
    assert.Equal(t, 1, len(fg))

    fg, err = dataSyncService.getPartitionFlowGraphs(UniqueID(1000), []Channel{defaultVChannel})
    fg, err = dataSyncService.getFlowGraphByDeltaChannel(defaultCollectionID, "invalid-vChannel")
    assert.Nil(t, fg)
    assert.Error(t, err)

    fg, err = dataSyncService.getPartitionFlowGraphs(defaultPartitionID, []Channel{"invalid-vChannel"})
    assert.NotNil(t, fg)
    assert.NoError(t, err)
    assert.Equal(t, 0, len(fg))

    fg, err = dataSyncService.getPartitionFlowGraphs(UniqueID(1000), []Channel{"invalid-vChannel"})
    assert.Nil(t, fg)
    assert.Error(t, err)

    err = dataSyncService.startPartitionFlowGraph(defaultPartitionID, []Channel{defaultVChannel})
    err = dataSyncService.startFlowGraphForDeltaChannel(defaultCollectionID, defaultDeltaChannel)
    assert.NoError(t, err)

    dataSyncService.removePartitionFlowGraph(defaultPartitionID)
    err = dataSyncService.startFlowGraphForDeltaChannel(defaultCollectionID, "invalid-vChannel")
    assert.Error(t, err)

    fg, err = dataSyncService.getPartitionFlowGraphs(defaultPartitionID, []Channel{defaultVChannel})
    dataSyncService.removeFlowGraphsByDeltaChannels([]Channel{defaultDeltaChannel})
    assert.Len(t, dataSyncService.deltaChannel2FlowGraph, 0)

    fg, err = dataSyncService.getFlowGraphByDeltaChannel(defaultCollectionID, defaultDeltaChannel)
    assert.Nil(t, fg)
    assert.Error(t, err)
}

func TestDataSyncService_removePartitionFlowGraphs(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    t.Run("test no tSafe", func(t *testing.T) {
        streaming, err := genSimpleReplica()
        assert.NoError(t, err)

        historicalReplica, err := genSimpleReplica()
        assert.NoError(t, err)

        fac, err := genFactory()
        assert.NoError(t, err)

        tSafe := newTSafeReplica()
        tSafe.addTSafe(defaultVChannel)

        dataSyncService := newDataSyncService(ctx, streaming, historicalReplica, tSafe, fac)
        assert.NotNil(t, dataSyncService)

        dataSyncService.addPartitionFlowGraph(defaultPartitionID, defaultPartitionID, []Channel{defaultVChannel})

        dataSyncService.tSafeReplica.removeTSafe(defaultVChannel)
        dataSyncService.removePartitionFlowGraph(defaultPartitionID)
    })

    dataSyncService.addFlowGraphsForDeltaChannels(defaultCollectionID, []Channel{defaultDMLChannel})
    assert.Len(t, dataSyncService.deltaChannel2FlowGraph, 1)

    dataSyncService.close()
    assert.Len(t, dataSyncService.deltaChannel2FlowGraph, 0)
}

@ -36,7 +36,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -57,7 +57,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -88,7 +88,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -108,7 +108,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -145,7 +145,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -171,7 +171,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -197,7 +197,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -222,7 +222,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
|
|||
err = historical.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@@ -30,7 +30,6 @@ import (
type filterDeleteNode struct {
    baseNode
    collectionID UniqueID
    partitionID  UniqueID
    replica      ReplicaInterface
}

@@ -114,9 +113,7 @@ func (fddNode *filterDeleteNode) filterInvalidDeleteMessage(msg *msgstream.Delet
}

// newFilteredDeleteNode returns a new filterDeleteNode
func newFilteredDeleteNode(replica ReplicaInterface,
    collectionID UniqueID,
    partitionID UniqueID) *filterDeleteNode {
func newFilteredDeleteNode(replica ReplicaInterface, collectionID UniqueID) *filterDeleteNode {

    maxQueueLength := Params.FlowGraphMaxQueueLength
    maxParallelism := Params.FlowGraphMaxParallelism

@@ -128,7 +125,6 @@ func newFilteredDeleteNode(replica ReplicaInterface,
    return &filterDeleteNode{
        baseNode:     baseNode,
        collectionID: collectionID,
        partitionID:  partitionID,
        replica:      replica,
    }
}

@@ -33,7 +33,7 @@ func getFilterDeleteNode(ctx context.Context) (*filterDeleteNode, error) {
}

    historical.addExcludedSegments(defaultCollectionID, nil)
    return newFilteredDeleteNode(historical, defaultCollectionID, defaultPartitionID), nil
    return newFilteredDeleteNode(historical, defaultCollectionID), nil
}

func TestFlowGraphFilterDeleteNode_filterDeleteNode(t *testing.T) {
|
|
|
@@ -17,8 +17,6 @@
package querynode

import (
    "errors"

    "github.com/opentracing/opentracing-go"
    "go.uber.org/zap"

@@ -32,9 +30,7 @@ import (
// filterDmNode is one of the nodes in query node flow graph
type filterDmNode struct {
    baseNode
    loadType     loadType // load collection or load partition
    collectionID UniqueID
    partitionID  UniqueID
    replica      ReplicaInterface
}

@@ -110,36 +106,29 @@ func (fdmNode *filterDmNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg
    msg.SetTraceCtx(ctx)
    defer sp.Finish()

    if msg.CollectionID != fdmNode.collectionID {
        return nil
    }

    // check if collection and partition exist
    collection := fdmNode.replica.hasCollection(msg.CollectionID)
    partition := fdmNode.replica.hasPartition(msg.PartitionID)
    if fdmNode.loadType == loadTypeCollection && !collection {
    col, err := fdmNode.replica.getCollectionByID(msg.CollectionID)
    if err != nil {
        log.Debug("filter invalid delete message, collection does not exist",
            zap.Any("collectionID", msg.CollectionID),
            zap.Any("partitionID", msg.PartitionID))
        return nil
    }
    if fdmNode.loadType == loadTypePartition && !partition {
        log.Debug("filter invalid delete message, partition does not exist",
            zap.Any("collectionID", msg.CollectionID),
            zap.Any("partitionID", msg.PartitionID))
        return nil
    }

    if msg.CollectionID != fdmNode.collectionID {
        return nil
    }

    // if the flow graph type is partition, check if the partition is target partition
    if fdmNode.loadType == loadTypePartition && msg.PartitionID != fdmNode.partitionID {
        log.Debug("filter invalid delete message, partition is not the target partition",
            zap.Any("collectionID", msg.CollectionID),
            zap.Any("partitionID", msg.PartitionID))
        return nil
    if col.getLoadType() == loadTypePartition {
        if !fdmNode.replica.hasPartition(msg.PartitionID) {
            log.Debug("filter invalid delete message, partition does not exist",
                zap.Any("collectionID", msg.CollectionID),
                zap.Any("partitionID", msg.PartitionID))
            return nil
        }
    }

    // check if partition has been released
    if fdmNode.loadType == loadTypeCollection {
    if col.getLoadType() == loadTypeCollection {
        col, err := fdmNode.replica.getCollectionByID(msg.CollectionID)
        if err != nil {
            log.Warn(err.Error())
|
@ -170,22 +159,6 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
|
|||
sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
|
||||
msg.SetTraceCtx(ctx)
|
||||
defer sp.Finish()
|
||||
// check if collection and partition exist
|
||||
collection := fdmNode.replica.hasCollection(msg.CollectionID)
|
||||
partition := fdmNode.replica.hasPartition(msg.PartitionID)
|
||||
if fdmNode.loadType == loadTypeCollection && !collection {
|
||||
log.Debug("filter invalid insert message, collection does not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
if fdmNode.loadType == loadTypePartition && !partition {
|
||||
log.Debug("filter invalid insert message, partition does not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the collection from message is target collection
|
||||
if msg.CollectionID != fdmNode.collectionID {
|
||||
|
@ -195,21 +168,25 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
|
|||
return nil
|
||||
}
|
||||
|
||||
// if the flow graph type is partition, check if the partition is target partition
|
||||
if fdmNode.loadType == loadTypePartition && msg.PartitionID != fdmNode.partitionID {
|
||||
log.Debug("filter invalid insert message, partition is not the target partition",
|
||||
// check if collection and partition exist
|
||||
col, err := fdmNode.replica.getCollectionByID(msg.CollectionID)
|
||||
if err != nil {
|
||||
log.Debug("filter invalid insert message, collection does not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if partition has been released
|
||||
if fdmNode.loadType == loadTypeCollection {
|
||||
col, err := fdmNode.replica.getCollectionByID(msg.CollectionID)
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
if col.getLoadType() == loadTypePartition {
|
||||
if !fdmNode.replica.hasPartition(msg.PartitionID) {
|
||||
log.Debug("filter invalid insert message, partition does not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// check if partition has been released
|
||||
if col.getLoadType() == loadTypeCollection {
|
||||
if err = col.checkReleasedPartitions([]UniqueID{msg.PartitionID}); err != nil {
|
||||
log.Warn(err.Error())
|
||||
return nil
|
||||
|
@@ -257,10 +234,7 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
}

// newFilteredDmNode returns a new filterDmNode
func newFilteredDmNode(replica ReplicaInterface,
    loadType loadType,
    collectionID UniqueID,
    partitionID UniqueID) *filterDmNode {
func newFilteredDmNode(replica ReplicaInterface, collectionID UniqueID) *filterDmNode {

    maxQueueLength := Params.FlowGraphMaxQueueLength
    maxParallelism := Params.FlowGraphMaxParallelism

@@ -269,17 +243,9 @@ func newFilteredDmNode(replica ReplicaInterface,
    baseNode.SetMaxQueueLength(maxQueueLength)
    baseNode.SetMaxParallelism(maxParallelism)

    if loadType != loadTypeCollection && loadType != loadTypePartition {
        err := errors.New("invalid flow graph type")
        log.Warn(err.Error())
        return nil
    }

    return &filterDmNode{
        baseNode:     baseNode,
        loadType:     loadType,
        collectionID: collectionID,
        partitionID:  partitionID,
        replica:      replica,
    }
}

@@ -36,7 +36,7 @@ func getFilterDMNode(ctx context.Context) (*filterDmNode, error) {
}

    streaming.addExcludedSegments(defaultCollectionID, nil)
    return newFilteredDmNode(streaming, loadTypeCollection, defaultCollectionID, defaultPartitionID), nil
    return newFilteredDmNode(streaming, defaultCollectionID), nil
}

func TestFlowGraphFilterDmNode_filterDmNode(t *testing.T) {
|
@ -47,12 +47,6 @@ func TestFlowGraphFilterDmNode_filterDmNode(t *testing.T) {
|
|||
fg.Name()
|
||||
}
|
||||
|
||||
func TestFlowGraphFilterDmNode_invalidLoadType(t *testing.T) {
|
||||
const invalidLoadType = -1
|
||||
fg := newFilteredDmNode(nil, invalidLoadType, defaultCollectionID, defaultPartitionID)
|
||||
assert.Nil(t, fg)
|
||||
}
|
||||
|
||||
func TestFlowGraphFilterDmNode_filterInvalidInsertMessage(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
@ -82,7 +76,11 @@ func TestFlowGraphFilterDmNode_filterInvalidInsertMessage(t *testing.T) {
|
|||
msg.PartitionID = UniqueID(1000)
|
||||
fg, err := getFilterDMNode(ctx)
|
||||
assert.NoError(t, err)
|
||||
fg.loadType = loadTypePartition
|
||||
|
||||
col, err := fg.replica.getCollectionByID(defaultCollectionID)
|
||||
assert.NoError(t, err)
|
||||
col.setLoadType(loadTypePartition)
|
||||
|
||||
res := fg.filterInvalidInsertMessage(msg)
|
||||
assert.Nil(t, res)
|
||||
})
|
||||
|
@ -97,17 +95,6 @@ func TestFlowGraphFilterDmNode_filterInvalidInsertMessage(t *testing.T) {
|
|||
assert.Nil(t, res)
|
||||
})
|
||||
|
||||
t.Run("test not target partition", func(t *testing.T) {
|
||||
msg, err := genSimpleInsertMsg()
|
||||
assert.NoError(t, err)
|
||||
fg, err := getFilterDMNode(ctx)
|
||||
assert.NoError(t, err)
|
||||
fg.loadType = loadTypePartition
|
||||
fg.partitionID = UniqueID(1000)
|
||||
res := fg.filterInvalidInsertMessage(msg)
|
||||
assert.Nil(t, res)
|
||||
})
|
||||
|
||||
t.Run("test released partition", func(t *testing.T) {
|
||||
msg, err := genSimpleInsertMsg()
|
||||
assert.NoError(t, err)
|
||||
|
@ -201,7 +188,11 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
|
|||
msg.PartitionID = UniqueID(1000)
|
||||
fg, err := getFilterDMNode(ctx)
|
||||
assert.NoError(t, err)
|
||||
fg.loadType = loadTypePartition
|
||||
|
||||
col, err := fg.replica.getCollectionByID(defaultCollectionID)
|
||||
assert.NoError(t, err)
|
||||
col.setLoadType(loadTypePartition)
|
||||
|
||||
res := fg.filterInvalidDeleteMessage(msg)
|
||||
assert.Nil(t, res)
|
||||
})
|
||||
|
@ -216,17 +207,6 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
|
|||
assert.Nil(t, res)
|
||||
})
|
||||
|
||||
t.Run("test delete not target partition", func(t *testing.T) {
|
||||
msg, err := genSimpleDeleteMsg()
|
||||
assert.NoError(t, err)
|
||||
fg, err := getFilterDMNode(ctx)
|
||||
assert.NoError(t, err)
|
||||
fg.loadType = loadTypePartition
|
||||
fg.partitionID = UniqueID(1000)
|
||||
res := fg.filterInvalidDeleteMessage(msg)
|
||||
assert.Nil(t, res)
|
||||
})
|
||||
|
||||
t.Run("test delete released partition", func(t *testing.T) {
|
||||
msg, err := genSimpleDeleteMsg()
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@@ -100,11 +100,16 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {

    // 1. hash insertMessages to insertData
    for _, task := range iMsg.insertMessages {
        // check if partition exists, if not, create partition
        if hasPartition := iNode.streamingReplica.hasPartition(task.PartitionID); !hasPartition {
            err := iNode.streamingReplica.addPartition(task.CollectionID, task.PartitionID)
        // if loadType is loadCollection, check if partition exists, if not, create partition
        col, err := iNode.streamingReplica.getCollectionByID(task.CollectionID)
        if err != nil {
            log.Error(err.Error())
            continue
        }
        if col.getLoadType() == loadTypeCollection {
            err = iNode.streamingReplica.addPartition(task.CollectionID, task.PartitionID)
            if err != nil {
                log.Warn(err.Error())
                log.Error(err.Error())
                continue
            }
        }

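In the hunk above, the insert node now creates a missing partition on demand only when the collection's load type is loadTypeCollection; under a partition-level load the partition is expected to have been added already by the watch task. A rough standalone sketch of that guard follows; the replica type and method are stand-ins for illustration, not the real QueryNode types.

package main

import "fmt"

type loadType int

const (
    loadTypeCollection loadType = iota
    loadTypePartition
)

type replica struct {
    colLoadType loadType
    partitions  map[int64]bool
}

// ensurePartition mirrors the guard above: a missing partition is created
// on demand only when the whole collection was loaded; under a
// partition-level load it is left to the watchDmChannels task.
func (r *replica) ensurePartition(partitionID int64) bool {
    if r.partitions[partitionID] {
        return true
    }
    if r.colLoadType != loadTypeCollection {
        return false // not auto-created here
    }
    r.partitions[partitionID] = true
    return true
}

func main() {
    r := &replica{colLoadType: loadTypePartition, partitions: map[int64]bool{1: true}}
    fmt.Println(r.ensurePartition(1)) // true: already present
    fmt.Println(r.ensurePartition(2)) // false: not auto-created under loadTypePartition
}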
@ -80,7 +80,7 @@ func TestFlowGraphInsertNode_insert(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -101,7 +101,7 @@ func TestFlowGraphInsertNode_insert(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -132,7 +132,7 @@ func TestFlowGraphInsertNode_insert(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -152,7 +152,7 @@ func TestFlowGraphInsertNode_delete(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -178,7 +178,7 @@ func TestFlowGraphInsertNode_delete(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -198,7 +198,7 @@ func TestFlowGraphInsertNode_delete(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -230,7 +230,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -267,7 +267,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -293,7 +293,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -319,7 +319,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
@ -344,7 +344,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
err = streaming.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@@ -34,16 +34,13 @@ type queryNodeFlowGraph struct {
    ctx          context.Context
    cancel       context.CancelFunc
    collectionID UniqueID
    partitionID  UniqueID
    channel      Channel
    flowGraph    *flowgraph.TimeTickedFlowGraph
    dmlStream    msgstream.MsgStream
}

func newQueryNodeFlowGraph(ctx context.Context,
    loadType loadType,
    collectionID UniqueID,
    partitionID UniqueID,
    streamingReplica ReplicaInterface,
    tSafeReplica TSafeReplicaInterface,
    channel Channel,

@@ -55,15 +52,14 @@ func newQueryNodeFlowGraph(ctx context.Context,
        ctx:          ctx1,
        cancel:       cancel,
        collectionID: collectionID,
        partitionID:  partitionID,
        channel:      channel,
        flowGraph:    flowgraph.NewTimeTickedFlowGraph(ctx1),
    }

    var dmStreamNode node = q.newDmInputNode(ctx1, factory)
    var filterDmNode node = newFilteredDmNode(streamingReplica, loadType, collectionID, partitionID)
    var filterDmNode node = newFilteredDmNode(streamingReplica, collectionID)
    var insertNode node = newInsertNode(streamingReplica)
    var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, loadType, channel, factory)
    var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, collectionID, channel, factory)

    q.flowGraph.AddNode(dmStreamNode)
    q.flowGraph.AddNode(filterDmNode)

@@ -111,7 +107,6 @@ func newQueryNodeFlowGraph(ctx context.Context,

func newQueryNodeDeltaFlowGraph(ctx context.Context,
    collectionID UniqueID,
    partitionID UniqueID,
    historicalReplica ReplicaInterface,
    tSafeReplica TSafeReplicaInterface,
    channel Channel,

@@ -123,15 +118,14 @@ func newQueryNodeDeltaFlowGraph(ctx context.Context,
        ctx:          ctx1,
        cancel:       cancel,
        collectionID: collectionID,
        partitionID:  partitionID,
        channel:      channel,
        flowGraph:    flowgraph.NewTimeTickedFlowGraph(ctx1),
    }

    var dmStreamNode node = q.newDmInputNode(ctx1, factory)
    var filterDeleteNode node = newFilteredDeleteNode(historicalReplica, collectionID, partitionID)
    var filterDeleteNode node = newFilteredDeleteNode(historicalReplica, collectionID)
    var deleteNode node = newDeleteNode(historicalReplica)
    var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, loadTypeCollection, channel, factory)
    var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, collectionID, channel, factory)

    q.flowGraph.AddNode(dmStreamNode)
    q.flowGraph.AddNode(filterDeleteNode)

@@ -233,7 +227,6 @@ func (q *queryNodeFlowGraph) close() {
    q.flowGraph.Close()
    log.Debug("stop query node flow graph",
        zap.Any("collectionID", q.collectionID),
        zap.Any("partitionID", q.partitionID),
        zap.Any("channel", q.channel),
    )
}

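After this change, both the DML and the delta flow graphs are built per channel with a fixed, linear node chain (input stream, filter, insert or delete, service time) and no partition-specific variant. The following is a minimal generic sketch of wiring such a chain; the node types are placeholders and the real flowgraph.TimeTickedFlowGraph API is not reproduced here.

package main

import "fmt"

// node is a placeholder for one flow graph stage.
type node interface{ Name() string }

type namedNode struct{ name string }

func (n namedNode) Name() string { return n.name }

type graph struct{ nodes []node }

func (g *graph) add(n node) { g.nodes = append(g.nodes, n) }

// edge just records the linear pipeline order for illustration.
func (g *graph) edge(from, to node) { fmt.Printf("%s -> %s\n", from.Name(), to.Name()) }

func main() {
    dmStream := namedNode{"dmInputNode"}
    filter := namedNode{"filterDmNode"}
    insert := namedNode{"insertNode"}
    serviceTime := namedNode{"serviceTimeNode"}

    g := &graph{}
    for _, n := range []node{dmStream, filter, insert, serviceTime} {
        g.add(n)
    }
    // one linear pipeline per DML channel
    g.edge(dmStream, filter)
    g.edge(filter, insert)
    g.edge(insert, serviceTime)
}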
@ -38,15 +38,13 @@ func TestQueryNodeFlowGraph_consumerFlowGraph(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
fg := newQueryNodeFlowGraph(ctx,
|
||||
loadTypeCollection,
|
||||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
streamingReplica,
|
||||
tSafe,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
fac)
|
||||
|
||||
err = fg.consumerFlowGraph(defaultVChannel, defaultSubName)
|
||||
err = fg.consumerFlowGraph(defaultDMLChannel, defaultSubName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fg.close()
|
||||
|
@ -65,16 +63,14 @@ func TestQueryNodeFlowGraph_seekQueryNodeFlowGraph(t *testing.T) {
|
|||
tSafe := newTSafeReplica()
|
||||
|
||||
fg := newQueryNodeFlowGraph(ctx,
|
||||
loadTypeCollection,
|
||||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
streamingReplica,
|
||||
tSafe,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
fac)
|
||||
|
||||
position := &internalpb.MsgPosition{
|
||||
ChannelName: defaultVChannel,
|
||||
ChannelName: defaultDMLChannel,
|
||||
MsgID: []byte{},
|
||||
MsgGroup: defaultSubName,
|
||||
Timestamp: 0,
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
|
|||
|
||||
type serviceTimeNode struct {
|
||||
baseNode
|
||||
loadType loadType
|
||||
collectionID UniqueID
|
||||
vChannel Channel
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
timeTickMsgStream msgstream.MsgStream
|
||||
|
@ -64,7 +64,10 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
|||
// update service time
|
||||
err := stNode.tSafeReplica.setTSafe(stNode.vChannel, serviceTimeMsg.timeRange.timestampMax)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
log.Error("serviceTimeNode setTSafe failed",
|
||||
zap.Any("collectionID", stNode.collectionID),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
//p, _ := tsoutil.ParseTS(serviceTimeMsg.timeRange.timestampMax)
|
||||
//log.Debug("update tSafe:",
|
||||
|
@ -105,7 +108,7 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
|||
|
||||
func newServiceTimeNode(ctx context.Context,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
loadType loadType,
|
||||
collectionID UniqueID,
|
||||
channel Channel,
|
||||
factory msgstream.Factory) *serviceTimeNode {
|
||||
|
||||
|
@ -128,7 +131,7 @@ func newServiceTimeNode(ctx context.Context,
|
|||
|
||||
return &serviceTimeNode{
|
||||
baseNode: baseNode,
|
||||
loadType: loadType,
|
||||
collectionID: collectionID,
|
||||
vChannel: channel,
|
||||
tSafeReplica: tSafeReplica,
|
||||
timeTickMsgStream: timeTimeMsgStream,
|
||||
|
|
|
@ -31,15 +31,15 @@ func TestServiceTimeNode_Operate(t *testing.T) {
|
|||
|
||||
genServiceTimeNode := func() *serviceTimeNode {
|
||||
tSafe := newTSafeReplica()
|
||||
tSafe.addTSafe(defaultVChannel)
|
||||
tSafe.addTSafe(defaultDMLChannel)
|
||||
|
||||
fac, err := genFactory()
|
||||
assert.NoError(t, err)
|
||||
|
||||
node := newServiceTimeNode(ctx,
|
||||
tSafe,
|
||||
loadTypeCollection,
|
||||
defaultVChannel,
|
||||
defaultCollectionID,
|
||||
defaultDMLChannel,
|
||||
fac)
|
||||
return node
|
||||
}
|
||||
|
@ -58,7 +58,6 @@ func TestServiceTimeNode_Operate(t *testing.T) {
|
|||
|
||||
t.Run("test operate of loadTypePartition", func(t *testing.T) {
|
||||
node := genServiceTimeNode()
|
||||
node.loadType = loadTypePartition
|
||||
msg := &serviceTimeMsg{
|
||||
timeRange: TimeRange{
|
||||
timestampMin: 0,
|
||||
|
@ -83,7 +82,7 @@ func TestServiceTimeNode_Operate(t *testing.T) {
|
|||
|
||||
t.Run("test no tSafe", func(t *testing.T) {
|
||||
node := genServiceTimeNode()
|
||||
node.tSafeReplica.removeTSafe(defaultVChannel)
|
||||
node.tSafeReplica.removeTSafe(defaultDMLChannel)
|
||||
msg := &serviceTimeMsg{
|
||||
timeRange: TimeRange{
|
||||
timestampMin: 0,
|
||||
|
|
|
@@ -132,7 +132,7 @@ func TestImpl_WatchDmChannels(t *testing.T) {
        },
        NodeID:       0,
        CollectionID: defaultCollectionID,
        PartitionID:  defaultPartitionID,
        PartitionIDs: []UniqueID{defaultPartitionID},
        Schema:       schema,
    }

@@ -63,12 +63,9 @@ const (
    defaultNProb      = 10
    defaultMetricType = "JACCARD"

    defaultKVRootPath         = "query-node-unittest"
    defaultVChannel           = "query-node-unittest-channel-0"
    defaultHistoricalVChannel = "query-node-unittest-historical-channel-0"
    //defaultQueryChannel = "query-node-unittest-query-channel-0"
    //defaultQueryResultChannel = "query-node-unittest-query-result-channel-0"
    defaultSubName      = "query-node-unittest-sub-name-0"
    defaultDMLChannel   = "query-node-unittest-DML-0"
    defaultDeltaChannel = "query-node-unittest-delta-channel-0"
    defaultSubName      = "query-node-unittest-sub-name-0"
)

const (
|
@ -757,7 +754,7 @@ func genSimpleInsertMsg() (*msgstream.InsertMsg, error) {
|
|||
CollectionID: defaultCollectionID,
|
||||
PartitionID: defaultPartitionID,
|
||||
SegmentID: defaultSegmentID,
|
||||
ShardName: defaultVChannel,
|
||||
ShardName: defaultDMLChannel,
|
||||
Timestamps: genSimpleTimestampFieldData(),
|
||||
RowIDs: genSimpleRowIDField(),
|
||||
RowData: rowData,
|
||||
|
@ -858,7 +855,7 @@ func genSimpleSealedSegment() (*Segment, error) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
}
|
||||
|
||||
|
@ -913,9 +910,9 @@ func genSimpleHistorical(ctx context.Context, tSafeReplica TSafeReplicaInterface
|
|||
return nil, err
|
||||
}
|
||||
col.addVChannels([]Channel{
|
||||
defaultHistoricalVChannel,
|
||||
defaultDeltaChannel,
|
||||
})
|
||||
h.tSafeReplica.addTSafe(defaultHistoricalVChannel)
|
||||
h.tSafeReplica.addTSafe(defaultDeltaChannel)
|
||||
return h, nil
|
||||
}
|
||||
|
||||
|
@ -940,7 +937,7 @@ func genSimpleStreaming(ctx context.Context, tSafeReplica TSafeReplicaInterface)
|
|||
err = r.addSegment(defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeGrowing,
|
||||
true)
|
||||
if err != nil {
|
||||
|
@ -952,9 +949,9 @@ func genSimpleStreaming(ctx context.Context, tSafeReplica TSafeReplicaInterface)
|
|||
return nil, err
|
||||
}
|
||||
col.addVChannels([]Channel{
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
})
|
||||
s.tSafeReplica.addTSafe(defaultVChannel)
|
||||
s.tSafeReplica.addTSafe(defaultDMLChannel)
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -52,6 +52,13 @@ type queryMsg interface {
    TimeoutTs() Timestamp
}

// queryCollection manages and executes the retrieve and search tasks. It can be created
// by LoadCollection or LoadPartition, but can only be destroyed by ReleaseCollection.
// The current query behaviors are defined as follows:
// 1. LoadCollection --> ReleaseCollection: the query fails in the proxy because the collection is not loaded;
// 2. LoadCollection --> ReleasePartition: not allowed, the release fails;
// 3. LoadPartition --> ReleaseCollection: the query fails in the proxy because the collection is not loaded;
// 4. LoadPartition --> ReleasePartition: a query on the collection returns empty results, and a query on the partition returns a notLoaded error.
type queryCollection struct {
    releaseCtx context.Context
    cancel     context.CancelFunc

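The comment above enumerates the expected outcome for each load/release combination. The following standalone sketch models those four cases as a small decision function so they are easy to check at a glance; the enum names and result strings are invented for illustration and are not Milvus APIs.

package main

import "fmt"

type loadOp int
type releaseOp int

const (
    loadCollection loadOp = iota
    loadPartition
)

const (
    releaseCollection releaseOp = iota
    releasePartition
)

// outcome models the behaviors listed in the queryCollection comment above.
func outcome(l loadOp, r releaseOp) string {
    switch {
    case r == releaseCollection:
        // cases 1 and 3: the collection is no longer loaded, so the proxy rejects the query
        return "query fails in proxy: collection not loaded"
    case l == loadCollection && r == releasePartition:
        // case 2: releasing a partition of a fully loaded collection is rejected
        return "release fails: not allowed after LoadCollection"
    default:
        // case 4: LoadPartition then ReleasePartition
        return "query on collection returns empty; query on partition returns notLoaded error"
    }
}

func main() {
    fmt.Println(outcome(loadCollection, releaseCollection))
    fmt.Println(outcome(loadCollection, releasePartition))
    fmt.Println(outcome(loadPartition, releaseCollection))
    fmt.Println(outcome(loadPartition, releasePartition))
}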
@ -112,14 +112,14 @@ func genSimpleSealedSegmentsChangeInfoMsg() *msgstream.SealedSegmentsChangeInfoM
|
|||
|
||||
func updateTSafe(queryCollection *queryCollection, timestamp Timestamp) error {
|
||||
// register
|
||||
queryCollection.tSafeWatchers[defaultVChannel] = newTSafeWatcher()
|
||||
queryCollection.tSafeWatchers[defaultHistoricalVChannel] = newTSafeWatcher()
|
||||
queryCollection.tSafeWatchers[defaultDMLChannel] = newTSafeWatcher()
|
||||
queryCollection.tSafeWatchers[defaultDeltaChannel] = newTSafeWatcher()
|
||||
|
||||
err := queryCollection.streaming.tSafeReplica.setTSafe(defaultVChannel, timestamp)
|
||||
err := queryCollection.streaming.tSafeReplica.setTSafe(defaultDMLChannel, timestamp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return queryCollection.historical.tSafeReplica.setTSafe(defaultHistoricalVChannel, timestamp)
|
||||
return queryCollection.historical.tSafeReplica.setTSafe(defaultDeltaChannel, timestamp)
|
||||
}
|
||||
|
||||
func TestQueryCollection_withoutVChannel(t *testing.T) {
|
||||
|
@ -487,14 +487,14 @@ func TestQueryCollection_tSafeWatcher(t *testing.T) {
|
|||
queryCollection, err := genSimpleQueryCollection(ctx, cancel)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = queryCollection.addTSafeWatcher(defaultVChannel)
|
||||
err = queryCollection.addTSafeWatcher(defaultDMLChannel)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = queryCollection.removeTSafeWatcher(defaultVChannel)
|
||||
err = queryCollection.removeTSafeWatcher(defaultDMLChannel)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// no tSafe watcher
|
||||
err = queryCollection.removeTSafeWatcher(defaultVChannel)
|
||||
err = queryCollection.removeTSafeWatcher(defaultDMLChannel)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
|
@ -572,7 +572,8 @@ func TestQueryCollection_doUnsolvedQueryMsg(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
timestamp := Timestamp(1000)
|
||||
updateTSafe(queryCollection, timestamp)
|
||||
err = updateTSafe(queryCollection, timestamp)
|
||||
assert.NoError(t, err)
|
||||
|
||||
go queryCollection.doUnsolvedQueryMsg()
|
||||
|
||||
|
@ -588,7 +589,8 @@ func TestQueryCollection_doUnsolvedQueryMsg(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
timestamp := Timestamp(1000)
|
||||
updateTSafe(queryCollection, timestamp)
|
||||
err = updateTSafe(queryCollection, timestamp)
|
||||
assert.NoError(t, err)
|
||||
|
||||
go queryCollection.doUnsolvedQueryMsg()
|
||||
|
||||
|
|
|
@ -159,7 +159,7 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -370,7 +370,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
|
|||
collection, err := node.historical.replica.getCollectionByID(defaultCollectionID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
segment := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultVChannel, segmentTypeGrowing, true)
|
||||
segment := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, true)
|
||||
|
||||
insertMsg, err := genSimpleInsertMsg()
|
||||
assert.NoError(t, err)
|
||||
|
@ -388,7 +388,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
|
|||
collection, err := node.historical.replica.getCollectionByID(defaultCollectionID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
segment := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultVChannel, segmentTypeGrowing, true)
|
||||
segment := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, true)
|
||||
|
||||
insertMsg, err := genSimpleInsertMsg()
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -667,7 +667,7 @@ func TestSegment_segmentLoadDeletedRecord(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
pks := []IntPrimaryKey{1, 2, 3}
|
||||
|
@ -713,7 +713,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -722,7 +722,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -734,7 +734,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -743,7 +743,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -755,7 +755,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -764,7 +764,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -776,7 +776,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -785,7 +785,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -797,7 +797,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -806,7 +806,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -818,7 +818,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -827,7 +827,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -839,7 +839,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
defaultMsgLength)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -848,7 +848,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
defaultCollectionID,
|
||||
defaultPartitionID,
|
||||
defaultSegmentID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
0)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -1005,7 +1005,7 @@ func TestSegment_BasicMetrics(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1064,7 +1064,7 @@ func TestSegment_fillVectorFieldsData(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1116,7 +1116,7 @@ func TestSegment_indexParam(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1175,7 +1175,7 @@ func TestSegment_updateSegmentIndex(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1214,7 +1214,7 @@ func TestSegment_dropSegmentIndex(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1230,7 +1230,7 @@ func TestSegment_dropSegmentIndex(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
|
||||
|
@ -1247,7 +1247,7 @@ func TestSegment_dropSegmentIndex(t *testing.T) {
|
|||
defaultSegmentID,
|
||||
defaultPartitionID,
|
||||
defaultCollectionID,
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
segmentTypeSealed,
|
||||
true)
|
||||
segment.setType(segmentTypeIndexing)
|
||||
|
|
|
@ -51,7 +51,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
res, _, _, err := streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{defaultPartitionID},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.NoError(t, err)
|
||||
|
@ -70,7 +70,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
res, _, _, err := streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.NoError(t, err)
|
||||
|
@ -96,7 +96,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
res, _, _, err := streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{defaultPartitionID},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.NoError(t, err)
|
||||
|
@ -122,7 +122,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
_, _, _, err = streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{defaultPartitionID},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.Error(t, err)
|
||||
|
@ -143,7 +143,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
res, _, _, err := streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.NoError(t, err)
|
||||
|
@ -167,7 +167,7 @@ func TestStreaming_search(t *testing.T) {
|
|||
_, _, _, err = streaming.search(searchReqs,
|
||||
defaultCollectionID,
|
||||
[]UniqueID{},
|
||||
defaultVChannel,
|
||||
defaultDMLChannel,
|
||||
plan,
|
||||
Timestamp(0))
|
||||
assert.Error(t, err)
|
||||
|
|
|
@@ -228,9 +228,15 @@ func (w *watchDmChannelsTask) PreExecute(ctx context.Context) error {

func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
    collectionID := w.req.CollectionID
    partitionID := w.req.PartitionID
    partitionIDs := w.req.GetPartitionIDs()

    var lType loadType
    // if no partitionIDs are specified, the load type is loadTypeCollection
    loadPartition := partitionID != 0
    if len(partitionIDs) != 0 {
        lType = loadTypePartition
    } else {
        lType = loadTypeCollection
    }

    // get all vChannels
    vChannels := make([]Channel, 0)
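Since WatchDmChannelsRequest now carries a repeated partitionIDs field, the task derives the load type from whether that list is empty rather than comparing a single partitionID against zero, as the hunk above shows. A short self-contained sketch of that decision follows; the helper function is illustrative only and not part of the codebase, though the constants mirror the ones used above.

package main

import "fmt"

type loadType int

const (
    loadTypeCollection loadType = iota
    loadTypePartition
)

// deriveLoadType mirrors the decision in Execute above: an empty
// partition list means the whole collection is being loaded.
func deriveLoadType(partitionIDs []int64) loadType {
    if len(partitionIDs) != 0 {
        return loadTypePartition
    }
    return loadTypeCollection
}

func main() {
    fmt.Println(deriveLoadType(nil))          // 0 (loadTypeCollection)
    fmt.Println(deriveLoadType([]int64{100})) // 1 (loadTypePartition)
}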
@ -270,39 +276,35 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
var l loadType
|
||||
if loadPartition {
|
||||
l = loadTypePartition
|
||||
} else {
|
||||
l = loadTypeCollection
|
||||
}
|
||||
sCol, err := w.node.streaming.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sCol.addVChannels(vChannels)
|
||||
sCol.addPChannels(pChannels)
|
||||
sCol.setLoadType(l)
|
||||
sCol.setLoadType(lType)
|
||||
hCol, err := w.node.historical.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hCol.addVChannels(vChannels)
|
||||
hCol.addPChannels(pChannels)
|
||||
hCol.setLoadType(l)
|
||||
if loadPartition {
|
||||
sCol.deleteReleasedPartition(partitionID)
|
||||
hCol.deleteReleasedPartition(partitionID)
|
||||
if hasPartitionInStreaming := w.node.streaming.replica.hasPartition(partitionID); !hasPartitionInStreaming {
|
||||
err := w.node.streaming.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
hCol.setLoadType(lType)
|
||||
if lType == loadTypePartition {
|
||||
for _, partitionID := range partitionIDs {
|
||||
sCol.deleteReleasedPartition(partitionID)
|
||||
hCol.deleteReleasedPartition(partitionID)
|
||||
if hasPartitionInStreaming := w.node.streaming.replica.hasPartition(partitionID); !hasPartitionInStreaming {
|
||||
err := w.node.streaming.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasPartitionInHistorical := w.node.historical.replica.hasPartition(partitionID); !hasPartitionInHistorical {
|
||||
err := w.node.historical.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
if hasPartitionInHistorical := w.node.historical.replica.hasPartition(partitionID); !hasPartitionInHistorical {
|
||||
err := w.node.historical.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -386,13 +388,8 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// add flow graph
|
||||
if loadPartition {
|
||||
w.node.dataSyncService.addPartitionFlowGraph(collectionID, partitionID, vChannels)
|
||||
log.Debug("Query node add partition flow graphs", zap.Any("channels", vChannels))
|
||||
} else {
|
||||
w.node.dataSyncService.addCollectionFlowGraph(collectionID, vChannels)
|
||||
log.Debug("Query node add collection flow graphs", zap.Any("channels", vChannels))
|
||||
}
|
||||
w.node.dataSyncService.addFlowGraphsForDMLChannels(collectionID, vChannels)
|
||||
log.Debug("Query node add DML flow graphs", zap.Any("channels", vChannels))
|
||||
|
||||
// add tSafe watcher if queryCollection exists
|
||||
qc, err := w.node.queryService.getQueryCollection(collectionID)
|
||||
|
@ -407,29 +404,15 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// channels as consumer
|
||||
var nodeFGs map[Channel]*queryNodeFlowGraph
|
||||
if loadPartition {
|
||||
nodeFGs, err = w.node.dataSyncService.getPartitionFlowGraphs(partitionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
nodeFGs, err = w.node.dataSyncService.getCollectionFlowGraphs(collectionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, channel := range toSubChannels {
|
||||
for _, fg := range nodeFGs {
|
||||
if fg.channel == channel {
|
||||
// use pChannel to consume
|
||||
err := fg.consumerFlowGraph(VPChannels[channel], consumeSubName)
|
||||
if err != nil {
|
||||
errMsg := "msgStream consume error :" + err.Error()
|
||||
log.Warn(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
}
|
||||
fg, err := w.node.dataSyncService.getFlowGraphByDMLChannel(collectionID, channel)
|
||||
if err != nil {
|
||||
return errors.New("watchDmChannelsTask failed, error = " + err.Error())
|
||||
}
|
||||
// use pChannel to consume
|
||||
err = fg.consumerFlowGraph(VPChannels[channel], consumeSubName)
|
||||
if err != nil {
|
||||
return errors.New("watchDmChannelsTask failed, msgStream consume error :" + err.Error())
|
||||
}
|
||||
}
|
||||
log.Debug("as consumer channels",
|
||||
|
@ -438,18 +421,16 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
|
||||
// seek channel
|
||||
for _, pos := range toSeekChannels {
|
||||
for _, fg := range nodeFGs {
|
||||
if fg.channel == pos.ChannelName {
|
||||
pos.MsgGroup = consumeSubName
|
||||
// use pChannel to seek
|
||||
pos.ChannelName = VPChannels[fg.channel]
|
||||
err := fg.seekQueryNodeFlowGraph(pos)
|
||||
if err != nil {
|
||||
errMsg := "msgStream seek error :" + err.Error()
|
||||
log.Warn(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
}
|
||||
fg, err := w.node.dataSyncService.getFlowGraphByDMLChannel(collectionID, pos.ChannelName)
|
||||
if err != nil {
|
||||
return errors.New("watchDmChannelsTask failed, error = " + err.Error())
|
||||
}
|
||||
pos.MsgGroup = consumeSubName
|
||||
// use pChannel to seek
|
||||
pos.ChannelName = VPChannels[fg.channel]
|
||||
err = fg.seekQueryNodeFlowGraph(pos)
|
||||
if err != nil {
|
||||
return errors.New("msgStream seek error :" + err.Error())
|
||||
}
|
||||
}
|
||||
log.Debug("Seek all channel done",
|
||||
|
@ -495,15 +476,10 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
)
|
||||
|
||||
// start flow graphs
|
||||
if loadPartition {
|
||||
err = w.node.dataSyncService.startPartitionFlowGraph(partitionID, vChannels)
|
||||
for _, channel := range vChannels {
|
||||
err = w.node.dataSyncService.startFlowGraphByDMLChannel(collectionID, channel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = w.node.dataSyncService.startCollectionFlowGraph(collectionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.New("watchDmChannelsTask failed, error = " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -612,7 +588,7 @@ func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
|
|||
w.node.tSafeReplica.addTSafe(channel)
|
||||
}
|
||||
|
||||
w.node.dataSyncService.addCollectionDeltaFlowGraph(collectionID, vDeltaChannels)
|
||||
w.node.dataSyncService.addFlowGraphsForDeltaChannels(collectionID, vDeltaChannels)
|
||||
|
||||
// add tSafe watcher if queryCollection exists
|
||||
qc, err := w.node.queryService.getQueryCollection(collectionID)
|
||||
|
@ -627,22 +603,15 @@ func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// channels as consumer
|
||||
var nodeFGs map[Channel]*queryNodeFlowGraph
|
||||
nodeFGs, err = w.node.dataSyncService.getCollectionDeltaFlowGraphs(collectionID, vDeltaChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, channel := range toSubChannels {
|
||||
for _, fg := range nodeFGs {
|
||||
if fg.channel == channel {
|
||||
// use pChannel to consume
|
||||
err := fg.consumerFlowGraphLatest(VPDeltaChannels[channel], consumeSubName)
|
||||
if err != nil {
|
||||
errMsg := "msgStream consume error :" + err.Error()
|
||||
log.Warn(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
}
|
||||
fg, err := w.node.dataSyncService.getFlowGraphByDeltaChannel(collectionID, channel)
|
||||
if err != nil {
|
||||
return errors.New("watchDeltaChannelsTask failed, error = " + err.Error())
|
||||
}
|
||||
// use pChannel to consume
|
||||
err = fg.consumerFlowGraphLatest(VPDeltaChannels[channel], consumeSubName)
|
||||
if err != nil {
|
||||
return errors.New("watchDeltaChannelsTask failed, msgStream consume error :" + err.Error())
|
||||
}
|
||||
}
|
||||
log.Debug("as consumer channels",
|
||||
|
@ -654,9 +623,11 @@ func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// start flow graphs
|
||||
err = w.node.dataSyncService.startCollectionDeltaFlowGraph(collectionID, vDeltaChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
for _, channel := range vDeltaChannels {
|
||||
err = w.node.dataSyncService.startFlowGraphForDeltaChannel(collectionID, channel)
|
||||
if err != nil {
|
||||
return errors.New("watchDeltaChannelsTask failed, error = " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("WatchDeltaChannels done", zap.String("ChannelIDs", fmt.Sprintln(vDeltaChannels)))
|
||||
|
@ -789,7 +760,6 @@ const (
|
|||
|
||||
func (r *releaseCollectionTask) Execute(ctx context.Context) error {
|
||||
log.Debug("Execute release collection task", zap.Any("collectionID", r.req.CollectionID))
|
||||
errMsg := "release collection failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "
|
||||
log.Debug("release streaming", zap.Any("collectionID", r.req.CollectionID))
|
||||
// sleep to wait for query tasks done
|
||||
const gracefulReleaseTime = 1
|
||||
|
@@ -799,18 +769,20 @@ func (r *releaseCollectionTask) Execute(ctx context.Context) error {
	)

	// remove query collection
	// queryCollection and Collection would be deleted in releaseCollection,
	// so we don't need to remove the tSafeWatcher or channel manually.
	r.node.queryService.stopQueryCollection(r.req.CollectionID)

	err := r.releaseReplica(r.node.streaming.replica, replicaStreaming)
	if err != nil {
		return errors.New(errMsg + err.Error())
		return fmt.Errorf("release collection failed, collectionID = %d, err = %s", r.req.CollectionID, err)
	}

	// remove collection metas in streaming and historical
	log.Debug("release historical", zap.Any("collectionID", r.req.CollectionID))
	err = r.releaseReplica(r.node.historical.replica, replicaHistorical)
	if err != nil {
		return errors.New(errMsg + err.Error())
		return fmt.Errorf("release collection failed, collectionID = %d, err = %s", r.req.CollectionID, err)
	}
	r.node.historical.removeGlobalSegmentIDsByCollectionID(r.req.CollectionID)
@@ -827,38 +799,24 @@ func (r *releaseCollectionTask) releaseReplica(replica ReplicaInterface, replica
	log.Debug("set release time", zap.Any("collectionID", r.req.CollectionID))
	collection.setReleaseTime(r.req.Base.Timestamp)

	// remove all flow graphs of the target collection
	var channels []Channel
	if replicaType == replicaStreaming {
		r.node.dataSyncService.removeCollectionFlowGraph(r.req.CollectionID)
		// remove partition flow graphs which partitions belong to the target collection
		partitionIDs, err := replica.getPartitionIDs(r.req.CollectionID)
		if err != nil {
			return err
		}
		for _, partitionID := range partitionIDs {
			r.node.dataSyncService.removePartitionFlowGraph(partitionID)
		}
		// remove all tSafes of the target collection
		for _, channel := range collection.getVChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			r.node.tSafeReplica.removeTSafe(channel)
			// queryCollection and Collection would be deleted in releaseCollection,
			// so we don't need to remove the tSafeWatcher or channel manually.
		}
		channels = collection.getVChannels()
		r.node.dataSyncService.removeFlowGraphsByDMLChannels(channels)
	} else {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		// remove all tSafes of the target collection
		for _, channel := range collection.getVDeltaChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vDeltaChannel", channel),
			)
			r.node.tSafeReplica.removeTSafe(channel)
			// queryCollection and Collection would be deleted in releaseCollection,
			// so we don't need to remove the tSafeWatcher or channel manually.
		}
		// remove all tSafes and flow graphs of the target collection
		channels = collection.getVDeltaChannels()
		r.node.dataSyncService.removeFlowGraphsByDeltaChannels(channels)
	}

	// remove all tSafes of the target collection
	for _, channel := range channels {
		log.Debug("Releasing tSafe in releaseCollectionTask...",
			zap.Any("collectionID", r.req.CollectionID),
			zap.Any("vDeltaChannel", channel),
		)
		r.node.tSafeReplica.removeTSafe(channel)
	}

	// remove excludedSegments record
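Note: after this refactor both branches of releaseReplica only pick a channel list and drop the matching flow graphs, and a single loop removes the tSafes afterwards. The condensed sketch below mirrors that control flow with stand-in types and callbacks; it is an illustration, not the real ReplicaInterface or dataSyncService.

package main

import "fmt"

type replicaType int

const (
	replicaStreaming replicaType = iota
	replicaHistorical
)

// releaseChannels sketches the consolidated shape: pick the channel set by
// replica type, remove the matching flow graphs, then clear every tSafe.
func releaseChannels(rt replicaType, vChannels, vDeltaChannels []string, removeFlowGraph, removeTSafe func(string)) {
	var channels []string
	if rt == replicaStreaming {
		channels = vChannels
	} else {
		channels = vDeltaChannels
	}
	for _, ch := range channels {
		removeFlowGraph(ch)
	}
	for _, ch := range channels {
		removeTSafe(ch)
	}
}

func main() {
	logRemove := func(kind string) func(string) {
		return func(ch string) { fmt.Println("remove", kind, ch) }
	}
	releaseChannels(replicaStreaming,
		[]string{"dml-0", "dml-1"}, []string{"delta-0"},
		logRemove("flow graph"), logRemove("tSafe"))
}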
@@ -900,7 +858,6 @@ func (r *releasePartitionsTask) Execute(ctx context.Context) error {
	log.Debug("Execute release partition task",
		zap.Any("collectionID", r.req.CollectionID),
		zap.Any("partitionIDs", r.req.PartitionIDs))
	errMsg := "release partitions failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "

	// sleep to wait for query tasks done
	const gracefulReleaseTime = 1
@@ -909,49 +866,22 @@ func (r *releasePartitionsTask) Execute(ctx context.Context) error {
	// get collection from streaming and historical
	hCol, err := r.node.historical.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
		return fmt.Errorf("release partitions failed, collectionID = %d, err = %s", r.req.CollectionID, err)
	}
	sCol, err := r.node.streaming.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
		return fmt.Errorf("release partitions failed, collectionID = %d, err = %s", r.req.CollectionID, err)
	}
	log.Debug("start release partition", zap.Any("collectionID", r.req.CollectionID))

	// release partitions
	vChannels := sCol.getVChannels()
	for _, id := range r.req.PartitionIDs {
		if _, err := r.node.dataSyncService.getPartitionFlowGraphs(id, vChannels); err == nil {
			r.node.dataSyncService.removePartitionFlowGraph(id)
			// remove all tSafes of the target partition
			for _, channel := range vChannels {
				log.Debug("Releasing tSafe in releasePartitionTask...",
					zap.Any("collectionID", r.req.CollectionID),
					zap.Any("partitionID", id),
					zap.Any("vChannel", channel),
				)
				r.node.tSafeReplica.removeTSafe(channel)
				// no tSafe or tSafe has been removed,
				// we need to remove the corresponding tSafeWatcher in queryCollection,
				// and remove the corresponding channel in collection
				qc, err := r.node.queryService.getQueryCollection(r.req.CollectionID)
				if err != nil {
					return err
				}
				err = qc.removeTSafeWatcher(channel)
				if err != nil {
					return err
				}
				sCol.removeVChannel(channel)
				hCol.removeVChannel(channel)
			}
		}
		// remove partition from streaming and historical
		hasPartitionInHistorical := r.node.historical.replica.hasPartition(id)
		if hasPartitionInHistorical {
			err := r.node.historical.replica.removePartition(id)
			if err != nil {
				// not return, try to release all partitions
				log.Warn(errMsg + err.Error())
				log.Warn(err.Error())
			}
		}
		hasPartitionInStreaming := r.node.streaming.replica.hasPartition(id)
@@ -959,46 +889,13 @@ func (r *releasePartitionsTask) Execute(ctx context.Context) error {
			err := r.node.streaming.replica.removePartition(id)
			if err != nil {
				// not return, try to release all partitions
				log.Warn(errMsg + err.Error())
				log.Warn(err.Error())
			}
		}

		hCol.addReleasedPartition(id)
		sCol.addReleasedPartition(id)
	}
	pids, err := r.node.historical.replica.getPartitionIDs(r.req.CollectionID)
	if err != nil {
		return err
	}
	log.Debug("start release history pids", zap.Any("pids", pids), zap.Any("load type", hCol.getLoadType()))
	if len(pids) == 0 && hCol.getLoadType() == loadTypePartition {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		log.Debug("release delta channels", zap.Any("deltaChannels", hCol.getVDeltaChannels()))
		vChannels := hCol.getVDeltaChannels()
		for _, channel := range vChannels {
			log.Debug("Releasing tSafe in releasePartitionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			r.node.tSafeReplica.removeTSafe(channel)
			// no tSafe or tSafe has been removed,
			// we need to remove the corresponding tSafeWatcher in queryCollection,
			// and remove the corresponding channel in collection
			qc, err := r.node.queryService.getQueryCollection(r.req.CollectionID)
			if err != nil {
				return err
			}
			err = qc.removeTSafeWatcher(channel)
			if err != nil {
				return err
			}
			sCol.removeVDeltaChannel(channel)
			hCol.removeVDeltaChannel(channel)
		}
	}

	// release global segment info
	r.node.historical.removeGlobalSegmentIDsByPartitionIds(r.req.PartitionIDs)

	log.Debug("Release partition task done",
		zap.Any("collectionID", r.req.CollectionID),

@@ -224,7 +224,7 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
		req := &querypb.WatchDmChannelsRequest{
			Base: genCommonMsgBase(commonpb.MsgType_WatchDmChannels),
			CollectionID: defaultCollectionID,
			PartitionID: defaultPartitionID,
			PartitionIDs: []UniqueID{defaultPartitionID},
			Schema: schema,
		}
		return req
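Note: the request built in the test above now carries a slice of partition IDs rather than a single ID. The sketch below shows how a consumer of such a request might walk the slice; the watchRequest struct is a local stand-in for illustration, not the generated querypb type.

package main

import "fmt"

// watchRequest is a local stand-in for a request that carries repeated partition IDs.
type watchRequest struct {
	CollectionID int64
	PartitionIDs []int64
}

// loadPartitions walks every requested partition and stops at the first failure,
// reporting which partition could not be loaded.
func loadPartitions(req *watchRequest, load func(collectionID, partitionID int64) error) error {
	for _, pid := range req.PartitionIDs {
		if err := load(req.CollectionID, pid); err != nil {
			return fmt.Errorf("load partition %d failed: %w", pid, err)
		}
	}
	return nil
}

func main() {
	req := &watchRequest{CollectionID: 1, PartitionIDs: []int64{100, 101}}
	_ = loadPartitions(req, func(cid, pid int64) error {
		fmt.Println("loading", cid, pid)
		return nil
	})
}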
@@ -265,10 +265,10 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultVChannel,
				ChannelName: defaultDMLChannel,
			},
		}
		task.req.PartitionID = 0
		task.req.PartitionIDs = []UniqueID{0}
		err = task.Execute(ctx)
		assert.NoError(t, err)
	})

@@ -284,7 +284,7 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultVChannel,
				ChannelName: defaultDMLChannel,
			},
		}
		err = task.Execute(ctx)
@@ -302,11 +302,11 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultVChannel,
				ChannelName: defaultDMLChannel,
			},
		}
		task.req.CollectionID++
		task.req.PartitionID++
		task.req.PartitionIDs[0]++
		err = task.Execute(ctx)
		assert.NoError(t, err)
	})

@@ -323,9 +323,9 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
		// task.req.Infos = []*datapb.VchannelInfo{
		// 	{
		// 		CollectionID: defaultCollectionID,
		// 		ChannelName: defaultVChannel,
		// 		ChannelName: defaultDMLChannel,
		// 		SeekPosition: &msgstream.MsgPosition{
		// 			ChannelName: defaultVChannel,
		// 			ChannelName: defaultDMLChannel,
		// 			MsgID: []byte{1, 2, 3},
		// 			MsgGroup: defaultSubName,
		// 			Timestamp: 0,
@@ -345,11 +345,11 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
			req: genWatchDMChannelsRequest(),
			node: node,
		}
		tmpChannel := defaultVChannel + "_1"
		tmpChannel := defaultDMLChannel + "_1"
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultVChannel,
				ChannelName: defaultDMLChannel,
				SeekPosition: &msgstream.MsgPosition{
					ChannelName: tmpChannel,
					Timestamp: 0,

@@ -366,7 +366,7 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
			},
		}
		err = task.Execute(ctx)
		assert.NoError(t, err)
		assert.Error(t, err)
	})

	t.Run("test add excluded segment for dropped segment", func(t *testing.T) {

@@ -377,11 +377,11 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
			req: genWatchDMChannelsRequest(),
			node: node,
		}
		tmpChannel := defaultVChannel + "_1"
		tmpChannel := defaultDMLChannel + "_1"
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultVChannel,
				ChannelName: defaultDMLChannel,
				SeekPosition: &msgstream.MsgPosition{
					ChannelName: tmpChannel,
					Timestamp: 0,
@@ -398,7 +398,95 @@ func TestTask_watchDmChannelsTask(t *testing.T) {
			},
		}
		err = task.Execute(ctx)
		assert.Error(t, err)
	})
}

func TestTask_watchDeltaChannelsTask(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	genWatchDeltaChannelsRequest := func() *querypb.WatchDeltaChannelsRequest {
		req := &querypb.WatchDeltaChannelsRequest{
			Base: genCommonMsgBase(commonpb.MsgType_WatchDeltaChannels),
			CollectionID: defaultCollectionID,
		}
		return req
	}

	t.Run("test timestamp", func(t *testing.T) {
		task := watchDeltaChannelsTask{
			req: genWatchDeltaChannelsRequest(),
		}
		timestamp := Timestamp(1000)
		task.req.Base.Timestamp = timestamp
		resT := task.Timestamp()
		assert.Equal(t, timestamp, resT)
		task.req.Base = nil
		resT = task.Timestamp()
		assert.Equal(t, Timestamp(0), resT)
	})

	t.Run("test OnEnqueue", func(t *testing.T) {
		task := watchDeltaChannelsTask{
			req: genWatchDeltaChannelsRequest(),
		}
		err := task.OnEnqueue()
		assert.NoError(t, err)
		task.req.Base = nil
		err = task.OnEnqueue()
		assert.NoError(t, err)
	})

	t.Run("test execute", func(t *testing.T) {
		node, err := genSimpleQueryNode(ctx)
		assert.NoError(t, err)

		task := watchDeltaChannelsTask{
			req: genWatchDeltaChannelsRequest(),
			node: node,
		}
		task.ctx = ctx
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultDeltaChannel,
				SeekPosition: &internalpb.MsgPosition{
					ChannelName: defaultDeltaChannel,
					MsgID: []byte{1, 2, 3},
					MsgGroup: defaultSubName,
					Timestamp: 0,
				},
			},
		}
		err = task.Execute(ctx)
		assert.NoError(t, err)
	})

	t.Run("test execute without init collection", func(t *testing.T) {
		node, err := genSimpleQueryNode(ctx)
		assert.NoError(t, err)

		task := watchDeltaChannelsTask{
			req: genWatchDeltaChannelsRequest(),
			node: node,
		}
		task.ctx = ctx
		task.req.Infos = []*datapb.VchannelInfo{
			{
				CollectionID: defaultCollectionID,
				ChannelName: defaultDeltaChannel,
				SeekPosition: &internalpb.MsgPosition{
					ChannelName: defaultDeltaChannel,
					MsgID: []byte{1, 2, 3},
					MsgGroup: defaultSubName,
					Timestamp: 0,
				},
			},
		}
		task.req.CollectionID++
		err = task.Execute(ctx)
		assert.Error(t, err)
	})
}
@@ -613,7 +701,7 @@ func TestTask_releaseCollectionTask(t *testing.T) {

		col, err := node.historical.replica.getCollectionByID(defaultCollectionID)
		assert.NoError(t, err)
		col.addVDeltaChannels([]Channel{defaultHistoricalVChannel})
		col.addVDeltaChannels([]Channel{defaultDeltaChannel})

		task := releaseCollectionTask{
			req: genReleaseCollectionRequest(),

@@ -672,9 +760,7 @@ func TestTask_releasePartitionTask(t *testing.T) {
			req: genReleasePartitionsRequest(),
			node: node,
		}
		task.node.dataSyncService.addPartitionFlowGraph(defaultCollectionID,
			defaultPartitionID,
			[]Channel{defaultVChannel})
		task.node.dataSyncService.addFlowGraphsForDMLChannels(defaultCollectionID, []Channel{defaultDMLChannel})
		err = task.Execute(ctx)
		assert.NoError(t, err)
	})

@@ -707,7 +793,7 @@ func TestTask_releasePartitionTask(t *testing.T) {
		err = node.historical.replica.removePartition(defaultPartitionID)
		assert.NoError(t, err)

		col.addVDeltaChannels([]Channel{defaultHistoricalVChannel})
		col.addVDeltaChannels([]Channel{defaultDeltaChannel})
		col.setLoadType(loadTypePartition)

		err = node.queryService.addQueryCollection(defaultCollectionID)

@@ -717,9 +803,7 @@ func TestTask_releasePartitionTask(t *testing.T) {
			req: genReleasePartitionsRequest(),
			node: node,
		}
		task.node.dataSyncService.addPartitionFlowGraph(defaultCollectionID,
			defaultPartitionID,
			[]Channel{defaultVChannel})
		task.node.dataSyncService.addFlowGraphsForDMLChannels(defaultCollectionID, []Channel{defaultDMLChannel})
		err = task.Execute(ctx)
		assert.NoError(t, err)
	})

@@ -17,7 +17,6 @@
package querynode

import (
	"errors"
	"fmt"
	"sync"

@@ -68,7 +67,7 @@ func (ts *tSafe) registerTSafeWatcher(t *tSafeWatcher) error {
	ts.tSafeMu.Lock()
	defer ts.tSafeMu.Unlock()
	if ts.watcher != nil {
		return errors.New(fmt.Sprintln("tSafeWatcher has been existed, channel = ", ts.channel))
		return fmt.Errorf("tSafeWatcher has been existed, channel = %s", ts.channel)
	}
	ts.watcher = t
	return nil
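Note: the tSafe hunk above swaps errors.New(fmt.Sprintln(...)) for fmt.Errorf. The runnable comparison below (plain standard library) shows the practical difference: Sprintln inserts an extra space between operands and appends a newline, while Errorf formats the message exactly as the format verb specifies. The channel value here is just an example string.

package main

import (
	"errors"
	"fmt"
)

func main() {
	channel := "example-dml-channel_0"

	// Old style: Sprintln adds a separating space between operands and a trailing newline.
	oldErr := errors.New(fmt.Sprintln("tSafeWatcher has been existed, channel = ", channel))

	// New style: the %s verb places the channel exactly where the format string says.
	newErr := fmt.Errorf("tSafeWatcher has been existed, channel = %s", channel)

	fmt.Printf("old: %q\n", oldErr.Error()) // note the embedded "\n" and the doubled space
	fmt.Printf("new: %q\n", newErr.Error())
}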
@@ -25,36 +25,36 @@ import (
func TestTSafeReplica(t *testing.T) {
	t.Run("test valid", func(t *testing.T) {
		replica := newTSafeReplica()
		replica.addTSafe(defaultVChannel)
		replica.addTSafe(defaultDMLChannel)
		watcher := newTSafeWatcher()
		assert.NotNil(t, watcher)

		err := replica.registerTSafeWatcher(defaultVChannel, watcher)
		err := replica.registerTSafeWatcher(defaultDMLChannel, watcher)
		assert.NoError(t, err)

		timestamp := Timestamp(1000)
		err = replica.setTSafe(defaultVChannel, timestamp)
		err = replica.setTSafe(defaultDMLChannel, timestamp)
		assert.NoError(t, err)

		resT, err := replica.getTSafe(defaultVChannel)
		resT, err := replica.getTSafe(defaultDMLChannel)
		assert.NoError(t, err)
		assert.Equal(t, timestamp, resT)

		replica.removeTSafe(defaultVChannel)
		_, err = replica.getTSafe(defaultVChannel)
		replica.removeTSafe(defaultDMLChannel)
		_, err = replica.getTSafe(defaultDMLChannel)
		assert.Error(t, err)
	})

	t.Run("test invalid", func(t *testing.T) {
		replica := newTSafeReplica()

		err := replica.registerTSafeWatcher(defaultVChannel, nil)
		err := replica.registerTSafeWatcher(defaultDMLChannel, nil)
		assert.Error(t, err)

		_, err = replica.getTSafe(defaultVChannel)
		_, err = replica.getTSafe(defaultDMLChannel)
		assert.Error(t, err)

		err = replica.setTSafe(defaultVChannel, Timestamp(1000))
		err = replica.setTSafe(defaultDMLChannel, Timestamp(1000))
		assert.Error(t, err)
	})
}

@@ -25,8 +25,6 @@ const (
	timestampFieldID FieldID = 1
)

const invalidTimestamp = Timestamp(0)

type (
	// UniqueID is an identifier that is guaranteed to be unique among all the collections, partitions and segments
	UniqueID = typeutil.UniqueID

@@ -49,3 +47,11 @@ type TimeRange struct {
	timestampMin Timestamp
	timestampMax Timestamp
}

// loadType is load collection or load partition
type loadType = int32

const (
	loadTypeCollection loadType = 0
	loadTypePartition loadType = 1
)
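Note: the last hunk introduces a loadType alias with collection- and partition-level constants. The sketch below shows a typical way such constants are consumed in a switch; the describeLoad helper is illustrative and not part of the diff.

package main

import "fmt"

// loadType mirrors the alias added in the diff: an int32 distinguishing
// whole-collection loads from partition-level loads.
type loadType = int32

const (
	loadTypeCollection loadType = 0
	loadTypePartition  loadType = 1
)

// describeLoad is an illustrative helper, not part of the diff.
func describeLoad(lt loadType) string {
	switch lt {
	case loadTypeCollection:
		return "collection is loaded as a whole"
	case loadTypePartition:
		return "only selected partitions are loaded"
	default:
		return fmt.Sprintf("unknown load type %d", lt)
	}
}

func main() {
	fmt.Println(describeLoad(loadTypeCollection))
	fmt.Println(describeLoad(loadTypePartition))
}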