Estimate segment size according index and binlog file header (#11875)

Signed-off-by: xige-16 <xi.ge@zilliz.com>
pull/11946/head
xige-16 2021-11-17 09:47:12 +08:00 committed by GitHub
parent 5edbb82610
commit 8a0ee27799
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 1424 additions and 417 deletions

View File

@ -9,6 +9,7 @@ import "milvus.proto";
import "internal.proto";
import "schema.proto";
import "data_coord.proto";
import "index_coord.proto";
service QueryCoord {
rpc GetComponentStates(internal.GetComponentStatesRequest) returns (internal.ComponentStates) {}
@ -167,6 +168,8 @@ message SegmentInfo {
repeated int64 compactionFrom = 11;
bool createdByCompaction = 12;
common.SegmentState state = 13;
bool enable_index = 14;
repeated index.IndexFilePathInfo index_path_infos = 15;
}
message GetSegmentInfoResponse {
@ -245,6 +248,8 @@ message SegmentLoadInfo {
repeated data.FieldBinlog statslogs = 8;
repeated data.DeltaLogInfo deltalogs = 9;
repeated int64 compactionFrom = 10; // segmentIDs compacted from
bool enable_index = 11;
repeated index.IndexFilePathInfo index_path_infos = 12;
}
message LoadSegmentsRequest {

View File

@ -9,6 +9,7 @@ import (
proto "github.com/golang/protobuf/proto"
commonpb "github.com/milvus-io/milvus/internal/proto/commonpb"
datapb "github.com/milvus-io/milvus/internal/proto/datapb"
indexpb "github.com/milvus-io/milvus/internal/proto/indexpb"
internalpb "github.com/milvus-io/milvus/internal/proto/internalpb"
milvuspb "github.com/milvus-io/milvus/internal/proto/milvuspb"
schemapb "github.com/milvus-io/milvus/internal/proto/schemapb"
@ -981,22 +982,24 @@ func (m *GetSegmentInfoRequest) GetCollectionID() int64 {
}
type SegmentInfo struct {
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,3,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
NodeID int64 `protobuf:"varint,4,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
MemSize int64 `protobuf:"varint,5,opt,name=mem_size,json=memSize,proto3" json:"mem_size,omitempty"`
NumRows int64 `protobuf:"varint,6,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"`
IndexName string `protobuf:"bytes,7,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
IndexID int64 `protobuf:"varint,8,opt,name=indexID,proto3" json:"indexID,omitempty"`
ChannelID string `protobuf:"bytes,9,opt,name=channelID,proto3" json:"channelID,omitempty"`
SegmentState SegmentState `protobuf:"varint,10,opt,name=segment_state,json=segmentState,proto3,enum=milvus.proto.query.SegmentState" json:"segment_state,omitempty"`
CompactionFrom []int64 `protobuf:"varint,11,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
CreatedByCompaction bool `protobuf:"varint,12,opt,name=createdByCompaction,proto3" json:"createdByCompaction,omitempty"`
State commonpb.SegmentState `protobuf:"varint,13,opt,name=state,proto3,enum=milvus.proto.common.SegmentState" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,3,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
NodeID int64 `protobuf:"varint,4,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
MemSize int64 `protobuf:"varint,5,opt,name=mem_size,json=memSize,proto3" json:"mem_size,omitempty"`
NumRows int64 `protobuf:"varint,6,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"`
IndexName string `protobuf:"bytes,7,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
IndexID int64 `protobuf:"varint,8,opt,name=indexID,proto3" json:"indexID,omitempty"`
ChannelID string `protobuf:"bytes,9,opt,name=channelID,proto3" json:"channelID,omitempty"`
SegmentState SegmentState `protobuf:"varint,10,opt,name=segment_state,json=segmentState,proto3,enum=milvus.proto.query.SegmentState" json:"segment_state,omitempty"`
CompactionFrom []int64 `protobuf:"varint,11,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
CreatedByCompaction bool `protobuf:"varint,12,opt,name=createdByCompaction,proto3" json:"createdByCompaction,omitempty"`
State commonpb.SegmentState `protobuf:"varint,13,opt,name=state,proto3,enum=milvus.proto.common.SegmentState" json:"state,omitempty"`
EnableIndex bool `protobuf:"varint,14,opt,name=enable_index,json=enableIndex,proto3" json:"enable_index,omitempty"`
IndexPathInfos []*indexpb.IndexFilePathInfo `protobuf:"bytes,15,rep,name=index_path_infos,json=indexPathInfos,proto3" json:"index_path_infos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentInfo) Reset() { *m = SegmentInfo{} }
@ -1115,6 +1118,20 @@ func (m *SegmentInfo) GetState() commonpb.SegmentState {
return commonpb.SegmentState_SegmentStateNone
}
func (m *SegmentInfo) GetEnableIndex() bool {
if m != nil {
return m.EnableIndex
}
return false
}
func (m *SegmentInfo) GetIndexPathInfos() []*indexpb.IndexFilePathInfo {
if m != nil {
return m.IndexPathInfos
}
return nil
}
type GetSegmentInfoResponse struct {
Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
Infos []*SegmentInfo `protobuf:"bytes,2,rep,name=infos,proto3" json:"infos,omitempty"`
@ -1481,19 +1498,21 @@ func (m *WatchDeltaChannelsRequest) GetInfos() []*datapb.VchannelInfo {
//used for handoff task
type SegmentLoadInfo struct {
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
DbID int64 `protobuf:"varint,4,opt,name=dbID,proto3" json:"dbID,omitempty"`
FlushTime int64 `protobuf:"varint,5,opt,name=flush_time,json=flushTime,proto3" json:"flush_time,omitempty"`
BinlogPaths []*datapb.FieldBinlog `protobuf:"bytes,6,rep,name=binlog_paths,json=binlogPaths,proto3" json:"binlog_paths,omitempty"`
NumOfRows int64 `protobuf:"varint,7,opt,name=num_of_rows,json=numOfRows,proto3" json:"num_of_rows,omitempty"`
Statslogs []*datapb.FieldBinlog `protobuf:"bytes,8,rep,name=statslogs,proto3" json:"statslogs,omitempty"`
Deltalogs []*datapb.DeltaLogInfo `protobuf:"bytes,9,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
CompactionFrom []int64 `protobuf:"varint,10,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
DbID int64 `protobuf:"varint,4,opt,name=dbID,proto3" json:"dbID,omitempty"`
FlushTime int64 `protobuf:"varint,5,opt,name=flush_time,json=flushTime,proto3" json:"flush_time,omitempty"`
BinlogPaths []*datapb.FieldBinlog `protobuf:"bytes,6,rep,name=binlog_paths,json=binlogPaths,proto3" json:"binlog_paths,omitempty"`
NumOfRows int64 `protobuf:"varint,7,opt,name=num_of_rows,json=numOfRows,proto3" json:"num_of_rows,omitempty"`
Statslogs []*datapb.FieldBinlog `protobuf:"bytes,8,rep,name=statslogs,proto3" json:"statslogs,omitempty"`
Deltalogs []*datapb.DeltaLogInfo `protobuf:"bytes,9,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
CompactionFrom []int64 `protobuf:"varint,10,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
EnableIndex bool `protobuf:"varint,11,opt,name=enable_index,json=enableIndex,proto3" json:"enable_index,omitempty"`
IndexPathInfos []*indexpb.IndexFilePathInfo `protobuf:"bytes,12,rep,name=index_path_infos,json=indexPathInfos,proto3" json:"index_path_infos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentLoadInfo) Reset() { *m = SegmentLoadInfo{} }
@ -1591,6 +1610,20 @@ func (m *SegmentLoadInfo) GetCompactionFrom() []int64 {
return nil
}
func (m *SegmentLoadInfo) GetEnableIndex() bool {
if m != nil {
return m.EnableIndex
}
return false
}
func (m *SegmentLoadInfo) GetIndexPathInfos() []*indexpb.IndexFilePathInfo {
if m != nil {
return m.IndexPathInfos
}
return nil
}
type LoadSegmentsRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
DstNodeID int64 `protobuf:"varint,2,opt,name=dst_nodeID,json=dstNodeID,proto3" json:"dst_nodeID,omitempty"`
@ -2336,154 +2369,158 @@ func init() {
func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }
var fileDescriptor_aab7cc9a69ed26e8 = []byte{
// 2347 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0x4d, 0x6f, 0x1c, 0x49,
0xd5, 0x3d, 0x33, 0xb6, 0x67, 0xde, 0x7c, 0x75, 0x2a, 0xb1, 0x77, 0x62, 0x92, 0xac, 0xb7, 0xb3,
0xf9, 0x58, 0x2f, 0xeb, 0x64, 0x9d, 0xe5, 0x63, 0x05, 0x7b, 0xd8, 0x78, 0x36, 0xde, 0x59, 0x12,
0xc7, 0xb4, 0xbd, 0x8b, 0x88, 0x22, 0x35, 0xed, 0xe9, 0xf2, 0xb8, 0x95, 0xee, 0xae, 0x49, 0x57,
0x4f, 0x12, 0xe7, 0xcc, 0x01, 0x0e, 0x88, 0x1f, 0x00, 0x42, 0x42, 0x02, 0xa1, 0x3d, 0x20, 0x71,
0x81, 0x73, 0x2e, 0xdc, 0xf9, 0x05, 0x48, 0x08, 0x7e, 0x00, 0x17, 0x38, 0xa3, 0xfa, 0xe8, 0x9e,
0xfe, 0xa8, 0xb1, 0xc7, 0x36, 0xd9, 0x44, 0x88, 0x5b, 0xf7, 0xab, 0x57, 0xef, 0xbd, 0x7a, 0xdf,
0xaf, 0x0a, 0xce, 0x3c, 0x1e, 0xe1, 0xf0, 0xc0, 0xea, 0x13, 0x12, 0x3a, 0xab, 0xc3, 0x90, 0x44,
0x04, 0x21, 0xdf, 0xf5, 0x9e, 0x8c, 0xa8, 0xf8, 0x5b, 0xe5, 0xeb, 0x4b, 0x8d, 0x3e, 0xf1, 0x7d,
0x12, 0x08, 0xd8, 0x52, 0x23, 0x8d, 0xb1, 0xd4, 0x72, 0x83, 0x08, 0x87, 0x81, 0xed, 0xc5, 0xab,
0xb4, 0xbf, 0x8f, 0x7d, 0x5b, 0xfe, 0xe9, 0x8e, 0x1d, 0xd9, 0x69, 0xfa, 0xc6, 0x8f, 0x35, 0x58,
0xdc, 0xde, 0x27, 0x4f, 0xd7, 0x89, 0xe7, 0xe1, 0x7e, 0xe4, 0x92, 0x80, 0x9a, 0xf8, 0xf1, 0x08,
0xd3, 0x08, 0xdd, 0x84, 0xca, 0xae, 0x4d, 0x71, 0x47, 0x5b, 0xd6, 0xae, 0xd7, 0xd7, 0x2e, 0xac,
0x66, 0x24, 0x91, 0x22, 0xdc, 0xa3, 0x83, 0xdb, 0x36, 0xc5, 0x26, 0xc7, 0x44, 0x08, 0x2a, 0xce,
0x6e, 0xaf, 0xdb, 0x29, 0x2d, 0x6b, 0xd7, 0xcb, 0x26, 0xff, 0x46, 0x6f, 0x43, 0xb3, 0x9f, 0xd0,
0xee, 0x75, 0x69, 0xa7, 0xbc, 0x5c, 0xbe, 0x5e, 0x36, 0xb3, 0x40, 0xe3, 0x77, 0x1a, 0xbc, 0x51,
0x10, 0x83, 0x0e, 0x49, 0x40, 0x31, 0xba, 0x05, 0x73, 0x34, 0xb2, 0xa3, 0x11, 0x95, 0x92, 0x7c,
0x4d, 0x29, 0xc9, 0x36, 0x47, 0x31, 0x25, 0x6a, 0x91, 0x6d, 0x49, 0xc1, 0x16, 0xbd, 0x0f, 0xe7,
0xdc, 0xe0, 0x1e, 0xf6, 0x49, 0x78, 0x60, 0x0d, 0x71, 0xd8, 0xc7, 0x41, 0x64, 0x0f, 0x70, 0x2c,
0xe3, 0xd9, 0x78, 0x6d, 0x6b, 0xbc, 0x64, 0xfc, 0x56, 0x83, 0x05, 0x26, 0xe9, 0x96, 0x1d, 0x46,
0xee, 0x4b, 0xd0, 0x97, 0x01, 0x8d, 0xb4, 0x8c, 0x9d, 0x32, 0x5f, 0xcb, 0xc0, 0x18, 0xce, 0x30,
0x66, 0xcf, 0xce, 0x56, 0xe1, 0xe2, 0x66, 0x60, 0xc6, 0x6f, 0xa4, 0x61, 0xd3, 0x72, 0x9e, 0x46,
0xa1, 0x79, 0x9e, 0xa5, 0x22, 0xcf, 0x93, 0xa8, 0xf3, 0x85, 0x06, 0x0b, 0x77, 0x89, 0xed, 0x8c,
0x0d, 0xff, 0xd5, 0xab, 0xf3, 0x23, 0x98, 0x13, 0x51, 0xd2, 0xa9, 0x70, 0x5e, 0x57, 0xb2, 0xbc,
0x64, 0x04, 0x8d, 0x25, 0xdc, 0xe6, 0x00, 0x53, 0x6e, 0x32, 0x7e, 0xa9, 0x41, 0xc7, 0xc4, 0x1e,
0xb6, 0x29, 0x7e, 0x95, 0xa7, 0x58, 0x84, 0xb9, 0x80, 0x38, 0xb8, 0xd7, 0xe5, 0xa7, 0x28, 0x9b,
0xf2, 0xcf, 0xf8, 0x87, 0xd4, 0xf0, 0x6b, 0xee, 0xb0, 0x29, 0x2b, 0xcc, 0x9e, 0xc4, 0x0a, 0x2f,
0xc6, 0x56, 0x78, 0xdd, 0x4f, 0x3a, 0xb6, 0xd4, 0x6c, 0xc6, 0x52, 0x3f, 0x84, 0xf3, 0xeb, 0x21,
0xb6, 0x23, 0xfc, 0x7d, 0x96, 0xe6, 0xd7, 0xf7, 0xed, 0x20, 0xc0, 0x5e, 0x7c, 0x84, 0x3c, 0x73,
0x4d, 0xc1, 0xbc, 0x03, 0xf3, 0xc3, 0x90, 0x3c, 0x3b, 0x48, 0xe4, 0x8e, 0x7f, 0x8d, 0x5f, 0x6b,
0xb0, 0xa4, 0xa2, 0x7d, 0x9a, 0x8c, 0x70, 0x0d, 0xda, 0xa1, 0x10, 0xce, 0xea, 0x0b, 0x7a, 0x9c,
0x6b, 0xcd, 0x6c, 0x49, 0xb0, 0xe4, 0x82, 0xae, 0x40, 0x2b, 0xc4, 0x74, 0xe4, 0x8d, 0xf1, 0xca,
0x1c, 0xaf, 0x29, 0xa0, 0x12, 0xcd, 0xf8, 0x52, 0x83, 0xf3, 0x1b, 0x38, 0x4a, 0xac, 0xc7, 0xd8,
0xe1, 0xd7, 0x34, 0xbb, 0xfe, 0x4a, 0x83, 0x76, 0x4e, 0x50, 0xb4, 0x0c, 0xf5, 0x14, 0x8e, 0x34,
0x50, 0x1a, 0x84, 0xbe, 0x0d, 0xb3, 0x4c, 0x77, 0x98, 0x8b, 0xd4, 0x5a, 0x33, 0x56, 0x8b, 0xc5,
0x7d, 0x35, 0x4b, 0xd5, 0x14, 0x1b, 0xd0, 0x0d, 0x38, 0xab, 0xc8, 0xac, 0x52, 0x7c, 0x54, 0x4c,
0xac, 0xc6, 0xef, 0x35, 0x58, 0x52, 0x29, 0xf3, 0x34, 0x06, 0x7f, 0x00, 0x8b, 0xc9, 0x69, 0x2c,
0x07, 0xd3, 0x7e, 0xe8, 0x0e, 0x79, 0x98, 0xf1, 0x62, 0x50, 0x5f, 0xbb, 0x7c, 0xf4, 0x79, 0xa8,
0xb9, 0x90, 0x90, 0xe8, 0xa6, 0x28, 0x18, 0x3f, 0xd3, 0x60, 0x61, 0x03, 0x47, 0xdb, 0x78, 0xe0,
0xe3, 0x20, 0xea, 0x05, 0x7b, 0xe4, 0xe4, 0x86, 0xbf, 0x04, 0x40, 0x25, 0x9d, 0xa4, 0x50, 0xa5,
0x20, 0xd3, 0x38, 0x81, 0xf1, 0xcf, 0x32, 0xd4, 0x53, 0xc2, 0xa0, 0x0b, 0x50, 0x4b, 0x28, 0x48,
0xd3, 0x8e, 0x01, 0x05, 0x8a, 0x25, 0x85, 0x5b, 0xe5, 0xdc, 0xa3, 0x5c, 0x74, 0x8f, 0x09, 0x19,
0x1c, 0x9d, 0x87, 0xaa, 0x8f, 0x7d, 0x8b, 0xba, 0xcf, 0xb1, 0xcc, 0x18, 0xf3, 0x3e, 0xf6, 0xb7,
0xdd, 0xe7, 0x98, 0x2d, 0x05, 0x23, 0xdf, 0x0a, 0xc9, 0x53, 0xda, 0x99, 0x13, 0x4b, 0xc1, 0xc8,
0x37, 0xc9, 0x53, 0x8a, 0x2e, 0x02, 0xb8, 0x81, 0x83, 0x9f, 0x59, 0x81, 0xed, 0xe3, 0xce, 0x3c,
0x8f, 0xb8, 0x1a, 0x87, 0x6c, 0xda, 0x3e, 0x66, 0xb9, 0x82, 0xff, 0xf4, 0xba, 0x9d, 0xaa, 0xd8,
0x28, 0x7f, 0xd9, 0x51, 0x65, 0x9c, 0xf6, 0xba, 0x9d, 0x9a, 0xd8, 0x97, 0x00, 0xd0, 0x27, 0xd0,
0x94, 0xe7, 0xb6, 0x84, 0x2f, 0x03, 0xf7, 0xe5, 0x65, 0x95, 0xed, 0xa5, 0x02, 0x85, 0x27, 0x37,
0x68, 0xea, 0x0f, 0x5d, 0x85, 0x56, 0x9f, 0xf8, 0x43, 0x9b, 0x6b, 0xe7, 0x4e, 0x48, 0xfc, 0x4e,
0x9d, 0xdb, 0x29, 0x07, 0x45, 0x37, 0xe1, 0x6c, 0x9f, 0xe7, 0x2d, 0xe7, 0xf6, 0xc1, 0x7a, 0xb2,
0xd4, 0x69, 0x2c, 0x6b, 0xd7, 0xab, 0xa6, 0x6a, 0x09, 0x7d, 0x2b, 0x0e, 0xb2, 0x26, 0x17, 0xec,
0x2d, 0xb5, 0x67, 0xa7, 0x25, 0x13, 0xf8, 0xbc, 0x15, 0xce, 0xbb, 0xe0, 0x69, 0xc2, 0xe5, 0x1b,
0x30, 0xeb, 0x06, 0x7b, 0x24, 0x8e, 0x8e, 0x37, 0x0f, 0xd1, 0x10, 0x67, 0x26, 0xb0, 0x8d, 0x3f,
0x95, 0x61, 0xf1, 0x63, 0xc7, 0x51, 0xd5, 0x80, 0xe3, 0x87, 0xc2, 0xd8, 0xa5, 0x4a, 0x19, 0x97,
0x9a, 0x26, 0x0f, 0xbe, 0x0b, 0x67, 0x72, 0xf9, 0x5d, 0x7a, 0x66, 0xcd, 0xd4, 0xb3, 0x19, 0xbe,
0xd7, 0x45, 0xef, 0x80, 0x9e, 0xcd, 0xf1, 0xb2, 0xba, 0xd5, 0xcc, 0x76, 0x26, 0xcb, 0xf7, 0xba,
0xe8, 0x9b, 0xf0, 0xc6, 0xc0, 0x23, 0xbb, 0xb6, 0x67, 0x51, 0x6c, 0x7b, 0xd8, 0xb1, 0xc6, 0x81,
0x35, 0xc7, 0x7d, 0x60, 0x41, 0x2c, 0x6f, 0xf3, 0xd5, 0xed, 0x24, 0xc8, 0x36, 0x98, 0xe7, 0xe1,
0x47, 0xd6, 0x90, 0x50, 0x1e, 0x31, 0xdc, 0xa7, 0xeb, 0xf9, 0x2c, 0x9a, 0xcc, 0x3f, 0xf7, 0xe8,
0x60, 0x4b, 0x62, 0x32, 0xdf, 0xc3, 0x8f, 0xe2, 0x3f, 0xf4, 0x39, 0x2c, 0x2a, 0x05, 0xa0, 0x9d,
0xea, 0x74, 0x96, 0x3a, 0xa7, 0x10, 0x90, 0x1a, 0x7f, 0xd3, 0xe0, 0xbc, 0x89, 0x7d, 0xf2, 0x04,
0xff, 0xcf, 0xda, 0xce, 0xf8, 0x7b, 0x09, 0x16, 0x7f, 0x60, 0x47, 0xfd, 0xfd, 0xae, 0x2f, 0x81,
0xf4, 0xd5, 0x1c, 0x30, 0x97, 0x4d, 0x2b, 0xc5, 0x6c, 0x9a, 0x84, 0xdf, 0xac, 0xca, 0xa8, 0x6c,
0x10, 0x5e, 0xfd, 0x22, 0x3e, 0xef, 0x38, 0xfc, 0x52, 0x6d, 0xe8, 0xdc, 0x09, 0xda, 0x50, 0xb4,
0x0e, 0x4d, 0xfc, 0xac, 0xef, 0x8d, 0x1c, 0x6c, 0x09, 0xee, 0xf3, 0x9c, 0xfb, 0x25, 0x05, 0xf7,
0xb4, 0x47, 0x35, 0xe4, 0xa6, 0x1e, 0x4f, 0x01, 0x2f, 0x34, 0x38, 0x2f, 0xb4, 0x8c, 0xbd, 0xc8,
0x7e, 0xb5, 0x8a, 0x4e, 0xd4, 0x58, 0x39, 0x8e, 0x1a, 0x8d, 0x3f, 0x94, 0xa1, 0x2d, 0x0f, 0xc8,
0x86, 0x8f, 0x29, 0x6a, 0x68, 0xce, 0xa2, 0xa5, 0xa2, 0x45, 0xa7, 0x11, 0x37, 0x6e, 0xfa, 0x2a,
0xa9, 0xa6, 0xef, 0x22, 0xc0, 0x9e, 0x37, 0xa2, 0xfb, 0x56, 0xe4, 0xfa, 0x71, 0x05, 0xad, 0x71,
0xc8, 0x8e, 0xeb, 0x63, 0xf4, 0x31, 0x34, 0x76, 0xdd, 0xc0, 0x23, 0x03, 0x6b, 0x68, 0x47, 0xfb,
0x94, 0x27, 0x21, 0xb5, 0xc5, 0xee, 0xb8, 0xd8, 0x73, 0x6e, 0x73, 0x5c, 0xb3, 0x2e, 0xf6, 0x6c,
0xb1, 0x2d, 0xe8, 0x12, 0xd4, 0x59, 0x19, 0x26, 0x7b, 0xa2, 0x12, 0xcf, 0x0b, 0x16, 0xc1, 0xc8,
0xbf, 0xbf, 0xc7, 0x6b, 0xf1, 0x77, 0xa1, 0xc6, 0x8a, 0x02, 0xf5, 0xc8, 0x20, 0x4e, 0x32, 0x47,
0xd1, 0x1f, 0x6f, 0x40, 0x1f, 0x41, 0xcd, 0x61, 0x8e, 0xc0, 0x77, 0xd7, 0x26, 0x9a, 0x81, 0x3b,
0xcb, 0x5d, 0x32, 0xe0, 0x66, 0x18, 0xef, 0x50, 0x94, 0x5a, 0x50, 0x95, 0x5a, 0xe3, 0xdf, 0x25,
0x38, 0xcb, 0x6c, 0x15, 0x27, 0xb4, 0x93, 0xfb, 0xdb, 0x45, 0x00, 0x87, 0x46, 0x56, 0xc6, 0xe7,
0x6a, 0x0e, 0x8d, 0x36, 0x85, 0xdb, 0x7d, 0x18, 0xbb, 0x54, 0x79, 0x72, 0xdb, 0x98, 0xf3, 0x9d,
0x62, 0x74, 0x9e, 0x64, 0x54, 0x47, 0xdf, 0x83, 0x96, 0x47, 0x6c, 0xc7, 0xea, 0x93, 0xc0, 0x11,
0x35, 0x64, 0x96, 0x37, 0x09, 0x6f, 0xab, 0x44, 0xd8, 0x09, 0xdd, 0xc1, 0x00, 0x87, 0xeb, 0x31,
0xae, 0xd9, 0xf4, 0xf8, 0x45, 0x85, 0xfc, 0x45, 0x97, 0xa1, 0x49, 0xc9, 0x28, 0xec, 0xe3, 0xf8,
0xa0, 0xa2, 0x01, 0x6b, 0x08, 0xe0, 0xa6, 0x3a, 0xc4, 0xe6, 0x15, 0xbd, 0xe6, 0x5f, 0x35, 0x58,
0x94, 0xa3, 0xeb, 0xe9, 0x75, 0x3f, 0x29, 0xd6, 0xe3, 0xc0, 0x28, 0x1f, 0x32, 0x0d, 0x55, 0xa6,
0x98, 0x86, 0x66, 0x15, 0x03, 0x6d, 0xb6, 0xe1, 0x9e, 0xcb, 0x37, 0xdc, 0xc6, 0x0e, 0x34, 0x93,
0x7a, 0xc1, 0x33, 0xc1, 0x65, 0x68, 0x0a, 0xb1, 0x2c, 0xa6, 0x52, 0xec, 0xc4, 0xd3, 0xac, 0x00,
0xde, 0xe5, 0x30, 0x46, 0x35, 0xa9, 0x47, 0xa2, 0x89, 0xaa, 0x99, 0x29, 0x88, 0xf1, 0xc7, 0x12,
0xe8, 0xe9, 0x4a, 0xcb, 0x29, 0x4f, 0x33, 0x26, 0x5f, 0x83, 0xb6, 0xbc, 0x68, 0x4d, 0xca, 0x9d,
0x1c, 0x5c, 0x1f, 0xa7, 0xc9, 0x75, 0xd1, 0x07, 0xb0, 0x28, 0x10, 0x0b, 0xe5, 0x51, 0x0c, 0xb0,
0xe7, 0xf8, 0xaa, 0x99, 0xeb, 0x6f, 0x26, 0xb7, 0x17, 0x95, 0x53, 0xb4, 0x17, 0xc5, 0xf6, 0x67,
0xf6, 0x64, 0xed, 0x8f, 0xf1, 0x97, 0x32, 0xb4, 0xc6, 0x11, 0x32, 0xb5, 0xd6, 0xa6, 0xb9, 0x00,
0xdc, 0x04, 0x7d, 0x3c, 0x21, 0xf2, 0xae, 0xfa, 0xd0, 0x20, 0xcf, 0xcf, 0x86, 0xed, 0x61, 0x6e,
0xa4, 0xbe, 0x03, 0x4d, 0xa9, 0x73, 0x2b, 0x5d, 0x84, 0xde, 0x52, 0x11, 0xcb, 0x78, 0x98, 0xd9,
0x48, 0xd5, 0x24, 0x8a, 0x3e, 0x84, 0x1a, 0x8f, 0xfb, 0xe8, 0x60, 0x88, 0x65, 0xc8, 0x5f, 0x50,
0xd1, 0x60, 0x9e, 0xb7, 0x73, 0x30, 0xc4, 0x66, 0xd5, 0x93, 0x5f, 0xa7, 0xed, 0x07, 0x6e, 0xc1,
0x42, 0x28, 0x42, 0xdb, 0xb1, 0x32, 0xea, 0x9b, 0xe7, 0xea, 0x3b, 0x17, 0x2f, 0x6e, 0xa5, 0xd5,
0x38, 0x61, 0xda, 0xaf, 0x4e, 0x9c, 0xf6, 0x7f, 0x51, 0x82, 0x45, 0x26, 0xfb, 0x6d, 0xdb, 0xb3,
0x83, 0x3e, 0x9e, 0x7e, 0x70, 0xfd, 0xef, 0x14, 0xdd, 0x42, 0x26, 0xac, 0x28, 0x32, 0x61, 0xb6,
0x28, 0xcc, 0xe6, 0x8b, 0xc2, 0x9b, 0x50, 0x97, 0x34, 0x1c, 0x12, 0x60, 0xae, 0xec, 0xaa, 0x09,
0x02, 0xd4, 0x25, 0x01, 0x1f, 0x75, 0xd9, 0x7e, 0xbe, 0x3a, 0xcf, 0x57, 0xe7, 0x1d, 0x1a, 0xf1,
0xa5, 0x8b, 0x00, 0x4f, 0x6c, 0xcf, 0x75, 0xb8, 0x93, 0x70, 0x35, 0x55, 0xcd, 0x1a, 0x87, 0x30,
0x15, 0x18, 0x3f, 0xd7, 0x60, 0xf1, 0x53, 0x3b, 0x70, 0xc8, 0xde, 0xde, 0xe9, 0xf3, 0xeb, 0x3a,
0xc4, 0x83, 0x6c, 0xef, 0x38, 0xc3, 0x5d, 0x66, 0x93, 0xf1, 0x93, 0x12, 0xa0, 0x94, 0xbd, 0x4e,
0x2e, 0xcd, 0x15, 0x68, 0x65, 0x34, 0x9f, 0xbc, 0x73, 0xa4, 0x55, 0x4f, 0x59, 0xdd, 0xdb, 0x15,
0xac, 0xac, 0x10, 0xdb, 0x94, 0x04, 0xdc, 0x8c, 0x53, 0xd7, 0xbd, 0xdd, 0x58, 0x4c, 0xb6, 0x95,
0x59, 0x6a, 0x6c, 0xc8, 0xf8, 0x7a, 0x0c, 0x12, 0x4b, 0x52, 0x36, 0x7c, 0xe4, 0x27, 0xbb, 0xb8,
0x6e, 0xe8, 0x34, 0x3b, 0xd4, 0x51, 0xe3, 0x5f, 0x1a, 0x9c, 0x91, 0xbf, 0x2c, 0x7e, 0x07, 0x38,
0x2e, 0x10, 0x24, 0xf0, 0xdc, 0x20, 0xf1, 0x28, 0x99, 0x91, 0x04, 0x50, 0xba, 0xcc, 0xa7, 0xd0,
0x96, 0x48, 0x49, 0x86, 0x9d, 0xd2, 0x1a, 0x2d, 0xb1, 0x2f, 0xc9, 0xad, 0x57, 0xa0, 0x45, 0xf6,
0xf6, 0xd2, 0xfc, 0x84, 0x9b, 0x37, 0x25, 0x54, 0x32, 0xfc, 0x0c, 0xf4, 0x18, 0xed, 0xb8, 0x39,
0xbd, 0x2d, 0x37, 0x26, 0xd3, 0xe2, 0x4f, 0x35, 0xe8, 0x64, 0x33, 0x7c, 0xea, 0xf8, 0xc7, 0x77,
0x84, 0xef, 0x64, 0x2f, 0x1b, 0xae, 0x1c, 0x22, 0xcf, 0x98, 0x8f, 0xec, 0xaa, 0x56, 0x9e, 0x43,
0x2b, 0x9b, 0x8a, 0x51, 0x03, 0xaa, 0x9b, 0x24, 0xfa, 0xe4, 0x99, 0x4b, 0x23, 0x7d, 0x06, 0xb5,
0x00, 0x36, 0x49, 0xb4, 0x15, 0x62, 0x8a, 0x83, 0x48, 0xd7, 0x10, 0xc0, 0xdc, 0xfd, 0xa0, 0xeb,
0xd2, 0x47, 0x7a, 0x09, 0x9d, 0x95, 0x17, 0xa1, 0xb6, 0xd7, 0x93, 0x79, 0x49, 0x2f, 0xb3, 0xed,
0xc9, 0x5f, 0x05, 0xe9, 0xd0, 0x48, 0x50, 0x36, 0xb6, 0x3e, 0xd7, 0x67, 0x51, 0x0d, 0x66, 0xc5,
0xe7, 0xdc, 0xca, 0x7d, 0xd0, 0xf3, 0x0e, 0x87, 0xea, 0x30, 0xbf, 0x2f, 0xe2, 0x55, 0x9f, 0x41,
0x6d, 0xa8, 0x7b, 0xe3, 0x50, 0xd1, 0x35, 0x06, 0x18, 0x84, 0xc3, 0xbe, 0x0c, 0x1a, 0xbd, 0xc4,
0xb8, 0x31, 0xab, 0x75, 0xc9, 0xd3, 0x40, 0x2f, 0xaf, 0x7c, 0x06, 0x8d, 0xf4, 0xed, 0x0e, 0xaa,
0x42, 0x65, 0x93, 0x04, 0x58, 0x9f, 0x61, 0x64, 0x37, 0x42, 0xf2, 0xd4, 0x0d, 0x06, 0xe2, 0x0c,
0x77, 0x42, 0xf2, 0x1c, 0x07, 0x7a, 0x89, 0x2d, 0x30, 0xbf, 0x64, 0x0b, 0x65, 0xb6, 0x20, 0x9c,
0x54, 0xaf, 0xac, 0xbc, 0x0f, 0xd5, 0xb8, 0x24, 0xa0, 0x33, 0xd0, 0xcc, 0x3c, 0xa3, 0xe8, 0x33,
0x08, 0x89, 0x76, 0x72, 0x9c, 0xfc, 0x75, 0x6d, 0xed, 0xcf, 0x75, 0x00, 0xd1, 0x95, 0x10, 0x12,
0x3a, 0x68, 0x08, 0x68, 0x03, 0x47, 0xeb, 0xc4, 0x1f, 0x92, 0x20, 0x16, 0x89, 0xa2, 0x9b, 0x13,
0x8a, 0x76, 0x11, 0x55, 0x9e, 0x72, 0xe9, 0xea, 0x84, 0x1d, 0x39, 0x74, 0x63, 0x06, 0xf9, 0x9c,
0x23, 0x9b, 0x6c, 0x76, 0xdc, 0xfe, 0xa3, 0xf8, 0x0e, 0xfe, 0x10, 0x8e, 0x39, 0xd4, 0x98, 0x63,
0xae, 0x62, 0xcb, 0x9f, 0xed, 0x28, 0x74, 0x83, 0x41, 0x7c, 0x31, 0x66, 0xcc, 0xa0, 0xc7, 0x70,
0x6e, 0x03, 0x73, 0xee, 0x2e, 0x8d, 0xdc, 0x3e, 0x8d, 0x19, 0xae, 0x4d, 0x66, 0x58, 0x40, 0x3e,
0x26, 0x4b, 0x0f, 0xda, 0xb9, 0xb7, 0x62, 0xb4, 0xa2, 0xf4, 0x77, 0xe5, 0xbb, 0xf6, 0xd2, 0xbb,
0x53, 0xe1, 0x26, 0xdc, 0x5c, 0x68, 0x65, 0xdf, 0x51, 0xd1, 0x3b, 0x93, 0x08, 0x14, 0x1e, 0x9e,
0x96, 0x56, 0xa6, 0x41, 0x4d, 0x58, 0x3d, 0x80, 0x56, 0xf6, 0xa5, 0x4e, 0xcd, 0x4a, 0xf9, 0x9a,
0xb7, 0x74, 0xd8, 0x9d, 0xa4, 0x31, 0x83, 0x7e, 0x04, 0x67, 0x0a, 0xcf, 0x63, 0xe8, 0xeb, 0x2a,
0xf2, 0x93, 0x5e, 0xd1, 0x8e, 0xe2, 0x20, 0xa5, 0x1f, 0x6b, 0x71, 0xb2, 0xf4, 0x85, 0x77, 0xd2,
0xe9, 0xa5, 0x4f, 0x91, 0x3f, 0x4c, 0xfa, 0x63, 0x73, 0x18, 0x01, 0x2a, 0x3e, 0x90, 0xa1, 0xf7,
0x54, 0x2c, 0x26, 0x3e, 0xd2, 0x2d, 0xad, 0x4e, 0x8b, 0x9e, 0x98, 0x7c, 0xc4, 0xa3, 0x35, 0xff,
0x94, 0xa4, 0x64, 0x3b, 0xf1, 0x6d, 0x4c, 0xcd, 0x76, 0xf2, 0xeb, 0x8f, 0x70, 0xea, 0xec, 0x55,
0xb7, 0xda, 0x56, 0xca, 0x17, 0x19, 0xb5, 0x53, 0xab, 0x6f, 0xce, 0x8d, 0x19, 0xb4, 0x03, 0xf5,
0x54, 0xab, 0x83, 0xae, 0x4e, 0xf2, 0x89, 0x6c, 0x2f, 0x74, 0x94, 0xb9, 0x2c, 0x80, 0x0d, 0x1c,
0xdd, 0xc3, 0x51, 0xe8, 0xf6, 0x69, 0x9e, 0xa8, 0xfc, 0x19, 0x23, 0xc4, 0x44, 0xaf, 0x1d, 0x89,
0x17, 0x8b, 0xbd, 0xf6, 0x25, 0x40, 0x8d, 0xdb, 0x8c, 0xd5, 0xfe, 0xff, 0xa7, 0xf1, 0x97, 0x90,
0xc6, 0x1f, 0x42, 0x3b, 0xf7, 0xce, 0xa1, 0x4e, 0xe3, 0xea, 0xc7, 0x90, 0xa3, 0x1c, 0x64, 0x17,
0x50, 0xf1, 0x32, 0x5e, 0x1d, 0x58, 0x13, 0x2f, 0xed, 0x8f, 0xe2, 0xf1, 0x10, 0xda, 0xb9, 0xcb,
0x70, 0xf5, 0x09, 0xd4, 0x37, 0xe6, 0x53, 0x9c, 0xa0, 0x78, 0x09, 0xac, 0x3e, 0xc1, 0xc4, 0xcb,
0xe2, 0xa3, 0x78, 0x7c, 0x01, 0x8d, 0xf4, 0x95, 0x1f, 0xba, 0x36, 0x29, 0x3a, 0x73, 0x83, 0xd3,
0xab, 0xcf, 0xd7, 0x2f, 0xbf, 0x9e, 0x3d, 0x84, 0x76, 0xee, 0x56, 0x4e, 0x6d, 0x5d, 0xf5, 0xd5,
0xdd, 0x51, 0xd4, 0xbf, 0xc2, 0x0c, 0xfc, 0xb2, 0x73, 0xe5, 0xed, 0x0f, 0x1e, 0xac, 0x0d, 0xdc,
0x68, 0x7f, 0xb4, 0xcb, 0x4e, 0x79, 0x43, 0x60, 0xbe, 0xe7, 0x12, 0xf9, 0x75, 0x23, 0x4e, 0x1a,
0x37, 0x38, 0xa5, 0x1b, 0x5c, 0xda, 0xe1, 0xee, 0xee, 0x1c, 0xff, 0xbd, 0xf5, 0x9f, 0x00, 0x00,
0x00, 0xff, 0xff, 0x0d, 0x49, 0xab, 0x16, 0xf6, 0x28, 0x00, 0x00,
// 2413 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0x4d, 0x6f, 0xdc, 0xc6,
0x55, 0xdc, 0x0f, 0x69, 0xf7, 0xed, 0x17, 0x35, 0xb6, 0x94, 0xf5, 0x36, 0x76, 0x64, 0x3a, 0xfe,
0x88, 0xd2, 0xc8, 0x8e, 0x9c, 0x7e, 0x04, 0x6d, 0x0e, 0xb1, 0x36, 0x56, 0x36, 0xb5, 0x65, 0x95,
0x52, 0x52, 0xd4, 0x30, 0xb0, 0xe5, 0x2e, 0x47, 0x2b, 0xc2, 0x24, 0x67, 0xcd, 0xe1, 0x5a, 0x96,
0xcf, 0x3d, 0xb4, 0x87, 0xa2, 0x3f, 0xa0, 0x45, 0x81, 0x02, 0x29, 0x8a, 0x1c, 0x7a, 0x6c, 0xcf,
0xbe, 0xf4, 0xde, 0x5f, 0x50, 0xa0, 0x68, 0xfe, 0x42, 0x7b, 0x2e, 0xe6, 0x83, 0x5c, 0x92, 0x3b,
0x2b, 0xad, 0xa4, 0x3a, 0x36, 0x8a, 0xde, 0xc8, 0x37, 0x6f, 0xde, 0x7b, 0xf3, 0x3e, 0xe7, 0xbd,
0x81, 0xc5, 0x27, 0x23, 0x1c, 0x1c, 0x76, 0xfb, 0x84, 0x04, 0xf6, 0xda, 0x30, 0x20, 0x21, 0x41,
0xc8, 0x73, 0xdc, 0xa7, 0x23, 0x2a, 0xfe, 0xd6, 0xf8, 0x7a, 0xab, 0xda, 0x27, 0x9e, 0x47, 0x7c,
0x01, 0x6b, 0x55, 0x93, 0x18, 0xad, 0xba, 0xe3, 0x87, 0x38, 0xf0, 0x2d, 0x37, 0x5a, 0xa5, 0xfd,
0x7d, 0xec, 0x59, 0xf2, 0x4f, 0xb7, 0xad, 0xd0, 0x4a, 0xd2, 0x6f, 0x2d, 0x3a, 0xbe, 0x8d, 0x9f,
0x25, 0x41, 0xc6, 0xcf, 0x35, 0x58, 0xde, 0xd9, 0x27, 0x07, 0x1b, 0xc4, 0x75, 0x71, 0x3f, 0x74,
0x88, 0x4f, 0x4d, 0xfc, 0x64, 0x84, 0x69, 0x88, 0x6e, 0x41, 0xa1, 0x67, 0x51, 0xdc, 0xd4, 0x56,
0xb4, 0x1b, 0x95, 0xf5, 0x37, 0xd7, 0x52, 0xc2, 0x49, 0xa9, 0xee, 0xd3, 0xc1, 0x1d, 0x8b, 0x62,
0x93, 0x63, 0x22, 0x04, 0x05, 0xbb, 0xd7, 0x69, 0x37, 0x73, 0x2b, 0xda, 0x8d, 0xbc, 0xc9, 0xbf,
0xd1, 0xdb, 0x50, 0xeb, 0xc7, 0xb4, 0x3b, 0x6d, 0xda, 0xcc, 0xaf, 0xe4, 0x6f, 0xe4, 0xcd, 0x34,
0xd0, 0xf8, 0xa3, 0x06, 0x6f, 0x4c, 0x88, 0x41, 0x87, 0xc4, 0xa7, 0x18, 0xdd, 0x86, 0x79, 0x1a,
0x5a, 0xe1, 0x88, 0x4a, 0x49, 0xbe, 0xa5, 0x94, 0x64, 0x87, 0xa3, 0x98, 0x12, 0x75, 0x92, 0x6d,
0x4e, 0xc1, 0x16, 0xbd, 0x0f, 0xe7, 0x1d, 0xff, 0x3e, 0xf6, 0x48, 0x70, 0xd8, 0x1d, 0xe2, 0xa0,
0x8f, 0xfd, 0xd0, 0x1a, 0xe0, 0x48, 0xc6, 0x73, 0xd1, 0xda, 0xf6, 0x78, 0xc9, 0xf8, 0x83, 0x06,
0x4b, 0x4c, 0xd2, 0x6d, 0x2b, 0x08, 0x9d, 0x97, 0xa0, 0x2f, 0x03, 0xaa, 0x49, 0x19, 0x9b, 0x79,
0xbe, 0x96, 0x82, 0x31, 0x9c, 0x61, 0xc4, 0x9e, 0x9d, 0xad, 0xc0, 0xc5, 0x4d, 0xc1, 0x8c, 0x2f,
0xa5, 0x61, 0x93, 0x72, 0x9e, 0x45, 0xa1, 0x59, 0x9e, 0xb9, 0x49, 0x9e, 0xa7, 0x51, 0xe7, 0x0b,
0x0d, 0x96, 0xee, 0x11, 0xcb, 0x1e, 0x1b, 0xfe, 0x9b, 0x57, 0xe7, 0x47, 0x30, 0x2f, 0x02, 0xa7,
0x59, 0xe0, 0xbc, 0xae, 0xa6, 0x79, 0xc9, 0xa0, 0x1a, 0x4b, 0xb8, 0xc3, 0x01, 0xa6, 0xdc, 0x64,
0xfc, 0x56, 0x83, 0xa6, 0x89, 0x5d, 0x6c, 0x51, 0xfc, 0x2a, 0x4f, 0xb1, 0x0c, 0xf3, 0x3e, 0xb1,
0x71, 0xa7, 0xcd, 0x4f, 0x91, 0x37, 0xe5, 0x9f, 0xf1, 0xb5, 0xd4, 0xf0, 0x6b, 0xee, 0xb0, 0x09,
0x2b, 0x14, 0x4f, 0x63, 0x85, 0x17, 0x63, 0x2b, 0xbc, 0xee, 0x27, 0x1d, 0x5b, 0xaa, 0x98, 0xb2,
0xd4, 0x4f, 0xe1, 0xc2, 0x46, 0x80, 0xad, 0x10, 0xff, 0x98, 0x65, 0xfe, 0x8d, 0x7d, 0xcb, 0xf7,
0xb1, 0x1b, 0x1d, 0x21, 0xcb, 0x5c, 0x53, 0x30, 0x6f, 0xc2, 0xc2, 0x30, 0x20, 0xcf, 0x0e, 0x63,
0xb9, 0xa3, 0x5f, 0xe3, 0xf7, 0x1a, 0xb4, 0x54, 0xb4, 0xcf, 0x92, 0x11, 0xae, 0x43, 0x23, 0x10,
0xc2, 0x75, 0xfb, 0x82, 0x1e, 0xe7, 0x5a, 0x36, 0xeb, 0x12, 0x2c, 0xb9, 0xa0, 0xab, 0x50, 0x0f,
0x30, 0x1d, 0xb9, 0x63, 0xbc, 0x3c, 0xc7, 0xab, 0x09, 0xa8, 0x44, 0x33, 0xbe, 0xd2, 0xe0, 0xc2,
0x26, 0x0e, 0x63, 0xeb, 0x31, 0x76, 0xf8, 0x35, 0xcd, 0xae, 0xbf, 0xd3, 0xa0, 0x91, 0x11, 0x14,
0xad, 0x40, 0x25, 0x81, 0x23, 0x0d, 0x94, 0x04, 0xa1, 0xef, 0x43, 0x91, 0xe9, 0x0e, 0x73, 0x91,
0xea, 0xeb, 0xc6, 0xda, 0x64, 0xbd, 0x5f, 0x4b, 0x53, 0x35, 0xc5, 0x06, 0x74, 0x13, 0xce, 0x29,
0x32, 0xab, 0x14, 0x1f, 0x4d, 0x26, 0x56, 0xe3, 0x4f, 0x1a, 0xb4, 0x54, 0xca, 0x3c, 0x8b, 0xc1,
0x1f, 0xc2, 0x72, 0x7c, 0x9a, 0xae, 0x8d, 0x69, 0x3f, 0x70, 0x86, 0x3c, 0xcc, 0x78, 0x31, 0xa8,
0xac, 0x5f, 0x39, 0xfe, 0x3c, 0xd4, 0x5c, 0x8a, 0x49, 0xb4, 0x13, 0x14, 0x8c, 0x5f, 0x69, 0xb0,
0xb4, 0x89, 0xc3, 0x1d, 0x3c, 0xf0, 0xb0, 0x1f, 0x76, 0xfc, 0x3d, 0x72, 0x7a, 0xc3, 0x5f, 0x02,
0xa0, 0x92, 0x4e, 0x5c, 0xa8, 0x12, 0x90, 0x59, 0x9c, 0xc0, 0xf8, 0xba, 0x00, 0x95, 0x84, 0x30,
0xe8, 0x4d, 0x28, 0xc7, 0x14, 0xa4, 0x69, 0xc7, 0x80, 0x09, 0x8a, 0x39, 0x85, 0x5b, 0x65, 0xdc,
0x23, 0x3f, 0xe9, 0x1e, 0x53, 0x32, 0x38, 0xba, 0x00, 0x25, 0x0f, 0x7b, 0x5d, 0xea, 0x3c, 0xc7,
0x32, 0x63, 0x2c, 0x78, 0xd8, 0xdb, 0x71, 0x9e, 0x63, 0xb6, 0xe4, 0x8f, 0xbc, 0x6e, 0x40, 0x0e,
0x68, 0x73, 0x5e, 0x2c, 0xf9, 0x23, 0xcf, 0x24, 0x07, 0x14, 0x5d, 0x04, 0x10, 0xd7, 0x3d, 0xdf,
0xf2, 0x70, 0x73, 0x81, 0x47, 0x5c, 0x99, 0x43, 0xb6, 0x2c, 0x0f, 0xb3, 0x5c, 0xc1, 0x7f, 0x3a,
0xed, 0x66, 0x49, 0x6c, 0x94, 0xbf, 0xec, 0xa8, 0x32, 0x4e, 0x3b, 0xed, 0x66, 0x59, 0xec, 0x8b,
0x01, 0xe8, 0x13, 0xa8, 0xc9, 0x73, 0x77, 0x85, 0x2f, 0x03, 0xf7, 0xe5, 0x15, 0x95, 0xed, 0xa5,
0x02, 0x85, 0x27, 0x57, 0x69, 0xe2, 0x0f, 0x5d, 0x83, 0x7a, 0x9f, 0x78, 0x43, 0x8b, 0x6b, 0xe7,
0x6e, 0x40, 0xbc, 0x66, 0x85, 0xdb, 0x29, 0x03, 0x45, 0xb7, 0xe0, 0x5c, 0x9f, 0xe7, 0x2d, 0xfb,
0xce, 0xe1, 0x46, 0xbc, 0xd4, 0xac, 0xae, 0x68, 0x37, 0x4a, 0xa6, 0x6a, 0x09, 0x7d, 0x2f, 0x0a,
0xb2, 0x1a, 0x17, 0xec, 0xb2, 0xda, 0xb3, 0x93, 0x92, 0xc9, 0x18, 0xbb, 0x0c, 0x55, 0xec, 0x5b,
0x3d, 0x17, 0x77, 0xb9, 0x26, 0x9a, 0x75, 0xce, 0xa3, 0x22, 0x60, 0x1d, 0x06, 0x42, 0x0f, 0x40,
0x17, 0x3a, 0x1d, 0x5a, 0xe1, 0x7e, 0xd7, 0xf1, 0xf7, 0x08, 0x6d, 0x36, 0xb8, 0xef, 0x67, 0xaa,
0x15, 0xc7, 0x5a, 0xe3, 0x9b, 0xee, 0x3a, 0x2e, 0xde, 0xb6, 0xc2, 0x7d, 0xee, 0xd3, 0x75, 0xbe,
0x10, 0xfd, 0x52, 0x7e, 0xfd, 0xce, 0xba, 0xfd, 0x59, 0x42, 0xf4, 0x3b, 0x50, 0x14, 0x52, 0x89,
0x88, 0x7c, 0xeb, 0x08, 0xab, 0x70, 0x66, 0x02, 0xdb, 0xf8, 0x4b, 0x1e, 0x96, 0x3f, 0xb6, 0x6d,
0x55, 0xdd, 0x39, 0x79, 0xf8, 0x8d, 0xdd, 0x38, 0x97, 0x72, 0xe3, 0x59, 0x72, 0xef, 0xbb, 0xb0,
0x98, 0xa9, 0x29, 0x32, 0x1a, 0xca, 0xa6, 0x9e, 0xae, 0x2a, 0x9d, 0x36, 0x7a, 0x07, 0xf4, 0x74,
0x5d, 0x91, 0x15, 0xb5, 0x6c, 0x36, 0x52, 0x95, 0xa5, 0xd3, 0x46, 0xdf, 0x85, 0x37, 0x06, 0x2e,
0xe9, 0x59, 0x6e, 0x97, 0x62, 0xcb, 0xc5, 0x76, 0x77, 0x1c, 0xcc, 0xf3, 0xdc, 0xef, 0x96, 0xc4,
0xf2, 0x0e, 0x5f, 0xdd, 0x89, 0x03, 0x7b, 0x93, 0x79, 0x3b, 0x7e, 0xdc, 0x1d, 0x12, 0xca, 0xa3,
0x94, 0xc7, 0x51, 0x25, 0x9b, 0xb9, 0xe3, 0x36, 0xec, 0x3e, 0x1d, 0x6c, 0x4b, 0x4c, 0xe6, 0xef,
0xf8, 0x71, 0xf4, 0x87, 0x3e, 0x87, 0x65, 0xa5, 0x00, 0xb4, 0x59, 0x9a, 0xcd, 0x52, 0xe7, 0x15,
0x02, 0x52, 0xe3, 0x1f, 0x1a, 0x5c, 0x30, 0xb1, 0x47, 0x9e, 0xe2, 0xff, 0x59, 0xdb, 0x19, 0xff,
0xcc, 0xc1, 0xf2, 0x4f, 0xac, 0xb0, 0xbf, 0xdf, 0xf6, 0x24, 0x90, 0xbe, 0x9a, 0x03, 0x66, 0x32,
0x78, 0x61, 0x32, 0x83, 0xc7, 0xe1, 0x57, 0x54, 0x19, 0x95, 0xf5, 0xe3, 0x6b, 0x5f, 0x44, 0xe7,
0x1d, 0x87, 0x5f, 0xe2, 0xea, 0x3b, 0x7f, 0x8a, 0xab, 0x2f, 0xda, 0x80, 0x1a, 0x7e, 0xd6, 0x77,
0x47, 0x36, 0x96, 0x29, 0x69, 0x81, 0x73, 0xbf, 0xa4, 0xe0, 0x9e, 0xf4, 0xa8, 0xaa, 0xdc, 0x24,
0x32, 0xd1, 0x0b, 0x0d, 0x2e, 0x08, 0x2d, 0x63, 0x37, 0xb4, 0x5e, 0xad, 0xa2, 0x63, 0x35, 0x16,
0x4e, 0xa2, 0x46, 0xe3, 0xcb, 0x02, 0x34, 0xe4, 0x01, 0x59, 0xc3, 0x33, 0x43, 0xdd, 0xce, 0x58,
0x34, 0x37, 0x69, 0xd1, 0x59, 0xc4, 0x8d, 0x2e, 0x9a, 0x85, 0xc4, 0x45, 0xf3, 0x22, 0xc0, 0x9e,
0x3b, 0xa2, 0xfb, 0xdd, 0xd0, 0xf1, 0xa2, 0xaa, 0x5d, 0xe6, 0x90, 0x5d, 0xc7, 0xc3, 0xe8, 0x63,
0xa8, 0xf6, 0x1c, 0xdf, 0x25, 0x03, 0x5e, 0x49, 0x28, 0x4f, 0x42, 0x6a, 0x8b, 0xdd, 0x75, 0xb0,
0x6b, 0xdf, 0xe1, 0xb8, 0x66, 0x45, 0xec, 0x61, 0xe5, 0x83, 0xa2, 0x4b, 0x50, 0x61, 0xa5, 0x9f,
0xec, 0x89, 0xea, 0xbf, 0x20, 0x58, 0xf8, 0x23, 0xef, 0xc1, 0x1e, 0xaf, 0xff, 0x3f, 0x84, 0x32,
0x2b, 0x0a, 0xd4, 0x25, 0x83, 0x28, 0xc9, 0x1c, 0x47, 0x7f, 0xbc, 0x01, 0x7d, 0x04, 0x65, 0x9b,
0x39, 0x02, 0xdf, 0x5d, 0x9e, 0x6a, 0x06, 0xee, 0x2c, 0xf7, 0xc8, 0x80, 0x9b, 0x61, 0xbc, 0x43,
0x51, 0xde, 0x41, 0x59, 0xde, 0xb3, 0x35, 0xb7, 0x32, 0x5b, 0xcd, 0xad, 0x9e, 0xa5, 0xe6, 0xfe,
0x3b, 0x07, 0xe7, 0x98, 0x7f, 0x44, 0x49, 0xf4, 0xf4, 0x3e, 0x7e, 0x11, 0xc0, 0xa6, 0x61, 0x37,
0xe5, 0xe7, 0x65, 0x9b, 0x86, 0x5b, 0xc2, 0xd5, 0x3f, 0x8c, 0xdc, 0x38, 0x3f, 0xfd, 0x7a, 0x9c,
0xf1, 0xd7, 0xc9, 0x8c, 0x70, 0x9a, 0x91, 0x04, 0xfa, 0x11, 0xd4, 0x5d, 0x62, 0xd9, 0xdd, 0x3e,
0xf1, 0x6d, 0x51, 0xb7, 0x8a, 0xfc, 0x32, 0xf4, 0xb6, 0x4a, 0x84, 0xdd, 0xc0, 0x19, 0x0c, 0x70,
0xb0, 0x11, 0xe1, 0x9a, 0x35, 0x97, 0x0f, 0x64, 0xe4, 0x2f, 0xba, 0x02, 0x35, 0x4a, 0x46, 0x41,
0x1f, 0x47, 0x07, 0x15, 0x17, 0xcd, 0xaa, 0x00, 0x6e, 0xa9, 0xc3, 0x7a, 0x41, 0x71, 0xa7, 0xfe,
0xbb, 0x06, 0xcb, 0xb2, 0x45, 0x3f, 0xbb, 0xee, 0xa7, 0xe5, 0x97, 0x28, 0x18, 0xf3, 0x47, 0x74,
0x7d, 0x85, 0x19, 0xba, 0xbe, 0xa2, 0xa2, 0x71, 0x4f, 0x37, 0x16, 0xf3, 0xd9, 0xc6, 0xc2, 0xd8,
0x85, 0x5a, 0x5c, 0xa3, 0x78, 0xf6, 0xb9, 0x02, 0x35, 0x21, 0x56, 0x97, 0xa9, 0x14, 0xdb, 0x51,
0xd7, 0x2e, 0x80, 0xf7, 0x38, 0x8c, 0x51, 0x8d, 0x6b, 0xa0, 0xb8, 0xb8, 0x95, 0xcd, 0x04, 0xc4,
0xf8, 0x73, 0x0e, 0xf4, 0x64, 0x75, 0xe7, 0x94, 0x67, 0x19, 0x07, 0x5c, 0x87, 0x86, 0x9c, 0x31,
0xc7, 0x25, 0x56, 0x36, 0xe8, 0x4f, 0x92, 0xe4, 0xda, 0xe8, 0x03, 0x58, 0x16, 0x88, 0x13, 0x25,
0x59, 0x34, 0xea, 0xe7, 0xf9, 0xaa, 0x99, 0xb9, 0x53, 0x4d, 0xbf, 0xd2, 0x14, 0xce, 0x70, 0xa5,
0x99, 0xbc, 0x72, 0x15, 0x4f, 0x77, 0xe5, 0x32, 0xfe, 0x96, 0x87, 0xfa, 0x38, 0x42, 0x66, 0xd6,
0xda, 0x2c, 0x83, 0xce, 0x2d, 0xd0, 0xc7, 0x9d, 0x30, 0xef, 0x1e, 0x8e, 0x0c, 0xf2, 0x6c, 0x0f,
0xdc, 0x18, 0x66, 0x46, 0x07, 0x77, 0xa1, 0x26, 0x75, 0xde, 0x4d, 0x16, 0xbe, 0xcb, 0x2a, 0x62,
0x29, 0x0f, 0x33, 0xab, 0x89, 0x3a, 0x48, 0xd1, 0x87, 0x50, 0xe6, 0x71, 0x1f, 0x1e, 0x0e, 0xb1,
0x0c, 0xf9, 0x37, 0x55, 0x34, 0x98, 0xe7, 0xed, 0x1e, 0x0e, 0xb1, 0x59, 0x72, 0xe5, 0xd7, 0x59,
0xef, 0x20, 0xb7, 0x61, 0x29, 0x10, 0xa1, 0x6d, 0x77, 0x53, 0xea, 0x5b, 0xe0, 0xea, 0x3b, 0x1f,
0x2d, 0x6e, 0x27, 0xd5, 0x38, 0x65, 0xaa, 0x51, 0x9a, 0x3a, 0xd5, 0xf8, 0x4d, 0x0e, 0x96, 0x99,
0xec, 0x77, 0x2c, 0xd7, 0xf2, 0xfb, 0x78, 0xf6, 0x06, 0xfd, 0xbf, 0x53, 0xe8, 0x27, 0x32, 0x61,
0x41, 0x91, 0x09, 0xd3, 0x45, 0xa1, 0x98, 0x2d, 0x0a, 0x6f, 0x41, 0x45, 0xd2, 0xb0, 0x89, 0x8f,
0xb9, 0xb2, 0x4b, 0x26, 0x08, 0x50, 0x9b, 0xf8, 0xbc, 0xa5, 0x67, 0xfb, 0xf9, 0xea, 0x02, 0x5f,
0x5d, 0xb0, 0x69, 0xc8, 0x97, 0x2e, 0x02, 0x3c, 0xb5, 0x5c, 0xc7, 0xe6, 0x4e, 0xc2, 0xd5, 0x54,
0x32, 0xcb, 0x1c, 0xc2, 0x54, 0x60, 0xfc, 0x5a, 0x83, 0xe5, 0x4f, 0x2d, 0xdf, 0x26, 0x7b, 0x7b,
0x67, 0xcf, 0xaf, 0x1b, 0x10, 0x35, 0xec, 0x9d, 0x93, 0x34, 0x94, 0xa9, 0x4d, 0xc6, 0x2f, 0x72,
0x80, 0x12, 0xf6, 0x3a, 0xbd, 0x34, 0x57, 0xa1, 0x9e, 0xd2, 0x7c, 0xfc, 0x9e, 0x93, 0x54, 0x3d,
0x65, 0x75, 0xaf, 0x27, 0x58, 0x75, 0x03, 0x6c, 0x51, 0xe2, 0x73, 0x33, 0xce, 0x5c, 0xf7, 0x7a,
0x91, 0x98, 0x6c, 0x2b, 0xb3, 0xd4, 0xd8, 0x90, 0xd1, 0x18, 0x10, 0x62, 0x4b, 0x52, 0xd6, 0xf0,
0x64, 0xbb, 0xc9, 0xa8, 0x6e, 0xe8, 0x34, 0xdd, 0x48, 0x52, 0xe3, 0x5f, 0x1a, 0x2c, 0xca, 0x5f,
0x16, 0xbf, 0x03, 0x1c, 0x15, 0x08, 0xe2, 0xbb, 0x8e, 0x1f, 0x7b, 0x94, 0xcc, 0x48, 0x02, 0x28,
0x5d, 0xe6, 0x53, 0x68, 0x48, 0xa4, 0x38, 0xc3, 0xce, 0x68, 0x8d, 0xba, 0xd8, 0x17, 0xe7, 0xd6,
0xab, 0x50, 0x27, 0x7b, 0x7b, 0x49, 0x7e, 0xc2, 0xcd, 0x6b, 0x12, 0x2a, 0x19, 0x7e, 0x06, 0x7a,
0x84, 0x76, 0xd2, 0x9c, 0xde, 0x90, 0x1b, 0xe3, 0x0e, 0xf5, 0x97, 0x1a, 0x34, 0xd3, 0x19, 0x3e,
0x71, 0xfc, 0x93, 0x3b, 0xc2, 0x0f, 0xd2, 0x03, 0x8e, 0xab, 0x47, 0xc8, 0x33, 0xe6, 0x23, 0x6f,
0x55, 0xab, 0xcf, 0xa1, 0x9e, 0x4e, 0xc5, 0xa8, 0x0a, 0xa5, 0x2d, 0x12, 0x7e, 0xf2, 0xcc, 0xa1,
0xa1, 0x3e, 0x87, 0xea, 0x00, 0x5b, 0x24, 0xdc, 0x0e, 0x30, 0xc5, 0x7e, 0xa8, 0x6b, 0x08, 0x60,
0xfe, 0x81, 0xdf, 0x76, 0xe8, 0x63, 0x3d, 0x87, 0xce, 0xc9, 0x81, 0xaf, 0xe5, 0x76, 0x64, 0x5e,
0xd2, 0xf3, 0x6c, 0x7b, 0xfc, 0x57, 0x40, 0x3a, 0x54, 0x63, 0x94, 0xcd, 0xed, 0xcf, 0xf5, 0x22,
0x2a, 0x43, 0x51, 0x7c, 0xce, 0xaf, 0x3e, 0x00, 0x3d, 0xeb, 0x70, 0xa8, 0x02, 0x0b, 0xfb, 0x22,
0x5e, 0xf5, 0x39, 0xd4, 0x80, 0x8a, 0x3b, 0x0e, 0x15, 0x5d, 0x63, 0x80, 0x41, 0x30, 0xec, 0xcb,
0xa0, 0xd1, 0x73, 0x8c, 0x1b, 0xb3, 0x5a, 0x9b, 0x1c, 0xf8, 0x7a, 0x7e, 0xf5, 0x33, 0xa8, 0x26,
0xa7, 0x58, 0xa8, 0x04, 0x85, 0x2d, 0xe2, 0x63, 0x7d, 0x8e, 0x91, 0xdd, 0x0c, 0xc8, 0x81, 0xe3,
0x0f, 0xc4, 0x19, 0xee, 0x06, 0xe4, 0x39, 0xf6, 0xf5, 0x1c, 0x5b, 0x60, 0x7e, 0xc9, 0x16, 0xf2,
0x6c, 0x41, 0x38, 0xa9, 0x5e, 0x58, 0x7d, 0x1f, 0x4a, 0x51, 0x49, 0x40, 0x8b, 0x50, 0x4b, 0x3d,
0x17, 0xe9, 0x73, 0x08, 0x89, 0xeb, 0xe4, 0x38, 0xf9, 0xeb, 0xda, 0xfa, 0x5f, 0x2b, 0x00, 0xe2,
0x56, 0x42, 0x48, 0x60, 0xa3, 0x21, 0xa0, 0x4d, 0x1c, 0x6e, 0x10, 0x6f, 0x48, 0xfc, 0x48, 0x24,
0x8a, 0x6e, 0x4d, 0x29, 0xda, 0x93, 0xa8, 0xf2, 0x94, 0xad, 0x6b, 0x53, 0x76, 0x64, 0xd0, 0x8d,
0x39, 0xe4, 0x71, 0x8e, 0xac, 0x9b, 0xda, 0x75, 0xfa, 0x8f, 0xa3, 0xb7, 0x86, 0x23, 0x38, 0x66,
0x50, 0x23, 0x8e, 0x99, 0x8a, 0x2d, 0x7f, 0x76, 0xc2, 0xc0, 0xf1, 0x07, 0xd1, 0x30, 0xce, 0x98,
0x43, 0x4f, 0xe0, 0xfc, 0x26, 0xe6, 0xdc, 0x1d, 0x1a, 0x3a, 0x7d, 0x1a, 0x31, 0x5c, 0x9f, 0xce,
0x70, 0x02, 0xf9, 0x84, 0x2c, 0x5d, 0x68, 0x64, 0xde, 0xc4, 0xd1, 0xaa, 0xd2, 0xdf, 0x95, 0xef,
0xf7, 0xad, 0x77, 0x67, 0xc2, 0x8d, 0xb9, 0x39, 0x50, 0x4f, 0xbf, 0x17, 0xa3, 0x77, 0xa6, 0x11,
0x98, 0x78, 0x60, 0x6b, 0xad, 0xce, 0x82, 0x1a, 0xb3, 0x7a, 0x08, 0xf5, 0xf4, 0x8b, 0xa4, 0x9a,
0x95, 0xf2, 0xd5, 0xb2, 0x75, 0xd4, 0x1c, 0xd4, 0x98, 0x43, 0x3f, 0x83, 0xc5, 0x89, 0x67, 0x40,
0xf4, 0x6d, 0x15, 0xf9, 0x69, 0xaf, 0x85, 0xc7, 0x71, 0x90, 0xd2, 0x8f, 0xb5, 0x38, 0x5d, 0xfa,
0x89, 0xf7, 0xe0, 0xd9, 0xa5, 0x4f, 0x90, 0x3f, 0x4a, 0xfa, 0x13, 0x73, 0x18, 0x01, 0x9a, 0x7c,
0x08, 0x44, 0xef, 0xa9, 0x58, 0x4c, 0x7d, 0x8c, 0x6c, 0xad, 0xcd, 0x8a, 0x1e, 0x9b, 0x7c, 0xc4,
0xa3, 0x35, 0xfb, 0x64, 0xa6, 0x64, 0x3b, 0xf5, 0x0d, 0x50, 0xcd, 0x76, 0xfa, 0x2b, 0x97, 0x70,
0xea, 0xf4, 0x78, 0x5d, 0x6d, 0x2b, 0xe5, 0xcb, 0x93, 0xda, 0xa9, 0xd5, 0xd3, 0x7a, 0x63, 0x0e,
0xed, 0x42, 0x25, 0x71, 0xd5, 0x41, 0xd7, 0xa6, 0xf9, 0x44, 0xfa, 0x2e, 0x74, 0x9c, 0xb9, 0xba,
0x00, 0x9b, 0x38, 0xbc, 0x8f, 0xc3, 0xc0, 0xe9, 0xd3, 0x2c, 0x51, 0xf9, 0x33, 0x46, 0x88, 0x88,
0x5e, 0x3f, 0x16, 0x2f, 0x12, 0x7b, 0xfd, 0x2b, 0x80, 0x32, 0xb7, 0x19, 0xab, 0xfd, 0xff, 0x4f,
0xe3, 0x2f, 0x21, 0x8d, 0x3f, 0x82, 0x46, 0xe6, 0x6d, 0x45, 0x9d, 0xc6, 0xd5, 0x0f, 0x30, 0xc7,
0x39, 0x48, 0x0f, 0xd0, 0xe4, 0x03, 0x80, 0x3a, 0xb0, 0xa6, 0x3e, 0x14, 0x1c, 0xc7, 0xe3, 0x11,
0x34, 0x32, 0x03, 0x78, 0xf5, 0x09, 0xd4, 0x53, 0xfa, 0x19, 0x4e, 0x30, 0x39, 0x78, 0x56, 0x9f,
0x60, 0xea, 0x80, 0xfa, 0x38, 0x1e, 0x5f, 0x40, 0x35, 0x39, 0xf2, 0x43, 0xd7, 0xa7, 0x45, 0x67,
0xa6, 0x71, 0x7a, 0xf5, 0xf9, 0xfa, 0xe5, 0xd7, 0xb3, 0x47, 0xd0, 0xc8, 0x4c, 0xe5, 0xd4, 0xd6,
0x55, 0x8f, 0xee, 0x8e, 0xa3, 0xfe, 0x0d, 0x66, 0xe0, 0x97, 0x9d, 0x2b, 0xef, 0x7c, 0xf0, 0x70,
0x7d, 0xe0, 0x84, 0xfb, 0xa3, 0x1e, 0x3b, 0xe5, 0x4d, 0x81, 0xf9, 0x9e, 0x43, 0xe4, 0xd7, 0xcd,
0x28, 0x69, 0xdc, 0xe4, 0x94, 0x6e, 0x72, 0x69, 0x87, 0xbd, 0xde, 0x3c, 0xff, 0xbd, 0xfd, 0x9f,
0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0xe8, 0x29, 0x4c, 0xf1, 0x29, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -23,12 +23,15 @@ import (
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
minioKV "github.com/milvus-io/milvus/internal/kv/minio"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -78,6 +81,7 @@ type Cluster interface {
getSessionVersion() int64
getMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) []queryNodeGetMetricsResponse
estimateSegmentsSize(segments *querypb.LoadSegmentsRequest) (int64, error)
}
type newQueryNodeFn func(ctx context.Context, address string, id UniqueID, kv *etcdkv.EtcdKV) (Node, error)
@ -94,6 +98,7 @@ type queryNodeCluster struct {
ctx context.Context
cancel context.CancelFunc
client *etcdkv.EtcdKV
dataKV kv.DataKV
session *sessionutil.Session
sessionVersion int64
@ -104,6 +109,7 @@ type queryNodeCluster struct {
newNodeFn newQueryNodeFn
segmentAllocator SegmentAllocatePolicy
channelAllocator ChannelAllocatePolicy
segSizeEstimator func(request *querypb.LoadSegmentsRequest, dataKV kv.DataKV) (int64, error)
}
func newQueryNodeCluster(ctx context.Context, clusterMeta Meta, kv *etcdkv.EtcdKV, newNodeFn newQueryNodeFn, session *sessionutil.Session) (Cluster, error) {
@ -119,12 +125,27 @@ func newQueryNodeCluster(ctx context.Context, clusterMeta Meta, kv *etcdkv.EtcdK
newNodeFn: newNodeFn,
segmentAllocator: defaultSegAllocatePolicy(),
channelAllocator: defaultChannelAllocatePolicy(),
segSizeEstimator: defaultSegEstimatePolicy(),
}
err := c.reloadFromKV()
if err != nil {
return nil, err
}
option := &minioKV.Option{
Address: Params.MinioEndPoint,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSLStr,
CreateBucket: true,
BucketName: Params.MinioBucketName,
}
c.dataKV, err = minioKV.NewMinIOKV(ctx, option)
if err != nil {
return nil, err
}
return c, nil
}
@ -704,3 +725,60 @@ func (c *queryNodeCluster) allocateSegmentsToQueryNode(ctx context.Context, reqs
func (c *queryNodeCluster) allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64) error {
return c.channelAllocator(ctx, reqs, c, wait, excludeNodeIDs)
}
// estimateSegmentsSize estimates the total memory the segments in the request
// will occupy once loaded, by delegating to the cluster's pluggable estimator
// (segSizeEstimator; replaced by a storage-free stub in unit tests).
func (c *queryNodeCluster) estimateSegmentsSize(segments *querypb.LoadSegmentsRequest) (int64, error) {
	return c.segSizeEstimator(segments, c.dataKV)
}
// defaultSegEstimatePolicy returns the production segment-size estimator,
// which reads index/binlog file headers from object storage.
func defaultSegEstimatePolicy() segEstimatePolicy {
	return estimateSegmentsSize
}

// segEstimatePolicy estimates the in-memory size of the segments in a load
// request, using dataKv to access the stored binlog/index files.
type segEstimatePolicy func(request *querypb.LoadSegmentsRequest, dataKv kv.DataKV) (int64, error)
func estimateSegmentsSize(segments *querypb.LoadSegmentsRequest, kvClient kv.DataKV) (int64, error) {
segmentSize := int64(0)
//TODO:: collection has multi vector field
//vecFields := make([]int64, 0)
//for _, field := range segments.Schema.Fields {
// if field.DataType == schemapb.DataType_BinaryVector || field.DataType == schemapb.DataType_FloatVector {
// vecFields = append(vecFields, field.FieldID)
// }
//}
// get fields data size, if len(indexFieldIDs) == 0, vector field would be involved in fieldBinLogs
for _, loadInfo := range segments.Infos {
// get index size
if loadInfo.EnableIndex {
for _, pathInfo := range loadInfo.IndexPathInfos {
for _, path := range pathInfo.IndexFilePaths {
indexSize, err := storage.EstimateMemorySize(kvClient, path)
if err != nil {
indexSize, err = storage.GetBinlogSize(kvClient, path)
if err != nil {
return 0, err
}
}
segmentSize += indexSize
}
}
continue
}
// get binlog size
for _, binlogPath := range loadInfo.BinlogPaths {
for _, path := range binlogPath.Binlogs {
binlogSize, err := storage.EstimateMemorySize(kvClient, path)
if err != nil {
binlogSize, err = storage.GetBinlogSize(kvClient, path)
if err != nil {
return 0, err
}
}
segmentSize += binlogSize
}
}
}
return segmentSize, nil
}

View File

@ -14,20 +14,372 @@ package querycoord
import (
"context"
"encoding/json"
"errors"
"fmt"
"path"
"strconv"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
minioKV "github.com/milvus-io/milvus/internal/kv/minio"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// Constants shared by the query coord unit tests.
const (
	indexID          = UniqueID(0) // build/index ID used for generated test indexes
	dimKey           = "dim"       // schema type-param key holding the vector dimension
	rowIDFieldID     = 0           // reserved field ID of the row-ID system field
	timestampFieldID = 1           // reserved field ID of the timestamp system field

	indexName = "query-coord-index-0"

	defaultDim        = 128
	defaultMetricType = "JACCARD"
	defaultKVRootPath = "query-coord-unittest" // root path for test objects in MinIO
)
// constFieldParam describes a scalar test field: its field ID and data type.
type constFieldParam struct {
	id       int64
	dataType schemapb.DataType
}

// simpleConstField is the default scalar (Int32) payload field for tests.
var simpleConstField = constFieldParam{
	id:       101,
	dataType: schemapb.DataType_Int32,
}

// vecFieldParam describes a vector test field.
type vecFieldParam struct {
	id         int64
	dim        int
	metricType string
	vecType    schemapb.DataType
}

// simpleVecField is the default float-vector field for tests.
var simpleVecField = vecFieldParam{
	id:         100,
	dim:        defaultDim,
	metricType: defaultMetricType,
	vecType:    schemapb.DataType_FloatVector,
}

// timestampField is the reserved timestamp system field.
var timestampField = constFieldParam{
	id:       timestampFieldID,
	dataType: schemapb.DataType_Int64,
}

// uidField is the reserved row-ID system field.
var uidField = constFieldParam{
	id:       rowIDFieldID,
	dataType: schemapb.DataType_Int64,
}

// indexParam holds index build parameters as key/value strings.
type indexParam = map[string]string
// segSizeEstimateForTest is a storage-free replacement for the production
// estimator: it sizes each segment as rows * estimated-bytes-per-row from the
// schema, so unit tests do not need MinIO access.
func segSizeEstimateForTest(segments *querypb.LoadSegmentsRequest, dataKV kv.DataKV) (int64, error) {
	sizePerRecord, err := typeutil.EstimateSizePerRecord(segments.Schema)
	if err != nil {
		return 0, err
	}

	sizeOfReq := int64(0)
	for _, loadInfo := range segments.Infos {
		sizeOfReq += int64(sizePerRecord) * loadInfo.NumOfRows
	}

	return sizeOfReq, nil
}
// genCollectionMeta builds an etcd collection meta for the given collection
// ID and schema, using the default test partition.
func genCollectionMeta(collectionID UniqueID, schema *schemapb.CollectionSchema) *etcdpb.CollectionMeta {
	return &etcdpb.CollectionMeta{
		ID:           collectionID,
		Schema:       schema,
		PartitionIDs: []UniqueID{defaultPartitionID},
	}
}
// ---------- unittest util functions ----------
// functions of inserting data init
// genInsertData generates msgLength rows of deterministic dummy data for each
// field of the schema: bool fields are all true, numeric fields hold the row
// index, and float vectors hold float32(i*j)*0.1 for row i / component j.
// Returns an error for any unsupported field data type.
func genInsertData(msgLength int, schema *schemapb.CollectionSchema) (*storage.InsertData, error) {
	insertData := &storage.InsertData{
		Data: make(map[int64]storage.FieldData),
	}

	for _, f := range schema.Fields {
		switch f.DataType {
		case schemapb.DataType_Bool:
			data := make([]bool, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = true
			}
			insertData.Data[f.FieldID] = &storage.BoolFieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Int8:
			data := make([]int8, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = int8(i)
			}
			insertData.Data[f.FieldID] = &storage.Int8FieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Int16:
			data := make([]int16, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = int16(i)
			}
			insertData.Data[f.FieldID] = &storage.Int16FieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Int32:
			data := make([]int32, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = int32(i)
			}
			insertData.Data[f.FieldID] = &storage.Int32FieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Int64:
			data := make([]int64, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = int64(i)
			}
			insertData.Data[f.FieldID] = &storage.Int64FieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Float:
			data := make([]float32, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = float32(i)
			}
			insertData.Data[f.FieldID] = &storage.FloatFieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_Double:
			data := make([]float64, msgLength)
			for i := 0; i < msgLength; i++ {
				data[i] = float64(i)
			}
			insertData.Data[f.FieldID] = &storage.DoubleFieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
			}
		case schemapb.DataType_FloatVector:
			dim := simpleVecField.dim // if no dim specified, use simpleVecField's dim
			for _, p := range f.TypeParams {
				if p.Key == dimKey {
					var err error
					dim, err = strconv.Atoi(p.Value)
					if err != nil {
						return nil, err
					}
				}
			}
			// pre-size to msgLength rows of dim components to avoid repeated
			// growth copies (the original grew from zero capacity)
			data := make([]float32, 0, msgLength*dim)
			for i := 0; i < msgLength; i++ {
				for j := 0; j < dim; j++ {
					data = append(data, float32(i*j)*0.1)
				}
			}
			insertData.Data[f.FieldID] = &storage.FloatVectorFieldData{
				NumRows: []int64{int64(msgLength)},
				Data:    data,
				Dim:     dim,
			}
		default:
			return nil, errors.New("data type not supported")
		}
	}

	return insertData, nil
}
// genStorageBlob serializes msgLength rows of generated dummy data for the
// schema into insert-binlog blobs using the storage InsertCodec.
func genStorageBlob(collectionID UniqueID,
	partitionID UniqueID,
	segmentID UniqueID,
	msgLength int,
	schema *schemapb.CollectionSchema) ([]*storage.Blob, error) {
	collMeta := genCollectionMeta(collectionID, schema)
	inCodec := storage.NewInsertCodec(collMeta)
	insertData, err := genInsertData(msgLength, schema)
	if err != nil {
		return nil, err
	}

	// timestamp field not allowed 0 timestamp
	if _, ok := insertData.Data[timestampFieldID]; ok {
		insertData.Data[timestampFieldID].(*storage.Int64FieldData).Data[0] = 1
	}

	// second return value of Serialize is unused by these tests
	binLogs, _, err := inCodec.Serialize(partitionID, segmentID, insertData)

	return binLogs, err
}
// saveSimpleBinLog saves binlogs for the default test
// collection/partition/segment with the default row count.
func saveSimpleBinLog(ctx context.Context, schema *schemapb.CollectionSchema, dataKV kv.DataKV) ([]*datapb.FieldBinlog, error) {
	return saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultNumRowPerSegment, schema, dataKV)
}
// saveBinLog serializes msgLength generated rows for the given schema into
// insert binlogs and saves them to the object store behind dataKV. It returns
// one FieldBinlog (holding the saved object key) per serialized blob.
func saveBinLog(ctx context.Context,
	collectionID UniqueID,
	partitionID UniqueID,
	segmentID UniqueID,
	msgLength int,
	schema *schemapb.CollectionSchema,
	dataKV kv.DataKV) ([]*datapb.FieldBinlog, error) {
	binLogs, err := genStorageBlob(collectionID, partitionID, segmentID, msgLength, schema)
	if err != nil {
		return nil, err
	}

	log.Debug(".. [query coord unittest] Saving bin logs to MinIO ..", zap.Int("number", len(binLogs)))
	kvs := make(map[string]string, len(binLogs))

	// write insert binlog
	fieldBinlog := make([]*datapb.FieldBinlog, 0)
	for _, blob := range binLogs {
		fieldID, err := strconv.ParseInt(blob.GetKey(), 10, 64)
		// check the parse error before using fieldID (the original logged the
		// possibly-zero fieldID first and only then checked err)
		if err != nil {
			return nil, err
		}
		log.Debug("[query coord unittest] save binlog", zap.Int64("fieldID", fieldID))

		key := genKey(collectionID, partitionID, segmentID, fieldID)
		kvs[key] = string(blob.Value[:])
		fieldBinlog = append(fieldBinlog, &datapb.FieldBinlog{
			FieldID: fieldID,
			Binlogs: []string{key},
		})
	}
	log.Debug("[query coord unittest] save binlog file to MinIO/S3")

	err = dataKV.MultiSave(kvs)
	return fieldBinlog, err
}
// genKey builds the object-store key for a field's binlog file:
// <root>/<collectionID>/<partitionID>/<segmentID>/<fieldID>.
func genKey(collectionID, partitionID, segmentID UniqueID, fieldID int64) string {
	return path.Join(
		defaultKVRootPath,
		strconv.FormatInt(collectionID, 10),
		strconv.FormatInt(partitionID, 10),
		strconv.FormatInt(segmentID, 10),
		strconv.FormatInt(fieldID, 10),
	)
}
// genSimpleIndexParams returns the fixed IVF_PQ index parameter set used by
// the query coord unit tests.
func genSimpleIndexParams() indexParam {
	return indexParam{
		"index_type":  "IVF_PQ",
		"index_mode":  "cpu",
		"dim":         strconv.FormatInt(defaultDim, 10),
		"k":           "10",
		"nlist":       "100",
		"nprobe":      "10",
		"m":           "4",
		"nbits":       "8",
		"metric_type": "L2",
		"SLICE_SIZE":  "400",
	}
}
// generateIndex builds an IVF_PQ index over deterministic dummy vectors for
// the given segment, serializes the index files, uploads them to MinIO and
// returns their object paths.
func generateIndex(segmentID UniqueID) ([]string, error) {
	indexParams := genSimpleIndexParams()
	// NOTE: the original also converted indexParams into a []*commonpb.KeyValuePair
	// slice that was never used; that dead code has been removed.

	typeParams := make(map[string]string)
	typeParams["dim"] = strconv.Itoa(defaultDim)
	// deterministic dummy vectors: component i of row n is float32(n*i)
	var indexRowData []float32
	for n := 0; n < defaultNumRowPerSegment; n++ {
		for i := 0; i < defaultDim; i++ {
			indexRowData = append(indexRowData, float32(n*i))
		}
	}

	index, err := indexnode.NewCIndex(typeParams, indexParams)
	if err != nil {
		return nil, err
	}

	err = index.BuildFloatVecIndexWithoutIds(indexRowData)
	if err != nil {
		return nil, err
	}

	option := &minioKV.Option{
		Address:           Params.MinioEndPoint,
		AccessKeyID:       Params.MinioAccessKeyID,
		SecretAccessKeyID: Params.MinioSecretAccessKey,
		UseSSL:            Params.MinioUseSSLStr,
		BucketName:        Params.MinioBucketName,
		CreateBucket:      true,
	}

	kv, err := minioKV.NewMinIOKV(context.Background(), option)
	if err != nil {
		return nil, err
	}

	// save index to minio
	binarySet, err := index.Serialize()
	if err != nil {
		return nil, err
	}

	// serialize index params
	indexCodec := storage.NewIndexFileBinlogCodec()
	serializedIndexBlobs, err := indexCodec.Serialize(
		0,
		0,
		0,
		0,
		0,
		0,
		indexParams,
		indexName,
		indexID,
		binarySet,
	)
	if err != nil {
		return nil, err
	}

	// upload each serialized blob under <segmentID>/<blob key>
	indexPaths := make([]string, 0)
	for _, index := range serializedIndexBlobs {
		p := strconv.Itoa(int(segmentID)) + "/" + index.Key
		indexPaths = append(indexPaths, p)
		err := kv.Save(p, string(index.Value))
		if err != nil {
			return nil, err
		}
	}

	return indexPaths, nil
}
// TestQueryNodeCluster_getMetrics is a placeholder; getMetrics coverage is
// still TODO.
func TestQueryNodeCluster_getMetrics(t *testing.T) {
	log.Info("TestQueryNodeCluster_getMetrics, todo")
}
@ -41,11 +393,12 @@ func TestReloadClusterFromKV(t *testing.T) {
clusterSession := sessionutil.NewSession(context.Background(), Params.MetaRootPath, Params.EtcdEndpoints)
clusterSession.Init(typeutil.QueryCoordRole, Params.Address, true)
cluster := &queryNodeCluster{
ctx: baseCtx,
client: kv,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
ctx: baseCtx,
client: kv,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
segSizeEstimator: segSizeEstimateForTest,
}
queryNode, err := startQueryNodeServer(baseCtx)
@ -68,10 +421,11 @@ func TestReloadClusterFromKV(t *testing.T) {
clusterSession := sessionutil.NewSession(context.Background(), Params.MetaRootPath, Params.EtcdEndpoints)
clusterSession.Init(typeutil.QueryCoordRole, Params.Address, true)
cluster := &queryNodeCluster{
client: kv,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
client: kv,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
segSizeEstimator: segSizeEstimateForTest,
}
kvs := make(map[string]string)
@ -116,13 +470,14 @@ func TestGrpcRequest(t *testing.T) {
meta, err := newMeta(baseCtx, kv, nil, nil)
assert.Nil(t, err)
cluster := &queryNodeCluster{
ctx: baseCtx,
cancel: cancel,
client: kv,
clusterMeta: meta,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
ctx: baseCtx,
cancel: cancel,
client: kv,
clusterMeta: meta,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
segSizeEstimator: segSizeEstimateForTest,
}
t.Run("Test GetNodeInfoByIDWithNodeNotExist", func(t *testing.T) {
@ -283,3 +638,72 @@ func TestGrpcRequest(t *testing.T) {
err = removeAllSession()
assert.Nil(t, err)
}
// TestEstimateSegmentSize exercises estimateSegmentsSize end to end against a
// MinIO-backed DataKV: invalid binlog paths must error, valid binlogs must
// yield a positive size, a built index (EnableIndex) must be sized from the
// index files instead, and invalid index paths must error again.
func TestEstimateSegmentSize(t *testing.T) {
	refreshParams()
	baseCtx, cancel := context.WithCancel(context.Background())

	option := &minioKV.Option{
		Address:           Params.MinioEndPoint,
		AccessKeyID:       Params.MinioAccessKeyID,
		SecretAccessKeyID: Params.MinioSecretAccessKey,
		UseSSL:            Params.MinioUseSSLStr,
		CreateBucket:      true,
		BucketName:        Params.MinioBucketName,
	}

	dataKV, err := minioKV.NewMinIOKV(baseCtx, option)
	assert.Nil(t, err)

	schema := genCollectionSchema(defaultCollectionID, false)
	// a deliberately bogus binlog path: estimation must fail
	binlog := []*datapb.FieldBinlog{
		{
			FieldID: simpleConstField.id,
			Binlogs: []string{"^&^%*&%&&(*^*&"},
		},
	}

	loadInfo := &querypb.SegmentLoadInfo{
		SegmentID:    defaultSegmentID,
		PartitionID:  defaultPartitionID,
		CollectionID: defaultCollectionID,
		BinlogPaths:  binlog,
		NumOfRows:    defaultNumRowPerSegment,
	}

	loadReq := &querypb.LoadSegmentsRequest{
		Schema: schema,
		Infos:  []*querypb.SegmentLoadInfo{loadInfo},
	}

	size, err := estimateSegmentsSize(loadReq, dataKV)
	assert.Error(t, err)
	assert.Equal(t, int64(0), size)

	// with real binlogs saved to MinIO, estimation succeeds and is non-zero
	binlog, err = saveSimpleBinLog(baseCtx, schema, dataKV)
	assert.NoError(t, err)

	loadInfo.BinlogPaths = binlog
	size, err = estimateSegmentsSize(loadReq, dataKV)
	assert.NoError(t, err)
	assert.NotEqual(t, int64(0), size)

	// with EnableIndex set, the index files are sized instead of the binlogs
	indexPath, err := generateIndex(defaultSegmentID)
	assert.NoError(t, err)

	indexInfo := &indexpb.IndexFilePathInfo{
		IndexFilePaths: indexPath,
	}
	loadInfo.IndexPathInfos = []*indexpb.IndexFilePathInfo{indexInfo}
	loadInfo.EnableIndex = true

	size, err = estimateSegmentsSize(loadReq, dataKV)
	assert.NoError(t, err)
	assert.NotEqual(t, int64(0), size)

	// bogus index paths: estimation must fail again
	indexInfo.IndexFilePaths = []string{"&*^*(^*(&*%^&*^(&"}
	size, err = estimateSegmentsSize(loadReq, dataKV)
	assert.Error(t, err)
	assert.Equal(t, int64(0), size)

	cancel()
}

View File

@ -151,6 +151,7 @@ func (qc *QueryCoord) LoadCollection(ctx context.Context, req *querypb.LoadColle
LoadCollectionRequest: req,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
@ -341,7 +342,9 @@ func (qc *QueryCoord) LoadPartitions(ctx context.Context, req *querypb.LoadParti
loadPartitionTask := &loadPartitionTask{
baseTask: baseTask,
LoadPartitionsRequest: req,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
@ -568,6 +571,7 @@ func (qc *QueryCoord) LoadBalance(ctx context.Context, req *querypb.LoadBalanceR
LoadBalanceRequest: req,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}

View File

@ -483,6 +483,7 @@ func TestLoadBalanceTask(t *testing.T) {
LoadBalanceRequest: loadBalanceSegment,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}

View File

@ -0,0 +1,328 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querycoord
import (
"context"
"errors"
"fmt"
"sync"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
)
// indexInfo captures the index state of one segment as reported by
// rootCoord/indexCoord: whether an index exists (enableIndex) and, if so, the
// index file paths per field (infos).
type indexInfo struct {
	segmentID    UniqueID
	collectionID UniqueID
	partitionID  UniqueID
	infos        []*indexpb.IndexFilePathInfo // index file paths; nil when enableIndex is false
	enableIndex  bool
}
// IndexChecker watches handoff requests persisted in etcd, waits for each
// segment's index to be built, and then schedules a handoffTask to load the
// indexed segment onto query nodes.
type IndexChecker struct {
	ctx    context.Context
	cancel context.CancelFunc
	client kv.MetaKv // etcd client holding the pending handoff requests

	revision int64 // etcd revision of the last handoff-prefix load

	// pipeline channels: incoming requests -> (index ready) -> handoff
	handoffReqChan        chan *querypb.SegmentInfo
	unIndexedSegmentsChan chan *querypb.SegmentInfo
	indexedSegmentsChan   chan *querypb.SegmentInfo

	meta      Meta
	scheduler *TaskScheduler
	cluster   Cluster

	rootCoord  types.RootCoord
	indexCoord types.IndexCoord
	dataCoord  types.DataCoord

	wg sync.WaitGroup // tracks the two background loops started by start()
}
// newIndexChecker creates an IndexChecker and restores any pending handoff
// requests from etcd (reloadFromKV). The returned checker is not running;
// call start() to launch its background loops.
func newIndexChecker(ctx context.Context,
	client kv.MetaKv, meta Meta, cluster Cluster, scheduler *TaskScheduler,
	root types.RootCoord, index types.IndexCoord, data types.DataCoord) (*IndexChecker, error) {
	childCtx, cancel := context.WithCancel(ctx)
	reqChan := make(chan *querypb.SegmentInfo, 1024)
	unIndexChan := make(chan *querypb.SegmentInfo, 1024)
	indexedChan := make(chan *querypb.SegmentInfo, 1024)

	checker := &IndexChecker{
		ctx:                   childCtx,
		cancel:                cancel,
		client:                client,
		handoffReqChan:        reqChan,
		unIndexedSegmentsChan: unIndexChan,
		indexedSegmentsChan:   indexedChan,
		meta:                  meta,
		scheduler:             scheduler,
		// BUGFIX: cluster was accepted as a parameter but never assigned,
		// leaving ic.cluster nil when handoffTask is built in
		// processHandoffAfterIndexDone.
		cluster:    cluster,
		rootCoord:  root,
		indexCoord: index,
		dataCoord:  data,
	}
	err := checker.reloadFromKV()
	if err != nil {
		log.Error("index checker reload from kv failed", zap.Error(err))
		return nil, err
	}

	return checker, nil
}
// start launches the two background loops (index checking and post-index
// handoff processing); close() stops them and waits for exit.
func (ic *IndexChecker) start() {
	ic.wg.Add(2)
	go ic.checkIndexLoop()
	go ic.processHandoffAfterIndexDone()
}
// close cancels the checker's context and blocks until both background loops
// have returned.
func (ic *IndexChecker) close() {
	ic.cancel()
	ic.wg.Wait()
}
// reloadFromKV restores pending handoff requests from etcd on startup. Valid
// requests (target collection/partition still loaded, auto-handoff enabled)
// are re-enqueued; stale ones are deleted from etcd.
func (ic *IndexChecker) reloadFromKV() error {
	_, handoffReqValues, version, err := ic.client.LoadWithRevision(handoffSegmentPrefix)
	if err != nil {
		log.Error("reloadFromKV: LoadWithRevision from kv failed", zap.Error(err))
		return err
	}
	ic.revision = version

	for _, value := range handoffReqValues {
		segmentInfo := &querypb.SegmentInfo{}
		err := proto.Unmarshal([]byte(value), segmentInfo)
		if err != nil {
			log.Error("reloadFromKV: unmarshal failed", zap.Any("error", err.Error()))
			return err
		}
		if ic.verifyHandoffReqValid(segmentInfo) && Params.AutoHandoff {
			// push the req to handoffReqChan and then wait to load after index created
			// in case handoffReqChan is full, and block start process
			go ic.enqueueHandoffReq(segmentInfo)
		} else {
			log.Debug("reloadFromKV: collection/partition has not been loaded, remove req from etcd", zap.Any("segmentInfo", segmentInfo))
			buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
			err = ic.client.Remove(buildQuerySegmentPath)
			if err != nil {
				log.Error("reloadFromKV: remove handoff segment from etcd failed", zap.Error(err))
				return err
			}
		}
		log.Debug("reloadFromKV: process handoff request done", zap.Any("segmentInfo", segmentInfo))
	}

	return nil
}
// verifyHandoffReqValid reports whether the handoff request targets a
// collection/partition that is currently loaded and not released.
func (ic *IndexChecker) verifyHandoffReqValid(req *querypb.SegmentInfo) bool {
	// if collection has not been loaded, then skip the segment
	collectionInfo, err := ic.meta.getCollectionInfoByID(req.CollectionID)
	if err != nil {
		return false
	}

	// if partition has not been loaded or released, then skip handoff the segment
	if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
		// partition-level load: valid only if this partition is loaded
		for _, id := range collectionInfo.PartitionIDs {
			if id == req.PartitionID {
				return true
			}
		}
		return false
	}

	// collection-level load: valid unless this partition was released
	for _, id := range collectionInfo.ReleasedPartitionIDs {
		if id == req.PartitionID {
			return false
		}
	}
	return true
}
// enqueueHandoffReq pushes an incoming handoff request into the pipeline;
// blocks when the channel is full.
func (ic *IndexChecker) enqueueHandoffReq(req *querypb.SegmentInfo) {
	ic.handoffReqChan <- req
}

// enqueueUnIndexSegment queues a segment that was loaded without an index;
// blocks when the channel is full.
func (ic *IndexChecker) enqueueUnIndexSegment(info *querypb.SegmentInfo) {
	ic.unIndexedSegmentsChan <- info
}

// enqueueIndexedSegment queues a segment whose index is ready for handoff;
// blocks when the channel is full.
func (ic *IndexChecker) enqueueIndexedSegment(info *querypb.SegmentInfo) {
	ic.indexedSegmentsChan <- info
}
// checkIndexLoop consumes handoff requests and blocks on each one until its
// index information is available (or the request becomes invalid), then
// forwards the segment to indexedSegmentsChan for the actual handoff.
func (ic *IndexChecker) checkIndexLoop() {
	defer ic.wg.Done()

	for {
		select {
		case <-ic.ctx.Done():
			return
		case segmentInfo := <-ic.handoffReqChan:
			// TODO:: check whether the index exists in parallel, in case indexCoord cannot create the index normally, and then block the loop
			log.Debug("checkIndexLoop: start check index for handoff segment", zap.Int64("segmentID", segmentInfo.SegmentID))
			for {
				if ic.verifyHandoffReqValid(segmentInfo) && Params.AutoHandoff {
					indexInfo, err := getIndexInfo(ic.ctx, segmentInfo, ic.rootCoord, ic.indexCoord)
					if err != nil {
						// NOTE(review): retries immediately with no backoff and
						// without checking ctx.Done — a busy spin while the
						// index is not yet ready; confirm this is intended.
						continue
					}
					if indexInfo.enableIndex {
						segmentInfo.EnableIndex = true
					}
					segmentInfo.IndexPathInfos = indexInfo.infos

					ic.enqueueIndexedSegment(segmentInfo)
					break
				}
				// request no longer valid (collection/partition released):
				// drop it and clean up the etcd entry
				buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
				err := ic.client.Remove(buildQuerySegmentPath)
				if err != nil {
					log.Error("checkIndexLoop: remove handoff segment from etcd failed", zap.Error(err))
					panic(err)
				}
				break
			}
		case segmentInfo := <-ic.unIndexedSegmentsChan:
			//TODO:: check index after load collection/partition, some segments may don't has index when loading
			log.Debug("checkIndexLoop: start check index for segment which has not loaded index", zap.Int64("segmentID", segmentInfo.SegmentID))
		}
	}
}
// processHandoffAfterIndexDone consumes segments whose index is ready,
// enqueues a handoffTask for each on the task scheduler, and removes the
// corresponding request from etcd (the scheduler persists the task itself, so
// it can recover it on restart).
func (ic *IndexChecker) processHandoffAfterIndexDone() {
	defer ic.wg.Done()

	for {
		select {
		case <-ic.ctx.Done():
			return
		case segmentInfo := <-ic.indexedSegmentsChan:
			collectionID := segmentInfo.CollectionID
			partitionID := segmentInfo.PartitionID
			segmentID := segmentInfo.SegmentID
			log.Debug("processHandoffAfterIndexDone: handoff segment start", zap.Any("segmentInfo", segmentInfo))
			baseTask := newBaseTask(ic.ctx, querypb.TriggerCondition_handoff)
			handoffReq := &querypb.HandoffSegmentsRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HandoffSegments,
				},
				SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
			}
			handoffTask := &handoffTask{
				baseTask:               baseTask,
				HandoffSegmentsRequest: handoffReq,
				dataCoord:              ic.dataCoord,
				cluster:                ic.cluster,
				meta:                   ic.meta,
			}
			err := ic.scheduler.Enqueue(handoffTask)
			if err != nil {
				log.Error("processHandoffAfterIndexDone: handoffTask enqueue failed", zap.Error(err))
				panic(err)
			}

			// wait for completion asynchronously; a failure here is expected
			// when the target was released in the meantime
			go func() {
				err := handoffTask.waitToFinish()
				if err != nil {
					// collection or partition may have been released before handoffTask enqueue
					log.Warn("processHandoffAfterIndexDone: handoffTask failed", zap.Error(err))
				}

				log.Debug("processHandoffAfterIndexDone: handoffTask completed", zap.Any("segment infos", handoffTask.SegmentInfos))
			}()

			// once task enqueue, etcd data can be cleaned, handoffTask will recover from taskScheduler's reloadFromKV()
			buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, collectionID, partitionID, segmentID)
			err = ic.client.Remove(buildQuerySegmentPath)
			if err != nil {
				log.Error("processHandoffAfterIndexDone: remove handoff segment from etcd failed", zap.Error(err))
				panic(err)
			}
		}
	}
}
// getIndexInfo looks up the index state of a handoff segment: it asks
// rootCoord whether the segment has a built index (DescribeSegment) and, if
// so, fetches the index file paths from indexCoord. When the segment has no
// index, enableIndex is false and infos is nil, signalling the caller to load
// the segment from binlogs immediately.
func getIndexInfo(ctx context.Context, info *querypb.SegmentInfo, root types.RootCoord, index types.IndexCoord) (*indexInfo, error) {
	indexInfo := &indexInfo{
		segmentID:    info.SegmentID,
		partitionID:  info.PartitionID,
		collectionID: info.CollectionID,
	}

	// check the buildID of the segment's index whether exist on rootCoord
	req := &milvuspb.DescribeSegmentRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_DescribeSegment,
		},
		CollectionID: info.CollectionID,
		SegmentID:    info.SegmentID,
	}
	response, err := root.DescribeSegment(ctx, req)
	if err != nil {
		return nil, err
	}
	if response.Status.ErrorCode != commonpb.ErrorCode_Success {
		return nil, errors.New(response.Status.Reason)
	}

	// if the segment.EnableIndex == false, then load the segment immediately
	if !response.EnableIndex {
		indexInfo.enableIndex = false
		return indexInfo, nil
	}

	// if index created done on indexNode, then handoff start
	indexFilePathRequest := &indexpb.GetIndexFilePathsRequest{
		IndexBuildIDs: []UniqueID{response.BuildID},
	}
	pathResponse, err := index.GetIndexFilePaths(ctx, indexFilePathRequest)
	if err != nil {
		return nil, err
	}
	if pathResponse.Status.ErrorCode != commonpb.ErrorCode_Success {
		return nil, errors.New(pathResponse.Status.Reason)
	}

	// a built index must report at least one file path per field
	// (len is never negative, so compare with == 0 rather than <= 0)
	if len(pathResponse.FilePaths) == 0 {
		return nil, errors.New("illegal index file paths")
	}
	for _, fieldPath := range pathResponse.FilePaths {
		if len(fieldPath.IndexFilePaths) == 0 {
			return nil, errors.New("empty index paths")
		}
	}

	indexInfo.enableIndex = true
	indexInfo.infos = pathResponse.FilePaths

	return indexInfo, nil
}

View File

@ -0,0 +1,195 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querycoord
import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/internal/allocator"
	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
	"github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/util/tsoutil"
)
// TestReloadFromKV verifies that newIndexChecker recovers pending handoff
// requests from etcd on startup: requests for an unloaded collection or
// partition are dropped, while requests for a loaded collection are enqueued
// to handoffReqChan.
func TestReloadFromKV(t *testing.T) {
	refreshParams()
	baseCtx, cancel := context.WithCancel(context.Background())
	kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
	assert.Nil(t, err)
	meta, err := newMeta(baseCtx, kv, nil, nil)
	assert.Nil(t, err)

	segmentInfo := &querypb.SegmentInfo{
		SegmentID:    defaultSegmentID,
		CollectionID: defaultCollectionID,
		PartitionID:  defaultPartitionID,
		SegmentState: querypb.SegmentState_sealed,
	}
	key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
	value, err := proto.Marshal(segmentInfo)
	assert.Nil(t, err)
	err = kv.Save(key, string(value))
	assert.Nil(t, err)

	// collection not loaded: the recovered request must be discarded
	t.Run("Test_CollectionNotExist", func(t *testing.T) {
		indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil, nil, nil)
		assert.Nil(t, err)
		assert.Equal(t, 0, len(indexChecker.handoffReqChan))
	})

	// re-save the request, then load only a different partition of the collection
	err = kv.Save(key, string(value))
	assert.Nil(t, err)
	meta.addCollection(defaultCollectionID, genCollectionSchema(defaultCollectionID, false))
	meta.setLoadType(defaultCollectionID, querypb.LoadType_LoadPartition)

	// partition not loaded: the recovered request must be discarded
	t.Run("Test_PartitionNotExist", func(t *testing.T) {
		indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil, nil, nil)
		assert.Nil(t, err)
		assert.Equal(t, 0, len(indexChecker.handoffReqChan))
	})

	err = kv.Save(key, string(value))
	assert.Nil(t, err)
	meta.setLoadType(defaultCollectionID, querypb.LoadType_loadCollection)

	// collection loaded: the recovered request must show up in handoffReqChan
	t.Run("Test_CollectionExist", func(t *testing.T) {
		indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil, nil, nil)
		assert.Nil(t, err)
		// poll with a short sleep instead of a hot busy-wait
		for len(indexChecker.handoffReqChan) == 0 {
			time.Sleep(10 * time.Millisecond)
		}
	})
	cancel()
}
// TestCheckIndexLoop verifies checkIndexLoop's two outcomes: an invalid
// handoff request (collection not loaded) is removed from etcd without being
// forwarded, and a valid request ends up in indexedSegmentsChan once its
// index info is fetched from the (mocked) coordinators.
func TestCheckIndexLoop(t *testing.T) {
	refreshParams()
	ctx, cancel := context.WithCancel(context.Background())
	kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
	assert.Nil(t, err)
	meta, err := newMeta(ctx, kv, nil, nil)
	assert.Nil(t, err)

	rootCoord := newRootCoordMock()
	indexCoord := newIndexCoordMock()
	indexCoord.returnIndexFile = true

	segmentInfo := &querypb.SegmentInfo{
		SegmentID:    defaultSegmentID,
		CollectionID: defaultCollectionID,
		PartitionID:  defaultPartitionID,
		SegmentState: querypb.SegmentState_sealed,
	}
	key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
	value, err := proto.Marshal(segmentInfo)
	assert.Nil(t, err)

	t.Run("Test_ReqInValid", func(t *testing.T) {
		childCtx, childCancel := context.WithCancel(context.Background())
		indexChecker, err := newIndexChecker(childCtx, kv, meta, nil, nil, rootCoord, indexCoord, nil)
		assert.Nil(t, err)
		err = kv.Save(key, string(value))
		assert.Nil(t, err)
		indexChecker.enqueueHandoffReq(segmentInfo)
		indexChecker.wg.Add(1)
		go indexChecker.checkIndexLoop()

		// the invalid request must eventually be removed from etcd; poll with
		// a short sleep instead of hammering etcd in a hot loop
		for {
			if _, err := kv.Load(key); err != nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		assert.Equal(t, 0, len(indexChecker.indexedSegmentsChan))
		childCancel()
		indexChecker.wg.Wait()
	})

	meta.addCollection(defaultCollectionID, genCollectionSchema(defaultCollectionID, false))
	meta.setLoadType(defaultCollectionID, querypb.LoadType_loadCollection)

	t.Run("Test_GetIndexInfo", func(t *testing.T) {
		childCtx, childCancel := context.WithCancel(context.Background())
		indexChecker, err := newIndexChecker(childCtx, kv, meta, nil, nil, rootCoord, indexCoord, nil)
		assert.Nil(t, err)
		indexChecker.enqueueHandoffReq(segmentInfo)
		indexChecker.wg.Add(1)
		go indexChecker.checkIndexLoop()

		// wait until the segment has been forwarded for handoff
		for len(indexChecker.indexedSegmentsChan) == 0 {
			time.Sleep(10 * time.Millisecond)
		}
		childCancel()
		indexChecker.wg.Wait()
	})
	cancel()
}
// TestProcessHandoffAfterIndexDone verifies that an indexed segment pushed to
// the index checker is turned into a trigger task on the scheduler and that
// its handoff entry is removed from etcd afterwards.
func TestProcessHandoffAfterIndexDone(t *testing.T) {
	refreshParams()
	ctx, cancel := context.WithCancel(context.Background())
	kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
	assert.Nil(t, err)
	meta, err := newMeta(ctx, kv, nil, nil)
	assert.Nil(t, err)

	taskScheduler := &TaskScheduler{
		ctx:              ctx,
		cancel:           cancel,
		client:           kv,
		triggerTaskQueue: NewTaskQueue(),
	}
	idAllocatorKV, err := tsoutil.NewTSOKVBase(Params.EtcdEndpoints, Params.KvRootPath, "queryCoordTaskID")
	assert.Nil(t, err)
	idAllocator := allocator.NewGlobalIDAllocator("idTimestamp", idAllocatorKV)
	err = idAllocator.Initialize()
	assert.Nil(t, err)
	taskScheduler.taskIDAllocator = func() (UniqueID, error) {
		return idAllocator.AllocOne()
	}

	indexChecker, err := newIndexChecker(ctx, kv, meta, nil, taskScheduler, nil, nil, nil)
	assert.Nil(t, err)
	indexChecker.wg.Add(1)
	go indexChecker.processHandoffAfterIndexDone()

	segmentInfo := &querypb.SegmentInfo{
		SegmentID:    defaultSegmentID,
		CollectionID: defaultCollectionID,
		PartitionID:  defaultPartitionID,
		SegmentState: querypb.SegmentState_sealed,
	}
	key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
	value, err := proto.Marshal(segmentInfo)
	assert.Nil(t, err)
	err = kv.Save(key, string(value))
	assert.Nil(t, err)

	indexChecker.enqueueIndexedSegment(segmentInfo)
	// the checker removes the etcd entry once the handoff task is enqueued;
	// poll with a short sleep instead of hammering etcd in a hot loop
	for {
		if _, err := kv.Load(key); err != nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	assert.Equal(t, false, taskScheduler.triggerTaskQueue.taskEmpty())

	cancel()
	indexChecker.wg.Wait()
}

View File

@ -283,6 +283,7 @@ func (rc *rootCoordMock) DescribeSegment(ctx context.Context, req *milvuspb.Desc
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
EnableIndex: true,
}, nil
}
@ -397,16 +398,28 @@ func (data *dataCoordMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetR
type indexCoordMock struct {
types.IndexCoord
returnIndexFile bool
}
func newIndexCoordMock() *indexCoordMock {
return &indexCoordMock{}
return &indexCoordMock{
returnIndexFile: false,
}
}
func (c *indexCoordMock) GetIndexFilePaths(ctx context.Context, req *indexpb.GetIndexFilePathsRequest) (*indexpb.GetIndexFilePathsResponse, error) {
return &indexpb.GetIndexFilePathsResponse{
res := &indexpb.GetIndexFilePathsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
}, nil
}
if c.returnIndexFile {
indexPaths, _ := generateIndex(defaultSegmentID)
indexPathInfo := &indexpb.IndexFilePathInfo{
IndexFilePaths: indexPaths,
}
res.FilePaths = []*indexpb.IndexFilePathInfo{indexPathInfo}
}
return res, nil
}

View File

@ -33,9 +33,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
@ -72,6 +70,7 @@ type QueryCoord struct {
newNodeFn newQueryNodeFn
scheduler *TaskScheduler
idAllocator func() (UniqueID, error)
indexChecker *IndexChecker
metricsCacheManager *metricsinfo.MetricsCacheManager
@ -151,12 +150,19 @@ func (qc *QueryCoord) Init() error {
}
// init task scheduler
qc.scheduler, initError = NewTaskScheduler(qc.loopCtx, qc.meta, qc.cluster, qc.kvClient, qc.rootCoordClient, qc.dataCoordClient, qc.idAllocator)
qc.scheduler, initError = NewTaskScheduler(qc.loopCtx, qc.meta, qc.cluster, qc.kvClient, qc.rootCoordClient, qc.dataCoordClient, qc.indexCoordClient, qc.idAllocator)
if initError != nil {
log.Error("query coordinator init task scheduler failed", zap.Error(initError))
return
}
// init index checker
qc.indexChecker, initError = newIndexChecker(qc.loopCtx, qc.kvClient, qc.meta, qc.cluster, qc.scheduler, qc.rootCoordClient, qc.indexCoordClient, qc.dataCoordClient)
if initError != nil {
log.Error("query coordinator init index checker failed", zap.Error(initError))
return
}
qc.metricsCacheManager = metricsinfo.NewMetricsCacheManager()
})
log.Debug("query coordinator init success")
@ -176,6 +182,9 @@ func (qc *QueryCoord) Start() error {
qc.scheduler.Start()
log.Debug("start scheduler ...")
qc.indexChecker.start()
log.Debug("start index checker ...")
Params.CreatedTime = time.Now()
Params.UpdatedTime = time.Now()
@ -204,6 +213,8 @@ func (qc *QueryCoord) Start() error {
func (qc *QueryCoord) Stop() error {
qc.scheduler.Close()
log.Debug("close scheduler ...")
qc.indexChecker.close()
log.Debug("close index checker ...")
qc.loopCancel()
qc.UpdateStateCode(internalpb.StateCode_Abnormal)
@ -300,6 +311,7 @@ func (qc *QueryCoord) watchNodeLoop() {
LoadBalanceRequest: loadBalanceSegment,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
@ -351,6 +363,7 @@ func (qc *QueryCoord) watchNodeLoop() {
LoadBalanceRequest: loadBalanceSegment,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
@ -370,13 +383,7 @@ func (qc *QueryCoord) watchHandoffSegmentLoop() {
defer qc.loopWg.Done()
log.Debug("query coordinator start watch segment loop")
// TODO:: recover handoff task when coord down
watchChan := qc.kvClient.WatchWithPrefix(handoffSegmentPrefix)
unIndexedSegmentChan := make(chan *querypb.SegmentInfo, 1024)
indexSegmentChan := make(chan *querypb.SegmentInfo, 1024)
go qc.checkIndexLoop(ctx, unIndexedSegmentChan, indexSegmentChan)
go qc.processHandoffAfterIndexDone(ctx, indexSegmentChan)
watchChan := qc.kvClient.WatchWithRevision(handoffSegmentPrefix, qc.indexChecker.revision+1)
for {
select {
@ -392,40 +399,16 @@ func (qc *QueryCoord) watchHandoffSegmentLoop() {
}
switch event.Type {
case mvccpb.PUT:
processDone := true
// if collection has not been loaded, then skip the segment
collectionInfo, err := qc.meta.getCollectionInfoByID(segmentInfo.CollectionID)
if err != nil {
log.Debug("watchHandoffSegmentLoop: collection has not been loaded into memory", zap.Int64("collectionID", segmentInfo.CollectionID))
if Params.AutoHandoff && qc.indexChecker.verifyHandoffReqValid(segmentInfo) {
qc.indexChecker.enqueueHandoffReq(segmentInfo)
log.Debug("watchHandoffSegmentLoop: enqueue a handoff request to index checker", zap.Any("segment info", segmentInfo))
} else {
// if partition has not been loaded or released, then skip handoff the segment
if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
for _, id := range collectionInfo.PartitionIDs {
if id == segmentInfo.PartitionID {
unIndexedSegmentChan <- segmentInfo
processDone = false
break
}
}
} else {
partitionReleased := false
for _, id := range collectionInfo.ReleasedPartitionIDs {
if id == segmentInfo.PartitionID {
partitionReleased = true
}
}
if !partitionReleased {
unIndexedSegmentChan <- segmentInfo
processDone = false
}
}
}
if processDone {
log.Debug("watchHandoffSegmentLoop: collection/partition has not been loaded or autoHandoff equal to false, remove req from etcd", zap.Any("segmentInfo", segmentInfo))
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err = qc.kvClient.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("watchHandoffSegmentLoop: remove handoff segment from etcd failed", zap.Error(err))
panic(err)
}
}
default:
@ -436,141 +419,6 @@ func (qc *QueryCoord) watchHandoffSegmentLoop() {
}
}
func (qc *QueryCoord) checkIndexLoop(ctx context.Context, unIndexedChan chan *querypb.SegmentInfo, indexedChan chan *querypb.SegmentInfo) {
for {
select {
case <-ctx.Done():
return
case segmentInfo := <-unIndexedChan:
processDone := true
// TODO:: check whether the index exists in parallel, in case indexCoord cannot create the index normally, and then block the loop
for {
// if the collection has been released, then skip handoff the segment
collectionInfo, err := qc.meta.getCollectionInfoByID(segmentInfo.CollectionID)
if err != nil {
break
}
// if the partition has been released, then skip handoff the segment
partitionReleased := false
for _, id := range collectionInfo.ReleasedPartitionIDs {
if id == segmentInfo.PartitionID {
partitionReleased = true
break
}
}
if partitionReleased {
break
}
// check the buildID of the segment's index whether exist on rootCoord
req := &milvuspb.DescribeSegmentRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeSegment,
},
CollectionID: segmentInfo.CollectionID,
SegmentID: segmentInfo.SegmentID,
}
response, err := qc.rootCoordClient.DescribeSegment(ctx, req)
if err != nil || response.Status.ErrorCode != commonpb.ErrorCode_Success {
continue
}
// if the segment.EnableIndex == false, then load the segment immediately
// only sealed segment can be balanced, so the handoff is needed
if !response.EnableIndex {
log.Debug("checkIndexLoop: segment's enableIndex equal to false, ready to handoff", zap.Int64("segmentID", segmentInfo.SegmentID))
indexedChan <- segmentInfo
processDone = false
break
}
indexFilePathRequest := &indexpb.GetIndexFilePathsRequest{
IndexBuildIDs: []UniqueID{response.BuildID},
}
// if index created done on indexNode, then handoff start
pathResponse, err := qc.indexCoordClient.GetIndexFilePaths(ctx, indexFilePathRequest)
if err != nil || pathResponse.Status.ErrorCode != commonpb.ErrorCode_Success {
continue
}
log.Debug("checkIndexLoop: create segment's index done, ready to handoff", zap.Int64("segmentID", segmentInfo.SegmentID))
indexedChan <- segmentInfo
processDone = false
break
}
if processDone {
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err := qc.kvClient.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("watchHandoffSegmentLoop: remove handoff segment from etcd failed", zap.Error(err))
}
}
}
}
}
func (qc *QueryCoord) processHandoffAfterIndexDone(ctx context.Context, indexedChan chan *querypb.SegmentInfo) {
for {
select {
case <-ctx.Done():
return
case segmentInfo := <-indexedChan:
collectionID := segmentInfo.CollectionID
partitionID := segmentInfo.PartitionID
segmentID := segmentInfo.SegmentID
if Params.AutoHandoff {
log.Debug("processHandoffAfterIndexDone: handoff segment received",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID),
zap.Int64("segmentID", segmentID),
zap.Any("segmentInfo", segmentInfo),
)
baseTask := newBaseTask(ctx, querypb.TriggerCondition_handoff)
handoffReq := &querypb.HandoffSegmentsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_HandoffSegments,
},
SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
}
handoffTask := &handoffTask{
baseTask: baseTask,
HandoffSegmentsRequest: handoffReq,
dataCoord: qc.dataCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
err := qc.scheduler.Enqueue(handoffTask)
if err != nil {
log.Error("processHandoffAfterIndexDone: handoffTask enqueue failed", zap.Error(err))
break
}
go func() {
err := handoffTask.waitToFinish()
if err != nil {
log.Error("processHandoffAfterIndexDone: handoffTask failed", zap.Error(err))
}
}()
log.Debug("processHandoffAfterIndexDone: handoffTask completed",
zap.Any("collectionID", collectionID),
zap.Any("partitionID", partitionID),
zap.Any("segmentID", segmentID),
zap.Any("channel", segmentInfo.ChannelID),
)
}
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, collectionID, partitionID, segmentID)
err := qc.kvClient.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("processHandoffAfterIndexDone: remove handoff segment from etcd failed", zap.Error(err))
}
}
}
}
func (qc *QueryCoord) loadBalanceSegmentLoop() {
ctx, cancel := context.WithCancel(qc.loopCtx)
defer cancel()
@ -666,6 +514,7 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
LoadBalanceRequest: req,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
indexCoord: qc.indexCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}

View File

@ -93,6 +93,7 @@ func startQueryCoord(ctx context.Context) (*QueryCoord, error) {
if err != nil {
return nil, err
}
coord.cluster.(*queryNodeCluster).segSizeEstimator = segSizeEstimateForTest
err = coord.Start()
if err != nil {
return nil, err
@ -229,6 +230,9 @@ func TestHandoffSegmentLoop(t *testing.T) {
queryCoord, err := startQueryCoord(baseCtx)
assert.Nil(t, err)
indexCoord := newIndexCoordMock()
indexCoord.returnIndexFile = true
queryCoord.indexCoordClient = indexCoord
queryNode1, err := startQueryNodeServer(baseCtx)
assert.Nil(t, err)
@ -524,7 +528,9 @@ func TestLoadBalanceSegmentLoop(t *testing.T) {
loadPartitionTask := &loadPartitionTask{
baseTask: baseTask,
LoadPartitionsRequest: req,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}

View File

@ -21,7 +21,6 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func defaultSegAllocatePolicy() SegmentAllocatePolicy {
@ -94,16 +93,13 @@ func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegme
if len(reqs) == 0 {
return nil
}
dataSizePerReq := make([]int64, 0)
for _, req := range reqs {
sizePerRecord, err := typeutil.EstimateSizePerRecord(req.Schema)
sizeOfReq, err := cluster.estimateSegmentsSize(req)
if err != nil {
return err
}
sizeOfReq := int64(0)
for _, loadInfo := range req.Infos {
sizeOfReq += int64(sizePerRecord) * loadInfo.NumOfRows
}
dataSizePerReq = append(dataSizePerReq, sizeOfReq)
}

View File

@ -18,6 +18,7 @@ import (
"github.com/stretchr/testify/assert"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
minioKV "github.com/milvus-io/milvus/internal/kv/minio"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -33,15 +34,28 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
meta, err := newMeta(baseCtx, kv, nil, nil)
assert.Nil(t, err)
cluster := &queryNodeCluster{
ctx: baseCtx,
cancel: cancel,
client: kv,
clusterMeta: meta,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
ctx: baseCtx,
cancel: cancel,
client: kv,
clusterMeta: meta,
nodes: make(map[int64]Node),
newNodeFn: newQueryNodeTest,
session: clusterSession,
segSizeEstimator: segSizeEstimateForTest,
}
option := &minioKV.Option{
Address: Params.MinioEndPoint,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSLStr,
CreateBucket: true,
BucketName: Params.MinioBucketName,
}
cluster.dataKV, err = minioKV.NewMinIOKV(baseCtx, option)
assert.Nil(t, err)
schema := genCollectionSchema(defaultCollectionID, false)
firstReq := &querypb.LoadSegmentsRequest{
CollectionID: defaultCollectionID,

View File

@ -244,10 +244,11 @@ func (bt *baseTask) rollBack(ctx context.Context) []task {
type loadCollectionTask struct {
*baseTask
*querypb.LoadCollectionRequest
rootCoord types.RootCoord
dataCoord types.DataCoord
cluster Cluster
meta Meta
rootCoord types.RootCoord
dataCoord types.DataCoord
indexCoord types.IndexCoord
cluster Cluster
meta Meta
}
func (lct *loadCollectionTask) msgBase() *commonpb.MsgBase {
@ -376,6 +377,15 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
Deltalogs: segmentBingLog.Deltalogs,
}
indexInfo, err := getIndexInfo(ctx, &querypb.SegmentInfo{
CollectionID: collectionID,
SegmentID: segmentID,
}, lct.rootCoord, lct.indexCoord)
if err == nil && indexInfo.enableIndex {
segmentLoadInfo.IndexPathInfos = indexInfo.infos
}
msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
@ -636,10 +646,12 @@ func (rct *releaseCollectionTask) rollBack(ctx context.Context) []task {
type loadPartitionTask struct {
*baseTask
*querypb.LoadPartitionsRequest
dataCoord types.DataCoord
cluster Cluster
meta Meta
addCol bool
rootCoord types.RootCoord
dataCoord types.DataCoord
indexCoord types.IndexCoord
cluster Cluster
meta Meta
addCol bool
}
func (lpt *loadPartitionTask) msgBase() *commonpb.MsgBase {
@ -732,6 +744,15 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
Deltalogs: segmentBingLog.Deltalogs,
}
indexInfo, err := getIndexInfo(ctx, &querypb.SegmentInfo{
CollectionID: collectionID,
SegmentID: segmentID,
}, lpt.rootCoord, lpt.indexCoord)
if err == nil && indexInfo.enableIndex {
segmentLoadInfo.IndexPathInfos = indexInfo.infos
}
msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
@ -1534,6 +1555,7 @@ func (ht *handoffTask) execute(ctx context.Context) error {
BinlogPaths: segmentBinlogs.FieldBinlogs,
NumOfRows: segmentBinlogs.NumOfRows,
CompactionFrom: segmentInfo.CompactionFrom,
IndexPathInfos: segmentInfo.IndexPathInfos,
}
msgBase := proto.Clone(ht.Base).(*commonpb.MsgBase)
@ -1623,10 +1645,11 @@ func (ht *handoffTask) rollBack(ctx context.Context) []task {
type loadBalanceTask struct {
*baseTask
*querypb.LoadBalanceRequest
rootCoord types.RootCoord
dataCoord types.DataCoord
cluster Cluster
meta Meta
rootCoord types.RootCoord
dataCoord types.DataCoord
indexCoord types.IndexCoord
cluster Cluster
meta Meta
}
func (lbt *loadBalanceTask) msgBase() *commonpb.MsgBase {
@ -1712,6 +1735,14 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
Statslogs: segmentBingLog.Statslogs,
Deltalogs: segmentBingLog.Deltalogs,
}
indexInfo, err := getIndexInfo(ctx, &querypb.SegmentInfo{
CollectionID: collectionID,
SegmentID: segmentID,
}, lbt.rootCoord, lbt.indexCoord)
if err == nil && indexInfo.enableIndex {
segmentLoadInfo.IndexPathInfos = indexInfo.infos
}
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
@ -1909,6 +1940,15 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
Deltalogs: segmentBingLog.Deltalogs,
}
indexInfo, err := getIndexInfo(ctx, &querypb.SegmentInfo{
CollectionID: collectionID,
SegmentID: segmentID,
}, lbt.rootCoord, lbt.indexCoord)
if err == nil && indexInfo.enableIndex {
segmentLoadInfo.IndexPathInfos = indexInfo.infos
}
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{

View File

@ -129,8 +129,9 @@ type TaskScheduler struct {
client *etcdkv.EtcdKV
stopActivateTaskLoopChan chan int
rootCoord types.RootCoord
dataCoord types.DataCoord
rootCoord types.RootCoord
dataCoord types.DataCoord
indexCoord types.IndexCoord
wg sync.WaitGroup
ctx context.Context
@ -144,6 +145,7 @@ func NewTaskScheduler(ctx context.Context,
kv *etcdkv.EtcdKV,
rootCoord types.RootCoord,
dataCoord types.DataCoord,
indexCoord types.IndexCoord,
idAllocator func() (UniqueID, error)) (*TaskScheduler, error) {
ctx1, cancel := context.WithCancel(ctx)
taskChan := make(chan task, 1024)
@ -159,6 +161,7 @@ func NewTaskScheduler(ctx context.Context,
stopActivateTaskLoopChan: stopTaskLoopChan,
rootCoord: rootCoord,
dataCoord: dataCoord,
indexCoord: indexCoord,
}
s.triggerTaskQueue = NewTaskQueue()
@ -271,6 +274,7 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task,
LoadCollectionRequest: &loadReq,
rootCoord: scheduler.rootCoord,
dataCoord: scheduler.dataCoord,
indexCoord: scheduler.indexCoord,
cluster: scheduler.cluster,
meta: scheduler.meta,
}
@ -284,7 +288,9 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task,
loadPartitionTask := &loadPartitionTask{
baseTask: baseTask,
LoadPartitionsRequest: &loadReq,
rootCoord: scheduler.rootCoord,
dataCoord: scheduler.dataCoord,
indexCoord: scheduler.indexCoord,
cluster: scheduler.cluster,
meta: scheduler.meta,
}
@ -398,6 +404,7 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task,
LoadBalanceRequest: &loadReq,
rootCoord: scheduler.rootCoord,
dataCoord: scheduler.dataCoord,
indexCoord: scheduler.indexCoord,
cluster: scheduler.cluster,
meta: scheduler.meta,
}

View File

@ -36,6 +36,7 @@ func genLoadCollectionTask(ctx context.Context, queryCoord *QueryCoord) *loadCol
LoadCollectionRequest: req,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
@ -55,7 +56,9 @@ func genLoadPartitionTask(ctx context.Context, queryCoord *QueryCoord) *loadPart
loadPartitionTask := &loadPartitionTask{
baseTask: baseTask,
LoadPartitionsRequest: req,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
@ -159,6 +162,7 @@ func genWatchDmChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID i
LoadCollectionRequest: parentReq,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
meta: queryCoord.meta,
cluster: queryCoord.cluster,
}
@ -211,6 +215,7 @@ func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int6
LoadCollectionRequest: parentReq,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
meta: queryCoord.meta,
cluster: queryCoord.cluster,
}
@ -825,10 +830,11 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
SourceNodeIDs: []int64{node1.queryNodeID},
SealedSegmentIDs: []UniqueID{defaultSegmentID},
},
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
err = queryCoord.scheduler.Enqueue(loadBalanceTask)
assert.Nil(t, err)
@ -846,10 +852,11 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
SourceNodeIDs: []int64{node1.queryNodeID},
SealedSegmentIDs: []UniqueID{defaultSegmentID + 100},
},
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
err = queryCoord.scheduler.Enqueue(loadBalanceTask)
assert.Nil(t, err)
@ -866,10 +873,11 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
},
SourceNodeIDs: []int64{node1.queryNodeID},
},
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
err = queryCoord.scheduler.Enqueue(loadBalanceTask)
assert.Nil(t, err)
@ -885,10 +893,11 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
MsgType: commonpb.MsgType_LoadBalanceSegments,
},
},
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
err = queryCoord.scheduler.Enqueue(loadBalanceTask)
assert.Nil(t, err)
@ -905,10 +914,11 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
},
SourceNodeIDs: []int64{node1.queryNodeID + 100},
},
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
rootCoord: queryCoord.rootCoordClient,
dataCoord: queryCoord.dataCoordClient,
indexCoord: queryCoord.indexCoordClient,
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
err = queryCoord.scheduler.Enqueue(loadBalanceTask)
assert.Nil(t, err)