Implement memory replica in Proxy, QueryNode and QueryCoord (#16470)

Related to #16298 #16291 #16154
Co-authored-by: sunby <bingyi.sun@zilliz.com>
Co-authored-by: yangxuan <xuan.yang@zilliz.com>
Co-authored-by: yah01 <yang.cen@zilliz.com>
Co-authored-by: Letian Jiang <letian.jiang@zilliz.com>

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
pull/16552/head
congqixia 2022-04-20 16:15:41 +08:00 committed by GitHub
parent 9aa557f8a1
commit 69252f812d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
77 changed files with 8487 additions and 4619 deletions

View File

@ -290,7 +290,7 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
"led\020\004*\202\001\n\014SegmentState\022\024\n\020SegmentStateNo"
"ne\020\000\022\014\n\010NotExist\020\001\022\013\n\007Growing\020\002\022\n\n\006Seale"
"d\020\003\022\013\n\007Flushed\020\004\022\014\n\010Flushing\020\005\022\013\n\007Droppe"
"d\020\006\022\r\n\tImporting\020\007*\261\n\n\007MsgType\022\r\n\tUndefi"
"d\020\006\022\r\n\tImporting\020\007*\307\n\n\007MsgType\022\r\n\tUndefi"
"ned\020\000\022\024\n\020CreateCollection\020d\022\022\n\016DropColle"
"ction\020e\022\021\n\rHasCollection\020f\022\026\n\022DescribeCo"
"llection\020g\022\023\n\017ShowCollections\020h\022\024\n\020GetSy"
@ -314,27 +314,28 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
"atchDmChannels\020\374\003\022\025\n\020RemoveDmChannels\020\375\003"
"\022\027\n\022WatchQueryChannels\020\376\003\022\030\n\023RemoveQuery"
"Channels\020\377\003\022\035\n\030SealedSegmentsChangeInfo\020"
"\200\004\022\027\n\022WatchDeltaChannels\020\201\004\022\020\n\013SegmentIn"
"fo\020\330\004\022\017\n\nSystemInfo\020\331\004\022\024\n\017GetRecoveryInf"
"o\020\332\004\022\024\n\017GetSegmentState\020\333\004\022\r\n\010TimeTick\020\260"
"\t\022\023\n\016QueryNodeStats\020\261\t\022\016\n\tLoadIndex\020\262\t\022\016"
"\n\tRequestID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n\017Alloc"
"ateSegment\020\265\t\022\026\n\021SegmentStatistics\020\266\t\022\025\n"
"\020SegmentFlushDone\020\267\t\022\017\n\nDataNodeTt\020\270\t\022\025\n"
"\020CreateCredential\020\334\013\022\022\n\rGetCredential\020\335\013"
"\022\025\n\020DeleteCredential\020\336\013\022\025\n\020UpdateCredent"
"ial\020\337\013\022\026\n\021ListCredUsernames\020\340\013*\"\n\007DslTyp"
"e\022\007\n\003Dsl\020\000\022\016\n\nBoolExprV1\020\001*B\n\017Compaction"
"State\022\021\n\rUndefiedState\020\000\022\r\n\tExecuting\020\001\022"
"\r\n\tCompleted\020\002*X\n\020ConsistencyLevel\022\n\n\006St"
"rong\020\000\022\013\n\007Session\020\001\022\013\n\007Bounded\020\002\022\016\n\nEven"
"tually\020\003\022\016\n\nCustomized\020\004*\227\001\n\013ImportState"
"\022\021\n\rImportPending\020\000\022\020\n\014ImportFailed\020\001\022\021\n"
"\rImportStarted\020\002\022\024\n\020ImportDownloaded\020\003\022\020"
"\n\014ImportParsed\020\004\022\023\n\017ImportPersisted\020\005\022\023\n"
"\017ImportCompleted\020\006BW\n\016io.milvus.grpcB\013Co"
"mmonProtoP\001Z3github.com/milvus-io/milvus"
"/internal/proto/commonpb\240\001\001b\006proto3"
"\200\004\022\027\n\022WatchDeltaChannels\020\201\004\022\024\n\017GetShardL"
"eaders\020\202\004\022\020\n\013SegmentInfo\020\330\004\022\017\n\nSystemInf"
"o\020\331\004\022\024\n\017GetRecoveryInfo\020\332\004\022\024\n\017GetSegment"
"State\020\333\004\022\r\n\010TimeTick\020\260\t\022\023\n\016QueryNodeStat"
"s\020\261\t\022\016\n\tLoadIndex\020\262\t\022\016\n\tRequestID\020\263\t\022\017\n\n"
"RequestTSO\020\264\t\022\024\n\017AllocateSegment\020\265\t\022\026\n\021S"
"egmentStatistics\020\266\t\022\025\n\020SegmentFlushDone\020"
"\267\t\022\017\n\nDataNodeTt\020\270\t\022\025\n\020CreateCredential\020"
"\334\013\022\022\n\rGetCredential\020\335\013\022\025\n\020DeleteCredenti"
"al\020\336\013\022\025\n\020UpdateCredential\020\337\013\022\026\n\021ListCred"
"Usernames\020\340\013*\"\n\007DslType\022\007\n\003Dsl\020\000\022\016\n\nBool"
"ExprV1\020\001*B\n\017CompactionState\022\021\n\rUndefiedS"
"tate\020\000\022\r\n\tExecuting\020\001\022\r\n\tCompleted\020\002*X\n\020"
"ConsistencyLevel\022\n\n\006Strong\020\000\022\013\n\007Session\020"
"\001\022\013\n\007Bounded\020\002\022\016\n\nEventually\020\003\022\016\n\nCustom"
"ized\020\004*\227\001\n\013ImportState\022\021\n\rImportPending\020"
"\000\022\020\n\014ImportFailed\020\001\022\021\n\rImportStarted\020\002\022\024"
"\n\020ImportDownloaded\020\003\022\020\n\014ImportParsed\020\004\022\023"
"\n\017ImportPersisted\020\005\022\023\n\017ImportCompleted\020\006"
"BW\n\016io.milvus.grpcB\013CommonProtoP\001Z3githu"
"b.com/milvus-io/milvus/internal/proto/co"
"mmonpb\240\001\001b\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_common_2eproto_deps[1] = {
};
@ -351,7 +352,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_com
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_common_2eproto_once;
static bool descriptor_table_common_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_common_2eproto = {
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 3275,
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 3297,
&descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_sccs, descriptor_table_common_2eproto_deps, 8, 0,
schemas, file_default_instances, TableStruct_common_2eproto::offsets,
file_level_metadata_common_2eproto, 8, file_level_enum_descriptors_common_2eproto, file_level_service_descriptors_common_2eproto,
@ -497,6 +498,7 @@ bool MsgType_IsValid(int value) {
case 511:
case 512:
case 513:
case 514:
case 600:
case 601:
case 602:

View File

@ -262,6 +262,7 @@ enum MsgType : int {
RemoveQueryChannels = 511,
SealedSegmentsChangeInfo = 512,
WatchDeltaChannels = 513,
GetShardLeaders = 514,
SegmentInfo = 600,
SystemInfo = 601,
GetRecoveryInfo = 602,

File diff suppressed because it is too large Load Diff

View File

@ -3888,6 +3888,7 @@ class LoadPartitionsRequest :
kDbNameFieldNumber = 2,
kCollectionNameFieldNumber = 3,
kBaseFieldNumber = 1,
kReplicaNumberFieldNumber = 5,
};
// repeated string partition_names = 4;
int partition_names_size() const;
@ -3936,6 +3937,11 @@ class LoadPartitionsRequest :
::milvus::proto::common::MsgBase* mutable_base();
void set_allocated_base(::milvus::proto::common::MsgBase* base);
// int32 replica_number = 5;
void clear_replica_number();
::PROTOBUF_NAMESPACE_ID::int32 replica_number() const;
void set_replica_number(::PROTOBUF_NAMESPACE_ID::int32 value);
// @@protoc_insertion_point(class_scope:milvus.proto.milvus.LoadPartitionsRequest)
private:
class _Internal;
@ -3945,6 +3951,7 @@ class LoadPartitionsRequest :
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr db_name_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collection_name_;
::milvus::proto::common::MsgBase* base_;
::PROTOBUF_NAMESPACE_ID::int32 replica_number_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_milvus_2eproto;
};
@ -11873,6 +11880,7 @@ class LoadBalanceRequest :
enum : int {
kDstNodeIDsFieldNumber = 3,
kSealedSegmentIDsFieldNumber = 4,
kCollectionNameFieldNumber = 5,
kBaseFieldNumber = 1,
kSrcNodeIDFieldNumber = 2,
};
@ -11898,6 +11906,17 @@ class LoadBalanceRequest :
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >*
mutable_sealed_segmentids();
// string collectionName = 5;
void clear_collectionname();
const std::string& collectionname() const;
void set_collectionname(const std::string& value);
void set_collectionname(std::string&& value);
void set_collectionname(const char* value);
void set_collectionname(const char* value, size_t size);
std::string* mutable_collectionname();
std::string* release_collectionname();
void set_allocated_collectionname(std::string* collectionname);
// .milvus.proto.common.MsgBase base = 1;
bool has_base() const;
void clear_base();
@ -11920,6 +11939,7 @@ class LoadBalanceRequest :
mutable std::atomic<int> _dst_nodeids_cached_byte_size_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 > sealed_segmentids_;
mutable std::atomic<int> _sealed_segmentids_cached_byte_size_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collectionname_;
::milvus::proto::common::MsgBase* base_;
::PROTOBUF_NAMESPACE_ID::int64 src_nodeid_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
@ -18987,6 +19007,20 @@ LoadPartitionsRequest::mutable_partition_names() {
return &partition_names_;
}
// int32 replica_number = 5;
inline void LoadPartitionsRequest::clear_replica_number() {
replica_number_ = 0;
}
inline ::PROTOBUF_NAMESPACE_ID::int32 LoadPartitionsRequest::replica_number() const {
// @@protoc_insertion_point(field_get:milvus.proto.milvus.LoadPartitionsRequest.replica_number)
return replica_number_;
}
inline void LoadPartitionsRequest::set_replica_number(::PROTOBUF_NAMESPACE_ID::int32 value) {
replica_number_ = value;
// @@protoc_insertion_point(field_set:milvus.proto.milvus.LoadPartitionsRequest.replica_number)
}
// -------------------------------------------------------------------
// ReleasePartitionsRequest
@ -26343,6 +26377,57 @@ LoadBalanceRequest::mutable_sealed_segmentids() {
return &sealed_segmentids_;
}
// string collectionName = 5;
inline void LoadBalanceRequest::clear_collectionname() {
collectionname_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline const std::string& LoadBalanceRequest::collectionname() const {
// @@protoc_insertion_point(field_get:milvus.proto.milvus.LoadBalanceRequest.collectionName)
return collectionname_.GetNoArena();
}
inline void LoadBalanceRequest::set_collectionname(const std::string& value) {
collectionname_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:milvus.proto.milvus.LoadBalanceRequest.collectionName)
}
inline void LoadBalanceRequest::set_collectionname(std::string&& value) {
collectionname_.SetNoArena(
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:milvus.proto.milvus.LoadBalanceRequest.collectionName)
}
inline void LoadBalanceRequest::set_collectionname(const char* value) {
GOOGLE_DCHECK(value != nullptr);
collectionname_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
// @@protoc_insertion_point(field_set_char:milvus.proto.milvus.LoadBalanceRequest.collectionName)
}
inline void LoadBalanceRequest::set_collectionname(const char* value, size_t size) {
collectionname_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:milvus.proto.milvus.LoadBalanceRequest.collectionName)
}
inline std::string* LoadBalanceRequest::mutable_collectionname() {
// @@protoc_insertion_point(field_mutable:milvus.proto.milvus.LoadBalanceRequest.collectionName)
return collectionname_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline std::string* LoadBalanceRequest::release_collectionname() {
// @@protoc_insertion_point(field_release:milvus.proto.milvus.LoadBalanceRequest.collectionName)
return collectionname_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline void LoadBalanceRequest::set_allocated_collectionname(std::string* collectionname) {
if (collectionname != nullptr) {
} else {
}
collectionname_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), collectionname);
// @@protoc_insertion_point(field_set_allocated:milvus.proto.milvus.LoadBalanceRequest.collectionName)
}
// -------------------------------------------------------------------
// ManualCompactionRequest

View File

@ -528,6 +528,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
segment2Binlogs := make(map[UniqueID][]*datapb.FieldBinlog)
segment2StatsBinlogs := make(map[UniqueID][]*datapb.FieldBinlog)
segment2DeltaBinlogs := make(map[UniqueID][]*datapb.FieldBinlog)
segment2InsertChannel := make(map[UniqueID]string)
segmentsNumOfRows := make(map[UniqueID]int64)
flushedIDs := make(map[int64]struct{})
@ -542,6 +543,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
if segment.State != commonpb.SegmentState_Flushed && segment.State != commonpb.SegmentState_Flushing {
continue
}
segment2InsertChannel[segment.ID] = segment.InsertChannel
binlogs := segment.GetBinlogs()
if len(binlogs) == 0 {
@ -590,11 +592,12 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
binlogs := make([]*datapb.SegmentBinlogs, 0, len(segment2Binlogs))
for segmentID := range flushedIDs {
sbl := &datapb.SegmentBinlogs{
SegmentID: segmentID,
NumOfRows: segmentsNumOfRows[segmentID],
FieldBinlogs: segment2Binlogs[segmentID],
Statslogs: segment2StatsBinlogs[segmentID],
Deltalogs: segment2DeltaBinlogs[segmentID],
SegmentID: segmentID,
NumOfRows: segmentsNumOfRows[segmentID],
FieldBinlogs: segment2Binlogs[segmentID],
Statslogs: segment2StatsBinlogs[segmentID],
Deltalogs: segment2DeltaBinlogs[segmentID],
InsertChannel: segment2InsertChannel[segmentID],
}
binlogs = append(binlogs, sbl)
}

View File

@ -21,7 +21,6 @@ import (
"errors"
"testing"
"github.com/milvus-io/milvus/internal/proxy"
"github.com/milvus-io/milvus/internal/util/mock"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
@ -29,7 +28,7 @@ import (
)
func Test_NewClient(t *testing.T) {
proxy.Params.InitOnce()
ClientParams.InitOnce(typeutil.QueryNodeRole)
ctx := context.Background()
client, err := NewClient(ctx, "")

View File

@ -149,6 +149,7 @@ enum MsgType {
RemoveQueryChannels = 511;
SealedSegmentsChangeInfo = 512;
WatchDeltaChannels = 513;
GetShardLeaders = 514;
/* DATA SERVICE */
SegmentInfo = 600;
@ -221,4 +222,4 @@ enum ImportState {
ImportParsed = 4;
ImportPersisted = 5;
ImportCompleted = 6;
}
}

View File

@ -274,6 +274,7 @@ const (
MsgType_RemoveQueryChannels MsgType = 511
MsgType_SealedSegmentsChangeInfo MsgType = 512
MsgType_WatchDeltaChannels MsgType = 513
MsgType_GetShardLeaders MsgType = 514
// DATA SERVICE
MsgType_SegmentInfo MsgType = 600
MsgType_SystemInfo MsgType = 601
@ -344,6 +345,7 @@ var MsgType_name = map[int32]string{
511: "RemoveQueryChannels",
512: "SealedSegmentsChangeInfo",
513: "WatchDeltaChannels",
514: "GetShardLeaders",
600: "SegmentInfo",
601: "SystemInfo",
602: "GetRecoveryInfo",
@ -411,6 +413,7 @@ var MsgType_value = map[string]int32{
"RemoveQueryChannels": 511,
"SealedSegmentsChangeInfo": 512,
"WatchDeltaChannels": 513,
"GetShardLeaders": 514,
"SegmentInfo": 600,
"SystemInfo": 601,
"GetRecoveryInfo": 602,
@ -966,113 +969,114 @@ func init() {
func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
var fileDescriptor_555bd8c177793206 = []byte{
// 1727 bytes of a gzipped FileDescriptorProto
// 1736 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4b, 0x73, 0x1c, 0x49,
0xf1, 0x57, 0xcf, 0x8c, 0x34, 0x9e, 0x1c, 0x3d, 0xca, 0x25, 0x59, 0xd6, 0xda, 0xda, 0xfd, 0xfb,
0xaf, 0x93, 0x43, 0x11, 0x6b, 0x03, 0x8e, 0x80, 0xd3, 0x1e, 0xa4, 0x69, 0x49, 0x9e, 0xb0, 0x24,
0x8b, 0x19, 0xc9, 0xbb, 0xc1, 0x01, 0x47, 0xa9, 0x3b, 0x35, 0x2a, 0xdc, 0x5d, 0x35, 0x54, 0x55,
0xcb, 0x1a, 0x4e, 0x0b, 0x9f, 0x00, 0xf6, 0xc2, 0x95, 0x0f, 0x00, 0x04, 0xcb, 0xfb, 0x23, 0xf0,
0x3e, 0xf3, 0x86, 0x23, 0x1f, 0x80, 0xe7, 0x3e, 0xbc, 0x44, 0x56, 0xf7, 0x74, 0xb7, 0xed, 0xdd,
0x13, 0xb7, 0xca, 0x5f, 0x66, 0xfe, 0x2a, 0x2b, 0x33, 0x2b, 0xab, 0x60, 0x3e, 0xd2, 0x69, 0xaa,
0xd5, 0x9d, 0xb1, 0xd1, 0x4e, 0xf3, 0xe5, 0x54, 0x26, 0x17, 0x99, 0xcd, 0xa5, 0x3b, 0xb9, 0x6a,
0xe3, 0x31, 0xcc, 0x0d, 0x9d, 0x70, 0x99, 0xe5, 0x6f, 0x00, 0xa0, 0x31, 0xda, 0x3c, 0x8e, 0x74,
0x8c, 0x6b, 0xc1, 0xad, 0xe0, 0xf6, 0xe2, 0x67, 0x5e, 0xbb, 0xf3, 0x31, 0x3e, 0x77, 0x76, 0xc8,
0xac, 0xa7, 0x63, 0x1c, 0x74, 0x70, 0xba, 0xe4, 0xab, 0x30, 0x67, 0x50, 0x58, 0xad, 0xd6, 0x1a,
0xb7, 0x82, 0xdb, 0x9d, 0x41, 0x21, 0x6d, 0x7c, 0x16, 0xe6, 0x1f, 0xe0, 0xe4, 0x91, 0x48, 0x32,
0x3c, 0x12, 0xd2, 0x70, 0x06, 0xcd, 0x27, 0x38, 0xf1, 0xfc, 0x9d, 0x01, 0x2d, 0xf9, 0x0a, 0xcc,
0x5e, 0x90, 0xba, 0x70, 0xcc, 0x85, 0x8d, 0x7b, 0xd0, 0x7d, 0x80, 0x93, 0x50, 0x38, 0xf1, 0x09,
0x6e, 0x1c, 0x5a, 0xb1, 0x70, 0xc2, 0x7b, 0xcd, 0x0f, 0xfc, 0x7a, 0x63, 0x1d, 0x5a, 0xdb, 0x89,
0x3e, 0xad, 0x28, 0x03, 0xaf, 0x2c, 0x28, 0x5f, 0x87, 0xf6, 0x56, 0x1c, 0x1b, 0xb4, 0x96, 0x2f,
0x42, 0x43, 0x8e, 0x0b, 0xb6, 0x86, 0x1c, 0x13, 0xd9, 0x58, 0x1b, 0xe7, 0xc9, 0x9a, 0x03, 0xbf,
0xde, 0x78, 0x27, 0x80, 0xf6, 0x81, 0x1d, 0x6d, 0x0b, 0x8b, 0xfc, 0x73, 0x70, 0x25, 0xb5, 0xa3,
0xc7, 0x6e, 0x32, 0x9e, 0xa6, 0x66, 0xfd, 0x63, 0x53, 0x73, 0x60, 0x47, 0xc7, 0x93, 0x31, 0x0e,
0xda, 0x69, 0xbe, 0xa0, 0x48, 0x52, 0x3b, 0xea, 0x87, 0x05, 0x73, 0x2e, 0xf0, 0x75, 0xe8, 0x38,
0x99, 0xa2, 0x75, 0x22, 0x1d, 0xaf, 0x35, 0x6f, 0x05, 0xb7, 0x5b, 0x83, 0x0a, 0xe0, 0x37, 0xe0,
0x8a, 0xd5, 0x99, 0x89, 0xb0, 0x1f, 0xae, 0xb5, 0xbc, 0x5b, 0x29, 0x6f, 0xbc, 0x01, 0x9d, 0x03,
0x3b, 0xba, 0x8f, 0x22, 0x46, 0xc3, 0x3f, 0x05, 0xad, 0x53, 0x61, 0xf3, 0x88, 0xba, 0x9f, 0x1c,
0x11, 0x9d, 0x60, 0xe0, 0x2d, 0x37, 0xbe, 0x08, 0xf3, 0xe1, 0xc1, 0xfe, 0xff, 0xc0, 0x40, 0xa1,
0xdb, 0x73, 0x61, 0xe2, 0x43, 0x91, 0x4e, 0x2b, 0x56, 0x01, 0x9b, 0xcf, 0x66, 0xa1, 0x53, 0xb6,
0x07, 0xef, 0x42, 0x7b, 0x98, 0x45, 0x11, 0x5a, 0xcb, 0x66, 0xf8, 0x32, 0x2c, 0x9d, 0x28, 0xbc,
0x1c, 0x63, 0xe4, 0x30, 0xf6, 0x36, 0x2c, 0xe0, 0x57, 0x61, 0xa1, 0xa7, 0x95, 0xc2, 0xc8, 0xed,
0x0a, 0x99, 0x60, 0xcc, 0x1a, 0x7c, 0x05, 0xd8, 0x11, 0x9a, 0x54, 0x5a, 0x2b, 0xb5, 0x0a, 0x51,
0x49, 0x8c, 0x59, 0x93, 0x5f, 0x87, 0xe5, 0x9e, 0x4e, 0x12, 0x8c, 0x9c, 0xd4, 0xea, 0x50, 0xbb,
0x9d, 0x4b, 0x69, 0x9d, 0x65, 0x2d, 0xa2, 0xed, 0x27, 0x09, 0x8e, 0x44, 0xb2, 0x65, 0x46, 0x59,
0x8a, 0xca, 0xb1, 0x59, 0xe2, 0x28, 0xc0, 0x50, 0xa6, 0xa8, 0x88, 0x89, 0xb5, 0x6b, 0x68, 0x5f,
0xc5, 0x78, 0x49, 0xf5, 0x61, 0x57, 0xf8, 0x2b, 0x70, 0xad, 0x40, 0x6b, 0x1b, 0x88, 0x14, 0x59,
0x87, 0x2f, 0x41, 0xb7, 0x50, 0x1d, 0x3f, 0x3c, 0x7a, 0xc0, 0xa0, 0xc6, 0x30, 0xd0, 0x4f, 0x07,
0x18, 0x69, 0x13, 0xb3, 0x6e, 0x2d, 0x84, 0x47, 0x18, 0x39, 0x6d, 0xfa, 0x21, 0x9b, 0xa7, 0x80,
0x0b, 0x70, 0x88, 0xc2, 0x44, 0xe7, 0x03, 0xb4, 0x59, 0xe2, 0xd8, 0x02, 0x67, 0x30, 0xbf, 0x2b,
0x13, 0x3c, 0xd4, 0x6e, 0x57, 0x67, 0x2a, 0x66, 0x8b, 0x7c, 0x11, 0xe0, 0x00, 0x9d, 0x28, 0x32,
0xb0, 0x44, 0xdb, 0xf6, 0x44, 0x74, 0x8e, 0x05, 0xc0, 0xf8, 0x2a, 0xf0, 0x9e, 0x50, 0x4a, 0xbb,
0x9e, 0x41, 0xe1, 0x70, 0x57, 0x27, 0x31, 0x1a, 0x76, 0x95, 0xc2, 0x79, 0x0e, 0x97, 0x09, 0x32,
0x5e, 0x59, 0x87, 0x98, 0x60, 0x69, 0xbd, 0x5c, 0x59, 0x17, 0x38, 0x59, 0xaf, 0x50, 0xf0, 0xdb,
0x99, 0x4c, 0x62, 0x9f, 0x92, 0xbc, 0x2c, 0xd7, 0x28, 0xc6, 0x22, 0xf8, 0xc3, 0xfd, 0xfe, 0xf0,
0x98, 0xad, 0xf2, 0x6b, 0x70, 0xb5, 0x40, 0x0e, 0xd0, 0x19, 0x19, 0xf9, 0xe4, 0x5d, 0xa7, 0x50,
0x1f, 0x66, 0xee, 0xe1, 0xd9, 0x01, 0xa6, 0xda, 0x4c, 0xd8, 0x1a, 0x15, 0xd4, 0x33, 0x4d, 0x4b,
0xc4, 0x5e, 0xa1, 0x1d, 0x76, 0xd2, 0xb1, 0x9b, 0x54, 0xe9, 0x65, 0x37, 0xf8, 0x4d, 0xb8, 0x7e,
0x32, 0x8e, 0x85, 0xc3, 0x7e, 0x4a, 0x97, 0xed, 0x58, 0xd8, 0x27, 0x74, 0xdc, 0xcc, 0x20, 0xbb,
0xc9, 0x6f, 0xc0, 0xea, 0xf3, 0xb5, 0x28, 0x93, 0xb5, 0x4e, 0x8e, 0xf9, 0x69, 0x7b, 0x06, 0x63,
0x54, 0x4e, 0x8a, 0x64, 0xea, 0xf8, 0x6a, 0xc5, 0xfa, 0xb2, 0xf2, 0x35, 0x52, 0xe6, 0x27, 0x7f,
0x59, 0xf9, 0x7f, 0x7c, 0x0d, 0x56, 0xf6, 0xd0, 0xbd, 0xac, 0xb9, 0x45, 0x9a, 0x7d, 0x69, 0xbd,
0xea, 0xc4, 0xa2, 0xb1, 0x53, 0xcd, 0xff, 0x73, 0x0e, 0x0b, 0x61, 0x38, 0xc0, 0x2f, 0x67, 0x68,
0xdd, 0x40, 0x44, 0xc8, 0xfe, 0xd6, 0xde, 0x7c, 0x0b, 0xc0, 0x9f, 0x9f, 0x86, 0x2a, 0x72, 0x0e,
0x8b, 0x95, 0x74, 0xa8, 0x15, 0xb2, 0x19, 0x3e, 0x0f, 0x57, 0x4e, 0x94, 0xb4, 0x36, 0xc3, 0x98,
0x05, 0x54, 0xfb, 0xbe, 0x3a, 0x32, 0x7a, 0x44, 0x63, 0x89, 0x35, 0x48, 0xbb, 0x2b, 0x95, 0xb4,
0xe7, 0xbe, 0xeb, 0x01, 0xe6, 0x8a, 0x26, 0x68, 0x6d, 0x7e, 0x2d, 0x80, 0xf9, 0x21, 0x8e, 0xa8,
0xc3, 0x73, 0xf2, 0x15, 0x60, 0x75, 0xb9, 0xa2, 0x2f, 0x73, 0x1f, 0xd0, 0x0d, 0xdc, 0x33, 0xfa,
0xa9, 0x54, 0x23, 0xd6, 0x20, 0xb6, 0x21, 0x8a, 0xc4, 0x33, 0x77, 0xa1, 0xbd, 0x9b, 0x64, 0x7e,
0x9b, 0x96, 0xdf, 0x94, 0x04, 0x32, 0x9b, 0x25, 0x55, 0x68, 0xf4, 0x78, 0x8c, 0x31, 0x9b, 0xe3,
0x0b, 0xd0, 0xc9, 0x2b, 0x44, 0xba, 0xf6, 0xe6, 0xbb, 0xe0, 0x67, 0xa2, 0x1f, 0x6d, 0x0b, 0xd0,
0x39, 0x51, 0x31, 0x9e, 0x49, 0x85, 0x31, 0x9b, 0xf1, 0xed, 0x95, 0x17, 0xa6, 0xaa, 0x73, 0x4c,
0x19, 0x20, 0xb2, 0x1a, 0x86, 0xd4, 0x23, 0xf7, 0x85, 0xad, 0x41, 0x67, 0xd4, 0xb3, 0x21, 0xda,
0xc8, 0xc8, 0xd3, 0xba, 0xfb, 0x88, 0x7a, 0x67, 0x78, 0xae, 0x9f, 0x56, 0x98, 0x65, 0xe7, 0xb4,
0xd3, 0x1e, 0xba, 0xe1, 0xc4, 0x3a, 0x4c, 0x7b, 0x5a, 0x9d, 0xc9, 0x91, 0x65, 0x92, 0x76, 0xda,
0xd7, 0x22, 0xae, 0xb9, 0x7f, 0x89, 0xba, 0x76, 0x80, 0x09, 0x0a, 0x5b, 0x67, 0x7d, 0xe2, 0x2f,
0x98, 0x0f, 0x75, 0x2b, 0x91, 0xc2, 0xb2, 0x84, 0x8e, 0x42, 0x51, 0xe6, 0x62, 0x4a, 0x45, 0xd9,
0x11, 0x56, 0xcf, 0x8c, 0x34, 0x9e, 0x1c, 0x3d, 0xca, 0x25, 0x59, 0xd6, 0x7a, 0xb5, 0x8b, 0xd1,
0xc9, 0xa1, 0x88, 0xb5, 0x01, 0x47, 0xc0, 0x69, 0x0f, 0xd2, 0xb4, 0x24, 0x4f, 0x58, 0x92, 0xc5,
0x8c, 0xe4, 0xdd, 0xe0, 0x80, 0xa3, 0xd4, 0x9d, 0x1a, 0x15, 0xee, 0xae, 0x1a, 0xaa, 0xaa, 0x65,
0x0d, 0xa7, 0x65, 0x7f, 0x01, 0xec, 0x85, 0x2b, 0x3f, 0x00, 0x08, 0xde, 0xf0, 0x0f, 0x78, 0x73,
0xe6, 0x0d, 0x47, 0x7e, 0x00, 0xcf, 0x7d, 0x78, 0x89, 0xac, 0xee, 0xe9, 0x6e, 0xdb, 0xbb, 0x27,
0x6e, 0x95, 0x5f, 0x66, 0x7d, 0x95, 0x95, 0x99, 0x95, 0x59, 0x30, 0x1f, 0xe9, 0x34, 0xd5, 0xea,
0xf6, 0xd8, 0x68, 0xa7, 0xf9, 0x72, 0x2a, 0x93, 0x8b, 0xcc, 0xe6, 0xd2, 0xed, 0x5c, 0xb5, 0xf1,
0x08, 0xe6, 0x86, 0x4e, 0xb8, 0xcc, 0xf2, 0xd7, 0x01, 0xd0, 0x18, 0x6d, 0x1e, 0x45, 0x3a, 0xc6,
0xb5, 0xe0, 0x66, 0x70, 0x6b, 0xf1, 0x33, 0xaf, 0xde, 0xfe, 0x88, 0x3d, 0xb7, 0x77, 0xc8, 0xac,
0xa7, 0x63, 0x1c, 0x74, 0x70, 0xba, 0xe4, 0xab, 0x30, 0x67, 0x50, 0x58, 0xad, 0xd6, 0x1a, 0x37,
0x83, 0x5b, 0x9d, 0x41, 0x21, 0x6d, 0x7c, 0x16, 0xe6, 0xef, 0xe3, 0xe4, 0xa1, 0x48, 0x32, 0x3c,
0x12, 0xd2, 0x70, 0x06, 0xcd, 0xc7, 0x38, 0xf1, 0xfc, 0x9d, 0x01, 0x2d, 0xf9, 0x0a, 0xcc, 0x5e,
0x90, 0xba, 0xd8, 0x98, 0x0b, 0x1b, 0x77, 0xa1, 0x7b, 0x1f, 0x27, 0xa1, 0x70, 0xe2, 0x63, 0xb6,
0x71, 0x68, 0xc5, 0xc2, 0x09, 0xbf, 0x6b, 0x7e, 0xe0, 0xd7, 0x1b, 0xeb, 0xd0, 0xda, 0x4e, 0xf4,
0x69, 0x45, 0x19, 0x78, 0x65, 0x41, 0xf9, 0x1a, 0xb4, 0xb7, 0xe2, 0xd8, 0xa0, 0xb5, 0x7c, 0x11,
0x1a, 0x72, 0x5c, 0xb0, 0x35, 0xe4, 0x98, 0xc8, 0xc6, 0xda, 0x38, 0x4f, 0xd6, 0x1c, 0xf8, 0xf5,
0xc6, 0x3b, 0x01, 0xb4, 0x0f, 0xec, 0x68, 0x5b, 0x58, 0xe4, 0x9f, 0x83, 0x2b, 0xa9, 0x1d, 0x3d,
0x72, 0x93, 0xf1, 0x34, 0x34, 0xeb, 0x1f, 0x19, 0x9a, 0x03, 0x3b, 0x3a, 0x9e, 0x8c, 0x71, 0xd0,
0x4e, 0xf3, 0x05, 0x79, 0x92, 0xda, 0x51, 0x3f, 0x2c, 0x98, 0x73, 0x81, 0xaf, 0x43, 0xc7, 0xc9,
0x14, 0xad, 0x13, 0xe9, 0x78, 0xad, 0x79, 0x33, 0xb8, 0xd5, 0x1a, 0x54, 0x00, 0xbf, 0x01, 0x57,
0xac, 0xce, 0x4c, 0x84, 0xfd, 0x70, 0xad, 0xe5, 0xb7, 0x95, 0xf2, 0xc6, 0xeb, 0xd0, 0x39, 0xb0,
0xa3, 0x7b, 0x28, 0x62, 0x34, 0xfc, 0x53, 0xd0, 0x3a, 0x15, 0x36, 0xf7, 0xa8, 0xfb, 0xf1, 0x1e,
0xd1, 0x0d, 0x06, 0xde, 0x72, 0xe3, 0x8b, 0x30, 0x1f, 0x1e, 0xec, 0xff, 0x1f, 0x0c, 0xe4, 0xba,
0x3d, 0x17, 0x26, 0x3e, 0x14, 0xe9, 0x34, 0x63, 0x15, 0xb0, 0xf9, 0x74, 0x16, 0x3a, 0x65, 0x79,
0xf0, 0x2e, 0xb4, 0x87, 0x59, 0x14, 0xa1, 0xb5, 0x6c, 0x86, 0x2f, 0xc3, 0xd2, 0x89, 0xc2, 0xcb,
0x31, 0x46, 0x0e, 0x63, 0x6f, 0xc3, 0x02, 0x7e, 0x15, 0x16, 0x7a, 0x5a, 0x29, 0x8c, 0xdc, 0xae,
0x90, 0x09, 0xc6, 0xac, 0xc1, 0x57, 0x80, 0x1d, 0xa1, 0x49, 0xa5, 0xb5, 0x52, 0xab, 0x10, 0x95,
0xc4, 0x98, 0x35, 0xf9, 0x75, 0x58, 0xee, 0xe9, 0x24, 0xc1, 0xc8, 0x49, 0xad, 0x0e, 0xb5, 0xdb,
0xb9, 0x94, 0xd6, 0x59, 0xd6, 0x22, 0xda, 0x7e, 0x92, 0xe0, 0x48, 0x24, 0x5b, 0x66, 0x94, 0xa5,
0xa8, 0x1c, 0x9b, 0x25, 0x8e, 0x02, 0x0c, 0x65, 0x8a, 0x8a, 0x98, 0x58, 0xbb, 0x86, 0xf6, 0x55,
0x8c, 0x97, 0x94, 0x1f, 0x76, 0x85, 0xbf, 0x04, 0xd7, 0x0a, 0xb4, 0x76, 0x80, 0x48, 0x91, 0x75,
0xf8, 0x12, 0x74, 0x0b, 0xd5, 0xf1, 0x83, 0xa3, 0xfb, 0x0c, 0x6a, 0x0c, 0x03, 0xfd, 0x64, 0x80,
0x91, 0x36, 0x31, 0xeb, 0xd6, 0x5c, 0x78, 0x88, 0x91, 0xd3, 0xa6, 0x1f, 0xb2, 0x79, 0x72, 0xb8,
0x00, 0x87, 0x28, 0x4c, 0x74, 0x3e, 0x40, 0x9b, 0x25, 0x8e, 0x2d, 0x70, 0x06, 0xf3, 0xbb, 0x32,
0xc1, 0x43, 0xed, 0x76, 0x75, 0xa6, 0x62, 0xb6, 0xc8, 0x17, 0x01, 0x0e, 0xd0, 0x89, 0x22, 0x02,
0x4b, 0x74, 0x6c, 0x4f, 0x44, 0xe7, 0x58, 0x00, 0x8c, 0xaf, 0x02, 0xef, 0x09, 0xa5, 0xb4, 0xeb,
0x19, 0x14, 0x0e, 0x77, 0x75, 0x12, 0xa3, 0x61, 0x57, 0xc9, 0x9d, 0x67, 0x70, 0x99, 0x20, 0xe3,
0x95, 0x75, 0x88, 0x09, 0x96, 0xd6, 0xcb, 0x95, 0x75, 0x81, 0x93, 0xf5, 0x0a, 0x39, 0xbf, 0x9d,
0xc9, 0x24, 0xf6, 0x21, 0xc9, 0xd3, 0x72, 0x8d, 0x7c, 0x2c, 0x9c, 0x3f, 0xdc, 0xef, 0x0f, 0x8f,
0xd9, 0x2a, 0xbf, 0x06, 0x57, 0x0b, 0xe4, 0x00, 0x9d, 0x91, 0x91, 0x0f, 0xde, 0x75, 0x72, 0xf5,
0x41, 0xe6, 0x1e, 0x9c, 0x1d, 0x60, 0xaa, 0xcd, 0x84, 0xad, 0x51, 0x42, 0x3d, 0xd3, 0x34, 0x45,
0xec, 0x25, 0x3a, 0x61, 0x27, 0x1d, 0xbb, 0x49, 0x15, 0x5e, 0x76, 0x83, 0xbf, 0x0c, 0xd7, 0x4f,
0xc6, 0xb1, 0x70, 0xd8, 0x4f, 0xe9, 0xb1, 0x1d, 0x0b, 0xfb, 0x98, 0xae, 0x9b, 0x19, 0x64, 0x2f,
0xf3, 0x1b, 0xb0, 0xfa, 0x6c, 0x2e, 0xca, 0x60, 0xad, 0xd3, 0xc6, 0xfc, 0xb6, 0x3d, 0x83, 0x31,
0x2a, 0x27, 0x45, 0x32, 0xdd, 0xf8, 0x4a, 0xc5, 0xfa, 0xa2, 0xf2, 0x55, 0x52, 0xe6, 0x37, 0x7f,
0x51, 0xf9, 0x09, 0xbe, 0x06, 0x2b, 0x7b, 0xe8, 0x5e, 0xd4, 0xdc, 0x24, 0xcd, 0xbe, 0xb4, 0x5e,
0x75, 0x62, 0xd1, 0xd8, 0xa9, 0xe6, 0x93, 0x9c, 0xc3, 0x42, 0x18, 0x0e, 0xf0, 0xcb, 0x19, 0x5a,
0x37, 0x10, 0x11, 0xb2, 0xbf, 0xb7, 0x37, 0xdf, 0x04, 0xf0, 0xf7, 0xa7, 0xa6, 0x8a, 0x9c, 0xc3,
0x62, 0x25, 0x1d, 0x6a, 0x85, 0x6c, 0x86, 0xcf, 0xc3, 0x95, 0x13, 0x25, 0xad, 0xcd, 0x30, 0x66,
0x01, 0xe5, 0xbe, 0xaf, 0x8e, 0x8c, 0x1e, 0x51, 0x5b, 0x62, 0x0d, 0xd2, 0xee, 0x4a, 0x25, 0xed,
0xb9, 0xaf, 0x7a, 0x80, 0xb9, 0xa2, 0x08, 0x5a, 0x9b, 0x6f, 0x07, 0x30, 0x3f, 0xc4, 0x11, 0x55,
0x78, 0x4e, 0xbe, 0x02, 0xac, 0x2e, 0x57, 0xf4, 0x65, 0xec, 0x03, 0x7a, 0x81, 0x7b, 0x46, 0x3f,
0x91, 0x6a, 0xc4, 0x1a, 0xc4, 0x36, 0x44, 0x91, 0x78, 0xe6, 0x2e, 0xb4, 0x77, 0x93, 0xcc, 0x1f,
0xd3, 0xf2, 0x87, 0x92, 0x40, 0x66, 0xb3, 0xa4, 0x0a, 0x8d, 0x1e, 0x8f, 0x31, 0x66, 0x73, 0x7c,
0x01, 0x3a, 0x79, 0x86, 0x48, 0xd7, 0xde, 0xfc, 0x19, 0xf8, 0x9e, 0xe8, 0x5b, 0xdb, 0x02, 0x74,
0x4e, 0x54, 0x8c, 0x67, 0x52, 0x61, 0xcc, 0x66, 0x7c, 0x79, 0xe5, 0x89, 0xa9, 0xf2, 0x1c, 0x53,
0x04, 0x88, 0xac, 0x86, 0x21, 0xd5, 0xc8, 0x3d, 0x61, 0x6b, 0xd0, 0x19, 0xd5, 0x6c, 0x88, 0x36,
0x32, 0xf2, 0xb4, 0xbe, 0x7d, 0x44, 0xb5, 0x33, 0x3c, 0xd7, 0x4f, 0x2a, 0xcc, 0xb2, 0x73, 0x3a,
0x69, 0x0f, 0xdd, 0x70, 0x62, 0x1d, 0xa6, 0x3d, 0xad, 0xce, 0xe4, 0xc8, 0x32, 0x49, 0x27, 0xed,
0x6b, 0x11, 0xd7, 0xb6, 0x7f, 0x89, 0xaa, 0x76, 0x80, 0x09, 0x0a, 0x5b, 0x67, 0x7d, 0xec, 0x1f,
0x98, 0x77, 0x75, 0x2b, 0x91, 0xc2, 0xb2, 0x84, 0xae, 0x42, 0x5e, 0xe6, 0x62, 0x4a, 0x49, 0xd9,
0x4a, 0x1c, 0x9a, 0x5c, 0x56, 0x7c, 0x05, 0x96, 0x72, 0xfb, 0x23, 0x61, 0x9c, 0xf4, 0x24, 0x3f,
0x0b, 0x7c, 0xf9, 0x8d, 0x1e, 0x57, 0xd8, 0xcf, 0x69, 0x9e, 0xcd, 0xdf, 0x17, 0xb6, 0x82, 0x7e,
0x11, 0xf0, 0x55, 0xb8, 0x3a, 0x3d, 0x5a, 0x85, 0xff, 0x32, 0xe0, 0xcb, 0xb0, 0x48, 0x47, 0x2b,
0x31, 0xcb, 0x7e, 0xe5, 0x41, 0x3a, 0x44, 0x0d, 0xfc, 0xb5, 0x67, 0x28, 0x4e, 0x51, 0xc3, 0x7f,
0xe3, 0x37, 0x23, 0x86, 0xa2, 0x09, 0x2c, 0x7b, 0x2f, 0xa0, 0x48, 0xa7, 0x9b, 0x15, 0x30, 0x7b,
0xdf, 0x1b, 0x12, 0x6b, 0x69, 0xf8, 0x81, 0x37, 0x2c, 0x38, 0x4b, 0xf4, 0x43, 0x8f, 0xde, 0x17,
0x2a, 0xd6, 0x67, 0x67, 0x25, 0xfa, 0x2c, 0xe0, 0x6b, 0xb0, 0x4c, 0xee, 0xdb, 0x22, 0x11, 0x2a,
0xaa, 0xec, 0x3f, 0x0a, 0xf8, 0x35, 0x60, 0x2f, 0x6c, 0x67, 0xd9, 0xdb, 0x0d, 0xce, 0xa6, 0xf9,
0xf5, 0xcd, 0xcf, 0xbe, 0xdd, 0xf0, 0xb9, 0x2a, 0x0c, 0x73, 0xec, 0x3b, 0x0d, 0xbe, 0x98, 0x27,
0x3d, 0x97, 0xbf, 0xdb, 0xe0, 0x5d, 0x98, 0xeb, 0x2b, 0x8b, 0xc6, 0xb1, 0xaf, 0x53, 0x7f, 0xce,
0xe5, 0x97, 0x95, 0x7d, 0x83, 0xae, 0xc1, 0xac, 0xef, 0x4f, 0xf6, 0x8e, 0x57, 0xe4, 0x03, 0x95,
0xfd, 0xbd, 0xe9, 0x33, 0x50, 0x9f, 0xae, 0xff, 0x68, 0xd2, 0x4e, 0x7b, 0xe8, 0xaa, 0x5b, 0xc7,
0xfe, 0xd9, 0xe4, 0x37, 0xe0, 0xda, 0x14, 0xf3, 0xb3, 0xae, 0xbc, 0x6f, 0xff, 0x6a, 0xf2, 0x75,
0xb8, 0x4e, 0x17, 0xbf, 0x6c, 0x0f, 0x72, 0x92, 0xd6, 0xc9, 0xc8, 0xb2, 0x7f, 0x37, 0xf9, 0x4d,
0x58, 0xdd, 0x43, 0x57, 0xa6, 0xbd, 0xa6, 0xfc, 0x4f, 0x93, 0x2f, 0xc0, 0x95, 0x01, 0x0d, 0x43,
0xbc, 0x40, 0xf6, 0x5e, 0x93, 0x6a, 0x37, 0x15, 0x8b, 0x70, 0xde, 0x6f, 0x52, 0x46, 0xdf, 0x14,
0x2e, 0x3a, 0x0f, 0xd3, 0xde, 0xb9, 0x50, 0x0a, 0x13, 0xcb, 0x3e, 0x68, 0x52, 0xde, 0x06, 0x98,
0xea, 0x0b, 0xac, 0xc1, 0x1f, 0xd2, 0x23, 0xc7, 0xbd, 0xf1, 0xe7, 0x33, 0x34, 0x93, 0x52, 0xf1,
0xac, 0x49, 0x15, 0xc8, 0xed, 0x9f, 0xd7, 0x7c, 0xd4, 0xe4, 0xaf, 0xc2, 0x5a, 0x7e, 0xa7, 0xa7,
0xf9, 0x27, 0xe5, 0x08, 0xfb, 0xea, 0x4c, 0xb3, 0xb7, 0x5b, 0x25, 0x63, 0x88, 0x89, 0x13, 0xa5,
0xdf, 0x57, 0x5b, 0x54, 0xa2, 0xc2, 0xc3, 0x9b, 0xfe, 0xb6, 0xc5, 0x97, 0x00, 0xf2, 0x2b, 0xe5,
0x81, 0xdf, 0xb5, 0x28, 0xf4, 0x3d, 0x74, 0xf4, 0xca, 0x5d, 0xa0, 0x99, 0x78, 0xf4, 0xf7, 0x53,
0xb4, 0x3e, 0x78, 0xd8, 0x1f, 0x5a, 0x94, 0x8a, 0x63, 0x99, 0xe2, 0xb1, 0x8c, 0x9e, 0xb0, 0xef,
0x75, 0x28, 0x15, 0x3e, 0xd2, 0x43, 0x1d, 0x23, 0xd9, 0x58, 0xf6, 0x6e, 0x87, 0xea, 0x4d, 0x6d,
0x94, 0xd7, 0xfb, 0xfb, 0x5e, 0x2e, 0x86, 0x67, 0x3f, 0x64, 0x3f, 0xa0, 0xd7, 0x16, 0x0a, 0xf9,
0x78, 0xf8, 0x90, 0xfd, 0xb0, 0x43, 0x5b, 0x6d, 0x25, 0x89, 0x8e, 0x84, 0x2b, 0x9b, 0xf9, 0x47,
0x1d, 0xba, 0x0d, 0xb5, 0xdd, 0x8b, 0x6a, 0xfc, 0xb8, 0x43, 0x39, 0x2d, 0x70, 0xdf, 0x2b, 0x21,
0x8d, 0xc3, 0x9f, 0x78, 0x56, 0xfa, 0x44, 0x52, 0x24, 0xc7, 0x8e, 0xfd, 0xd4, 0xdb, 0xbd, 0xf8,
0x80, 0xb0, 0x3f, 0x76, 0x8b, 0xbe, 0xa9, 0x61, 0x7f, 0xea, 0xe6, 0xed, 0xfd, 0xfc, 0x8b, 0xc1,
0xfe, 0xec, 0xe1, 0x17, 0x5f, 0x19, 0xf6, 0x97, 0x2e, 0x05, 0x56, 0x7f, 0x28, 0x94, 0x48, 0xd1,
0xb2, 0xbf, 0x76, 0x37, 0x37, 0xa0, 0x1d, 0xda, 0xc4, 0x8f, 0xcc, 0x36, 0x34, 0x43, 0x9b, 0xb0,
0x19, 0x9a, 0x30, 0xdb, 0x5a, 0x27, 0x3b, 0x97, 0x63, 0xf3, 0xe8, 0xd3, 0x2c, 0xd8, 0xdc, 0x86,
0xa5, 0x9e, 0x4e, 0xc7, 0xa2, 0x6c, 0x41, 0x3f, 0x25, 0xf3, 0xf1, 0x8a, 0x71, 0x9e, 0xe6, 0x19,
0x1a, 0x53, 0x3b, 0x97, 0x18, 0x65, 0x7e, 0x18, 0x07, 0x24, 0x92, 0x13, 0x05, 0x18, 0xb3, 0xc6,
0xe6, 0x5b, 0xc0, 0x7a, 0x5a, 0x59, 0x69, 0x1d, 0xaa, 0x68, 0xb2, 0x8f, 0x17, 0x98, 0xf8, 0x91,
0xef, 0x8c, 0x56, 0x23, 0x36, 0xe3, 0x7f, 0x63, 0xe8, 0x7f, 0x55, 0xf9, 0xc3, 0xb0, 0x4d, 0x2f,
0xaa, 0xff, 0x72, 0x2d, 0x02, 0xec, 0x5c, 0xa0, 0x72, 0x99, 0x48, 0x92, 0x09, 0x6b, 0x92, 0xdc,
0xcb, 0xac, 0xd3, 0xa9, 0xfc, 0x8a, 0x7f, 0x7a, 0xbe, 0x19, 0x40, 0x37, 0x7f, 0x05, 0xca, 0xd0,
0x72, 0xf1, 0x08, 0x55, 0x2c, 0x3d, 0x39, 0xfd, 0x18, 0x3c, 0x54, 0xbc, 0x57, 0x41, 0x65, 0x34,
0x74, 0xc2, 0xb8, 0xe9, 0xd7, 0x2e, 0x87, 0x42, 0xfd, 0x54, 0x25, 0x5a, 0xc4, 0xfe, 0x29, 0x2a,
0x5d, 0x8f, 0x84, 0xb1, 0xfe, 0x3d, 0xa2, 0x0f, 0x55, 0xc1, 0x6f, 0xfc, 0x79, 0x62, 0x36, 0x5b,
0x81, 0xd5, 0x99, 0xe7, 0xb6, 0xdf, 0x84, 0x45, 0xa9, 0xa7, 0xbf, 0xd6, 0x91, 0x19, 0x47, 0xdb,
0xdd, 0x9e, 0xff, 0xb5, 0x1e, 0xd1, 0x0f, 0xf6, 0x28, 0xf8, 0xc2, 0xbd, 0x91, 0x74, 0xe7, 0xd9,
0x29, 0xfd, 0x65, 0xef, 0xe6, 0x66, 0xaf, 0x4b, 0x5d, 0xac, 0xee, 0x4a, 0xe5, 0xa8, 0x4e, 0xc9,
0x5d, 0xff, 0xdf, 0xbd, 0x9b, 0xff, 0x77, 0xc7, 0xa7, 0xdf, 0x0a, 0x82, 0xd3, 0x39, 0x0f, 0xdd,
0xfb, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x25, 0x53, 0x30, 0x43, 0x0d, 0x00, 0x00,
0x0f, 0x7c, 0xfa, 0x8d, 0x1e, 0x57, 0xd8, 0x2f, 0xa8, 0x9f, 0xcd, 0xdf, 0x13, 0xb6, 0x82, 0x7e,
0x19, 0xf0, 0x55, 0xb8, 0x3a, 0xbd, 0x5a, 0x85, 0xff, 0x2a, 0xe0, 0xcb, 0xb0, 0x48, 0x57, 0x2b,
0x31, 0xcb, 0x7e, 0xed, 0x41, 0xba, 0x44, 0x0d, 0xfc, 0x8d, 0x67, 0x28, 0x6e, 0x51, 0xc3, 0x7f,
0xeb, 0x0f, 0x23, 0x86, 0xa2, 0x08, 0x2c, 0x7b, 0x37, 0x20, 0x4f, 0xa7, 0x87, 0x15, 0x30, 0x7b,
0xcf, 0x1b, 0x12, 0x6b, 0x69, 0xf8, 0xbe, 0x37, 0x2c, 0x38, 0x4b, 0xf4, 0x03, 0x8f, 0xde, 0x13,
0x2a, 0xd6, 0x67, 0x67, 0x25, 0xfa, 0x34, 0xe0, 0x6b, 0xb0, 0x4c, 0xdb, 0xb7, 0x45, 0x22, 0x54,
0x54, 0xd9, 0x7f, 0x18, 0xf0, 0x6b, 0xc0, 0x9e, 0x3b, 0xce, 0xb2, 0xb7, 0x1a, 0x9c, 0x4d, 0xe3,
0xeb, 0x8b, 0x9f, 0x7d, 0xab, 0xe1, 0x63, 0x55, 0x18, 0xe6, 0xd8, 0xb7, 0x1b, 0x7c, 0x31, 0x0f,
0x7a, 0x2e, 0x7f, 0xa7, 0xc1, 0xbb, 0x30, 0xd7, 0x57, 0x16, 0x8d, 0x63, 0x5f, 0xa3, 0xfa, 0x9c,
0xcb, 0x1f, 0x2b, 0xfb, 0x3a, 0x3d, 0x83, 0x59, 0x5f, 0x9f, 0xec, 0x1d, 0xaf, 0xc8, 0x1b, 0x2a,
0xfb, 0x47, 0xd3, 0x47, 0xa0, 0xde, 0x5d, 0xff, 0xd9, 0xa4, 0x93, 0xf6, 0xd0, 0x55, 0xaf, 0x8e,
0xfd, 0xab, 0xc9, 0x6f, 0xc0, 0xb5, 0x29, 0xe6, 0x7b, 0x5d, 0xf9, 0xde, 0xfe, 0xdd, 0xe4, 0xeb,
0x70, 0x9d, 0x1e, 0x7e, 0x59, 0x1e, 0xb4, 0x49, 0x5a, 0x27, 0x23, 0xcb, 0xfe, 0xd3, 0xe4, 0x2f,
0xc3, 0xea, 0x1e, 0xba, 0x32, 0xec, 0x35, 0xe5, 0x7f, 0x9b, 0x7c, 0x01, 0xae, 0x0c, 0xa8, 0x19,
0xe2, 0x05, 0xb2, 0x77, 0x9b, 0x94, 0xbb, 0xa9, 0x58, 0xb8, 0xf3, 0x5e, 0x93, 0x22, 0xfa, 0x86,
0x70, 0xd1, 0x79, 0x98, 0xf6, 0xce, 0x85, 0x52, 0x98, 0x58, 0xf6, 0x7e, 0x93, 0xe2, 0x36, 0xc0,
0x54, 0x5f, 0x60, 0x0d, 0xfe, 0x80, 0x86, 0x1c, 0xf7, 0xc6, 0x9f, 0xcf, 0xd0, 0x4c, 0x4a, 0xc5,
0xd3, 0x26, 0x65, 0x20, 0xb7, 0x7f, 0x56, 0xf3, 0x61, 0x93, 0xbf, 0x02, 0x6b, 0xf9, 0x9b, 0x9e,
0xc6, 0x9f, 0x94, 0x23, 0xec, 0xab, 0x33, 0xcd, 0xde, 0x6a, 0x95, 0x8c, 0x21, 0x26, 0x4e, 0x94,
0xfb, 0xbe, 0xda, 0x22, 0xbf, 0xe8, 0x0d, 0xd1, 0xe0, 0xde, 0xf7, 0x5f, 0x01, 0xcb, 0xde, 0x6e,
0x51, 0xe2, 0x0a, 0x1e, 0x4f, 0xf0, 0xbb, 0x16, 0x5f, 0x02, 0xc8, 0x1f, 0x9a, 0x07, 0x7e, 0x3f,
0xdd, 0x48, 0xb3, 0xef, 0x02, 0xcd, 0xc4, 0xa3, 0x7f, 0x28, 0xe9, 0x6a, 0xed, 0x88, 0xfd, 0xb1,
0x45, 0x01, 0x3a, 0x96, 0x29, 0x1e, 0xcb, 0xe8, 0x31, 0xfb, 0x6e, 0x87, 0x02, 0xe4, 0xfd, 0x3f,
0xd4, 0x31, 0x92, 0x8d, 0x65, 0xdf, 0xeb, 0x50, 0x15, 0x50, 0x71, 0xe5, 0x55, 0xf0, 0x7d, 0x2f,
0x17, 0x2d, 0xb5, 0x1f, 0xb2, 0x1f, 0xd0, 0x0c, 0x86, 0x42, 0x3e, 0x1e, 0x3e, 0x60, 0x3f, 0xec,
0xd0, 0x51, 0x5b, 0x49, 0xa2, 0x23, 0xe1, 0xca, 0x12, 0xff, 0x51, 0x87, 0xde, 0x48, 0xed, 0xf4,
0x22, 0x47, 0x3f, 0xee, 0x50, 0xa4, 0x0b, 0xdc, 0x57, 0x50, 0x48, 0x4d, 0xf2, 0x27, 0x9e, 0x95,
0xbe, 0x96, 0xe4, 0xc9, 0xb1, 0x63, 0x3f, 0xf5, 0x76, 0xcf, 0x8f, 0x15, 0xf6, 0xa7, 0x6e, 0x51,
0x4d, 0x35, 0xec, 0xcf, 0xdd, 0xbc, 0xe8, 0x9f, 0x9d, 0x23, 0xec, 0x2f, 0x1e, 0x7e, 0x7e, 0xf6,
0xb0, 0xbf, 0x76, 0xc9, 0xb1, 0xfa, 0xf8, 0x50, 0x22, 0x45, 0xcb, 0xfe, 0xd6, 0xdd, 0xdc, 0x80,
0x76, 0x68, 0x13, 0xdf, 0x48, 0xdb, 0xd0, 0x0c, 0x6d, 0xc2, 0x66, 0xa8, 0xef, 0x6c, 0x6b, 0x9d,
0xec, 0x5c, 0x8e, 0xcd, 0xc3, 0x4f, 0xb3, 0x60, 0x73, 0x1b, 0x96, 0x7a, 0x3a, 0x1d, 0x8b, 0xb2,
0x30, 0x7d, 0xef, 0xcc, 0x9b, 0x2e, 0xc6, 0x79, 0x98, 0x67, 0xa8, 0x79, 0xed, 0x5c, 0x62, 0x94,
0xf9, 0x16, 0x1d, 0x90, 0x48, 0x9b, 0xc8, 0xc1, 0x98, 0x35, 0x36, 0xdf, 0x04, 0xd6, 0xd3, 0xca,
0x4a, 0xeb, 0x50, 0x45, 0x93, 0x7d, 0xbc, 0xc0, 0xc4, 0x0f, 0x02, 0x67, 0xb4, 0x1a, 0xb1, 0x19,
0xff, 0x47, 0x43, 0xff, 0xd7, 0xca, 0xc7, 0xc5, 0x36, 0xcd, 0x59, 0xff, 0x11, 0x5b, 0x04, 0xd8,
0xb9, 0x40, 0xe5, 0x32, 0x91, 0x24, 0x13, 0xd6, 0x24, 0xb9, 0x97, 0x59, 0xa7, 0x53, 0xf9, 0x15,
0x3f, 0x90, 0xbe, 0x11, 0x40, 0x37, 0x9f, 0x0d, 0xa5, 0x6b, 0xb9, 0x78, 0x84, 0x2a, 0x96, 0x9e,
0x9c, 0xfe, 0x11, 0x1e, 0x2a, 0xa6, 0x58, 0x50, 0x19, 0x0d, 0x9d, 0x30, 0x6e, 0xfa, 0xe1, 0xcb,
0xa1, 0x50, 0x3f, 0x51, 0x89, 0x16, 0xb1, 0x1f, 0x50, 0xe5, 0xd6, 0x23, 0x61, 0xac, 0x9f, 0x52,
0xf4, 0xcd, 0x2a, 0xf8, 0x8d, 0xbf, 0x4f, 0xcc, 0x66, 0x2b, 0xb0, 0xba, 0xf3, 0xdc, 0xf6, 0x1b,
0xb0, 0x28, 0xf5, 0xf4, 0x2f, 0x3b, 0x32, 0xe3, 0x68, 0xbb, 0xdb, 0xf3, 0x7f, 0xd9, 0x23, 0xfa,
0xd7, 0x1e, 0x05, 0x5f, 0xb8, 0x3b, 0x92, 0xee, 0x3c, 0x3b, 0xa5, 0x1f, 0xee, 0x9d, 0xdc, 0xec,
0x35, 0xa9, 0x8b, 0xd5, 0x1d, 0xa9, 0x1c, 0xe5, 0x29, 0xb9, 0xe3, 0x7f, 0xc1, 0x77, 0xf2, 0x5f,
0xf0, 0xf8, 0xf4, 0x9b, 0x41, 0x70, 0x3a, 0xe7, 0xa1, 0xbb, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff,
0x04, 0x66, 0xf7, 0xea, 0x59, 0x0d, 0x00, 0x00,
}

View File

@ -303,6 +303,7 @@ message SegmentBinlogs {
int64 num_of_rows = 3;
repeated FieldBinlog statslogs = 4;
repeated FieldBinlog deltalogs = 5;
string insert_channel = 6;
}
message FieldBinlog{
@ -367,6 +368,7 @@ message CompactionSegmentBinlogs {
repeated FieldBinlog fieldBinlogs = 2;
repeated FieldBinlog field2StatslogPaths = 3;
repeated FieldBinlog deltalogs = 4;
string insert_channel = 5;
}
message CompactionPlan {

View File

@ -2094,6 +2094,7 @@ type SegmentBinlogs struct {
NumOfRows int64 `protobuf:"varint,3,opt,name=num_of_rows,json=numOfRows,proto3" json:"num_of_rows,omitempty"`
Statslogs []*FieldBinlog `protobuf:"bytes,4,rep,name=statslogs,proto3" json:"statslogs,omitempty"`
Deltalogs []*FieldBinlog `protobuf:"bytes,5,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
InsertChannel string `protobuf:"bytes,6,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -2159,6 +2160,13 @@ func (m *SegmentBinlogs) GetDeltalogs() []*FieldBinlog {
return nil
}
func (m *SegmentBinlogs) GetInsertChannel() string {
if m != nil {
return m.InsertChannel
}
return ""
}
type FieldBinlog struct {
FieldID int64 `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
Binlogs []*Binlog `protobuf:"bytes,2,rep,name=binlogs,proto3" json:"binlogs,omitempty"`
@ -2606,6 +2614,7 @@ type CompactionSegmentBinlogs struct {
FieldBinlogs []*FieldBinlog `protobuf:"bytes,2,rep,name=fieldBinlogs,proto3" json:"fieldBinlogs,omitempty"`
Field2StatslogPaths []*FieldBinlog `protobuf:"bytes,3,rep,name=field2StatslogPaths,proto3" json:"field2StatslogPaths,omitempty"`
Deltalogs []*FieldBinlog `protobuf:"bytes,4,rep,name=deltalogs,proto3" json:"deltalogs,omitempty"`
InsertChannel string `protobuf:"bytes,5,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -2664,6 +2673,13 @@ func (m *CompactionSegmentBinlogs) GetDeltalogs() []*FieldBinlog {
return nil
}
func (m *CompactionSegmentBinlogs) GetInsertChannel() string {
if m != nil {
return m.InsertChannel
}
return ""
}
type CompactionPlan struct {
PlanID int64 `protobuf:"varint,1,opt,name=planID,proto3" json:"planID,omitempty"`
SegmentBinlogs []*CompactionSegmentBinlogs `protobuf:"bytes,2,rep,name=segmentBinlogs,proto3" json:"segmentBinlogs,omitempty"`
@ -3748,220 +3764,221 @@ func init() {
func init() { proto.RegisterFile("data_coord.proto", fileDescriptor_82cd95f524594f49) }
var fileDescriptor_82cd95f524594f49 = []byte{
// 3401 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x1b, 0x4b, 0x6f, 0x1b, 0xc7,
0xd9, 0xcb, 0x97, 0xc8, 0x8f, 0x0f, 0x51, 0x63, 0x47, 0xa6, 0x69, 0x5b, 0x96, 0xd7, 0xb1, 0xa3,
0x38, 0x8e, 0x9d, 0xc8, 0x0d, 0x12, 0x34, 0x2f, 0x44, 0x92, 0xa5, 0x10, 0x95, 0x54, 0x65, 0xa5,
0xc4, 0x45, 0x53, 0x94, 0x58, 0x71, 0x47, 0xd4, 0x46, 0xdc, 0x5d, 0x66, 0x67, 0x69, 0x59, 0xb9,
0xc4, 0x68, 0x80, 0x02, 0x29, 0x82, 0xb4, 0x45, 0x4f, 0x05, 0x7a, 0x28, 0x0a, 0x14, 0xe8, 0xe3,
0x52, 0xa0, 0xa7, 0xb6, 0xe8, 0xa5, 0xa7, 0xa0, 0x3d, 0xf4, 0x67, 0xf4, 0xd6, 0xfe, 0x85, 0x62,
0x1e, 0x3b, 0xfb, 0xe0, 0x92, 0x5c, 0x49, 0x7e, 0xdc, 0x34, 0xdf, 0x7e, 0xdf, 0x37, 0xdf, 0x7c,
0xf3, 0xbd, 0x39, 0x82, 0xba, 0xa1, 0x7b, 0x7a, 0xbb, 0xe3, 0x38, 0xae, 0x71, 0xbb, 0xef, 0x3a,
0x9e, 0x83, 0x66, 0x2c, 0xb3, 0xf7, 0x60, 0x40, 0xf8, 0xea, 0x36, 0xfd, 0xdc, 0xac, 0x74, 0x1c,
0xcb, 0x72, 0x6c, 0x0e, 0x6a, 0xd6, 0x4c, 0xdb, 0xc3, 0xae, 0xad, 0xf7, 0xc4, 0xba, 0x12, 0x26,
0x68, 0x56, 0x48, 0x67, 0x1f, 0x5b, 0x3a, 0x5f, 0xa9, 0x53, 0x90, 0xbf, 0x67, 0xf5, 0xbd, 0x23,
0xf5, 0x21, 0x54, 0x56, 0x7b, 0x03, 0xb2, 0xaf, 0xe1, 0x4f, 0x07, 0x98, 0x78, 0xe8, 0x15, 0xc8,
0xed, 0xea, 0x04, 0x37, 0x94, 0x79, 0x65, 0xa1, 0xbc, 0x78, 0xe9, 0x76, 0x64, 0x53, 0xb1, 0xdd,
0x06, 0xe9, 0x2e, 0xe9, 0x04, 0x6b, 0x0c, 0x13, 0x21, 0xc8, 0x19, 0xbb, 0xad, 0x95, 0x46, 0x66,
0x5e, 0x59, 0xc8, 0x6a, 0xec, 0x6f, 0xa4, 0x42, 0xa5, 0xe3, 0xf4, 0x7a, 0xb8, 0xe3, 0x99, 0x8e,
0xdd, 0x5a, 0x69, 0xe4, 0xd8, 0xb7, 0x08, 0x4c, 0xfd, 0x95, 0x02, 0x55, 0xb1, 0x35, 0xe9, 0x3b,
0x36, 0xc1, 0xe8, 0x2e, 0x14, 0x88, 0xa7, 0x7b, 0x03, 0x22, 0x76, 0xbf, 0x98, 0xb8, 0xfb, 0x36,
0x43, 0xd1, 0x04, 0x6a, 0xaa, 0xed, 0xb3, 0xc3, 0xdb, 0xa3, 0x39, 0x00, 0x82, 0xbb, 0x16, 0xb6,
0xbd, 0xd6, 0x0a, 0x69, 0xe4, 0xe6, 0xb3, 0x0b, 0x59, 0x2d, 0x04, 0x51, 0x7f, 0xae, 0x40, 0x7d,
0xdb, 0x5f, 0xfa, 0xda, 0x39, 0x07, 0xf9, 0x8e, 0x33, 0xb0, 0x3d, 0x26, 0x60, 0x55, 0xe3, 0x0b,
0x74, 0x15, 0x2a, 0x9d, 0x7d, 0xdd, 0xb6, 0x71, 0xaf, 0x6d, 0xeb, 0x16, 0x66, 0xa2, 0x94, 0xb4,
0xb2, 0x80, 0x6d, 0xea, 0x16, 0x4e, 0x25, 0xd1, 0x3c, 0x94, 0xfb, 0xba, 0xeb, 0x99, 0x11, 0x9d,
0x85, 0x41, 0xea, 0xaf, 0x15, 0x98, 0x7d, 0x8f, 0x10, 0xb3, 0x6b, 0x0f, 0x49, 0x36, 0x0b, 0x05,
0xdb, 0x31, 0x70, 0x6b, 0x85, 0x89, 0x96, 0xd5, 0xc4, 0x0a, 0x5d, 0x84, 0x52, 0x1f, 0x63, 0xb7,
0xed, 0x3a, 0x3d, 0x5f, 0xb0, 0x22, 0x05, 0x68, 0x4e, 0x0f, 0xa3, 0x0f, 0x60, 0x86, 0xc4, 0x18,
0x91, 0x46, 0x76, 0x3e, 0xbb, 0x50, 0x5e, 0xbc, 0x76, 0x7b, 0xc8, 0xdc, 0x6e, 0xc7, 0x37, 0xd5,
0x86, 0xa9, 0xd5, 0x47, 0x19, 0x38, 0x2b, 0xf1, 0xb8, 0xac, 0xf4, 0x6f, 0xaa, 0x39, 0x82, 0xbb,
0x52, 0x3c, 0xbe, 0x48, 0xa3, 0x39, 0xa9, 0xf2, 0x6c, 0x58, 0xe5, 0x29, 0x0c, 0x2c, 0xae, 0xcf,
0xfc, 0x90, 0x3e, 0xd1, 0x15, 0x28, 0xe3, 0x87, 0x7d, 0xd3, 0xc5, 0x6d, 0xcf, 0xb4, 0x70, 0xa3,
0x30, 0xaf, 0x2c, 0xe4, 0x34, 0xe0, 0xa0, 0x1d, 0xd3, 0x0a, 0x5b, 0xe4, 0x54, 0x6a, 0x8b, 0x54,
0x7f, 0xa3, 0xc0, 0xf9, 0xa1, 0x5b, 0x12, 0x26, 0xae, 0x41, 0x9d, 0x9d, 0x3c, 0xd0, 0x0c, 0x35,
0x76, 0xaa, 0xf0, 0x1b, 0xe3, 0x14, 0x1e, 0xa0, 0x6b, 0x43, 0xf4, 0x21, 0x21, 0x33, 0xe9, 0x85,
0x3c, 0x80, 0xf3, 0x6b, 0xd8, 0x13, 0x1b, 0xd0, 0x6f, 0x98, 0x9c, 0x3c, 0x04, 0x44, 0x7d, 0x29,
0x33, 0xe4, 0x4b, 0x7f, 0xca, 0x48, 0x5f, 0x62, 0x5b, 0xb5, 0xec, 0x3d, 0x07, 0x5d, 0x82, 0x92,
0x44, 0x11, 0x56, 0x11, 0x00, 0xd0, 0xeb, 0x90, 0xa7, 0x92, 0x72, 0x93, 0xa8, 0x2d, 0x5e, 0x4d,
0x3e, 0x53, 0x88, 0xa7, 0xc6, 0xf1, 0x51, 0x0b, 0x6a, 0xc4, 0xd3, 0x5d, 0xaf, 0xdd, 0x77, 0x08,
0xbb, 0x67, 0x66, 0x38, 0xe5, 0x45, 0x35, 0xca, 0x41, 0xc6, 0xca, 0x0d, 0xd2, 0xdd, 0x12, 0x98,
0x5a, 0x95, 0x51, 0xfa, 0x4b, 0x74, 0x0f, 0x2a, 0xd8, 0x36, 0x02, 0x46, 0xb9, 0xd4, 0x8c, 0xca,
0xd8, 0x36, 0x24, 0x9b, 0xe0, 0x7e, 0xf2, 0xe9, 0xef, 0xe7, 0x2b, 0x05, 0x1a, 0xc3, 0x17, 0x74,
0x9a, 0x40, 0xf9, 0x26, 0x27, 0xc2, 0xfc, 0x82, 0xc6, 0x7a, 0xb8, 0xbc, 0x24, 0x4d, 0x90, 0xa8,
0x26, 0x3c, 0x17, 0x48, 0xc3, 0xbe, 0x3c, 0x31, 0x63, 0xf9, 0x42, 0x81, 0xd9, 0xf8, 0x5e, 0xa7,
0x39, 0xf7, 0xb7, 0x20, 0x6f, 0xda, 0x7b, 0x8e, 0x7f, 0xec, 0xb9, 0x31, 0x7e, 0x46, 0xf7, 0xe2,
0xc8, 0xaa, 0x05, 0x17, 0xd7, 0xb0, 0xd7, 0xb2, 0x09, 0x76, 0xbd, 0x25, 0xd3, 0xee, 0x39, 0xdd,
0x2d, 0xdd, 0xdb, 0x3f, 0x85, 0x8f, 0x44, 0xcc, 0x3d, 0x13, 0x33, 0x77, 0xf5, 0x77, 0x0a, 0x5c,
0x4a, 0xde, 0x4f, 0x1c, 0xbd, 0x09, 0xc5, 0x3d, 0x13, 0xf7, 0x0c, 0xaa, 0x33, 0x85, 0xe9, 0x4c,
0xae, 0xa9, 0xaf, 0xf4, 0x29, 0xb2, 0x38, 0xe1, 0xd5, 0x11, 0x06, 0xba, 0xed, 0xb9, 0xa6, 0xdd,
0x5d, 0x37, 0x89, 0xa7, 0x71, 0xfc, 0x90, 0x3e, 0xb3, 0xe9, 0x2d, 0xf3, 0x27, 0x0a, 0xcc, 0xad,
0x61, 0x6f, 0x59, 0x86, 0x5a, 0xfa, 0xdd, 0x24, 0x9e, 0xd9, 0x21, 0x4f, 0xb6, 0x88, 0x48, 0xc8,
0x99, 0xea, 0x4f, 0x15, 0xb8, 0x32, 0x52, 0x18, 0xa1, 0x3a, 0x11, 0x4a, 0xfc, 0x40, 0x9b, 0x1c,
0x4a, 0xbe, 0x83, 0x8f, 0x3e, 0xd2, 0x7b, 0x03, 0xbc, 0xa5, 0x9b, 0x2e, 0x0f, 0x25, 0x27, 0x0c,
0xac, 0x7f, 0x54, 0xe0, 0xf2, 0x1a, 0xf6, 0xb6, 0xfc, 0x34, 0xf3, 0x0c, 0xb5, 0x93, 0xa2, 0xa2,
0xf8, 0x9a, 0x5f, 0x66, 0xa2, 0xb4, 0xcf, 0x44, 0x7d, 0x73, 0xcc, 0x0f, 0x42, 0x0e, 0xb9, 0xcc,
0x6b, 0x01, 0xa1, 0x3c, 0xf5, 0x51, 0x16, 0x2a, 0x1f, 0x89, 0xfa, 0x80, 0xa5, 0x91, 0xb8, 0x1e,
0x94, 0x64, 0x3d, 0x84, 0x4a, 0x8a, 0xa4, 0x2a, 0x63, 0x0d, 0xaa, 0x04, 0xe3, 0x83, 0x93, 0x24,
0x8d, 0x0a, 0x25, 0x94, 0xc1, 0x7e, 0x1d, 0x66, 0x06, 0xf6, 0x1e, 0x2d, 0x6b, 0xb1, 0x21, 0x4e,
0xc1, 0xab, 0xcb, 0xc9, 0x91, 0x67, 0x98, 0x10, 0xbd, 0x0f, 0xd3, 0x71, 0x5e, 0xf9, 0x54, 0xbc,
0xe2, 0x64, 0xa8, 0x05, 0x75, 0xc3, 0x75, 0xfa, 0x7d, 0x6c, 0xb4, 0x89, 0xcf, 0xaa, 0x90, 0x8e,
0x95, 0xa0, 0xf3, 0x59, 0xa9, 0x5f, 0x2a, 0x30, 0x7b, 0x5f, 0xf7, 0x3a, 0xfb, 0x2b, 0x96, 0xb8,
0x9c, 0x53, 0x98, 0xf6, 0xdb, 0x50, 0x7a, 0x20, 0x2e, 0xc2, 0x8f, 0x5f, 0x57, 0x12, 0x04, 0x0a,
0x5f, 0xb9, 0x16, 0x50, 0xa8, 0xdf, 0x28, 0x70, 0x8e, 0x35, 0x11, 0xbe, 0x74, 0x4f, 0xdf, 0xc9,
0x26, 0x34, 0x12, 0xe8, 0x06, 0xd4, 0x2c, 0xdd, 0x3d, 0xd8, 0x0e, 0x70, 0xf2, 0x0c, 0x27, 0x06,
0x55, 0x1f, 0x02, 0x88, 0xd5, 0x06, 0xe9, 0x9e, 0x40, 0xfe, 0x37, 0x60, 0x4a, 0xec, 0x2a, 0xfc,
0x6d, 0xd2, 0xc5, 0xfa, 0xe8, 0xea, 0x3f, 0x15, 0xa8, 0x05, 0x11, 0x94, 0x79, 0x55, 0x0d, 0x32,
0xd2, 0x97, 0x32, 0xad, 0x15, 0xf4, 0x36, 0x14, 0x78, 0xff, 0x28, 0x78, 0x5f, 0x8f, 0xf2, 0x16,
0xbd, 0x65, 0x28, 0x0c, 0x33, 0x80, 0x26, 0x88, 0xa8, 0x8e, 0x64, 0xd4, 0xe1, 0x1d, 0x46, 0x56,
0x0b, 0x41, 0x50, 0x0b, 0xa6, 0xa3, 0x45, 0x9b, 0xef, 0x33, 0xf3, 0xa3, 0xa2, 0xcd, 0x8a, 0xee,
0xe9, 0x2c, 0xd8, 0xd4, 0x22, 0x35, 0x1b, 0x51, 0xff, 0x9b, 0x87, 0x72, 0xe8, 0x94, 0x43, 0x27,
0x89, 0x5f, 0x69, 0x66, 0x72, 0xdc, 0xcc, 0x0e, 0x77, 0x0e, 0xd7, 0xa1, 0x66, 0xb2, 0x5c, 0xdd,
0x16, 0xa6, 0xc8, 0x82, 0x6b, 0x49, 0xab, 0x72, 0xa8, 0xf0, 0x0b, 0x34, 0x07, 0x65, 0x7b, 0x60,
0xb5, 0x9d, 0xbd, 0xb6, 0xeb, 0x1c, 0x12, 0xd1, 0x82, 0x94, 0xec, 0x81, 0xf5, 0xdd, 0x3d, 0xcd,
0x39, 0x24, 0x41, 0x95, 0x5b, 0x38, 0x66, 0x95, 0x3b, 0x07, 0x65, 0x4b, 0x7f, 0x48, 0xb9, 0xb6,
0xed, 0x81, 0xc5, 0xba, 0x93, 0xac, 0x56, 0xb2, 0xf4, 0x87, 0x9a, 0x73, 0xb8, 0x39, 0xb0, 0xd0,
0x02, 0xd4, 0x7b, 0x3a, 0xf1, 0xda, 0xe1, 0xf6, 0xa6, 0xc8, 0xda, 0x9b, 0x1a, 0x85, 0xdf, 0x0b,
0x5a, 0x9c, 0xe1, 0x7a, 0xb9, 0x74, 0x8a, 0x7a, 0xd9, 0xb0, 0x7a, 0x01, 0x23, 0x48, 0x5f, 0x2f,
0x1b, 0x56, 0x4f, 0xb2, 0x79, 0x03, 0xa6, 0x76, 0x59, 0x05, 0x44, 0x1a, 0xe5, 0x91, 0x11, 0x6a,
0x95, 0x16, 0x3f, 0xbc, 0x50, 0xd2, 0x7c, 0x74, 0xf4, 0x16, 0x94, 0x58, 0xea, 0x61, 0xb4, 0x95,
0x54, 0xb4, 0x01, 0x01, 0xa5, 0x36, 0x70, 0xcf, 0xd3, 0x19, 0x75, 0x35, 0x1d, 0xb5, 0x24, 0x40,
0xaf, 0xc0, 0xd9, 0x8e, 0x8b, 0x75, 0x0f, 0x1b, 0x4b, 0x47, 0xcb, 0x8e, 0xd5, 0xd7, 0x99, 0x31,
0x35, 0x6a, 0xf3, 0xca, 0x42, 0x51, 0x4b, 0xfa, 0x44, 0x03, 0x43, 0x47, 0xae, 0x56, 0x5d, 0xc7,
0x6a, 0x4c, 0xf3, 0xc0, 0x10, 0x85, 0xa2, 0xcb, 0x00, 0x7e, 0xe8, 0xd6, 0xbd, 0x46, 0x9d, 0xdd,
0x62, 0x49, 0x40, 0xde, 0xf3, 0xd4, 0xcf, 0xe1, 0x5c, 0x60, 0x21, 0xa1, 0xdb, 0x18, 0xbe, 0x58,
0xe5, 0xa4, 0x17, 0x3b, 0xbe, 0x76, 0xfd, 0x77, 0x0e, 0x66, 0xb7, 0xf5, 0x07, 0xf8, 0xc9, 0x97,
0xc9, 0xa9, 0xe2, 0xf1, 0x3a, 0xcc, 0xb0, 0xca, 0x78, 0x31, 0x24, 0xcf, 0x98, 0x0c, 0x1c, 0xbe,
0xce, 0x61, 0x42, 0xf4, 0x2e, 0x2d, 0x1d, 0x70, 0xe7, 0x60, 0xcb, 0x31, 0x83, 0xec, 0x7b, 0x39,
0x81, 0xcf, 0xb2, 0xc4, 0xd2, 0xc2, 0x14, 0x68, 0x6b, 0x38, 0xb4, 0xf1, 0xbc, 0xfb, 0xc2, 0xd8,
0xfe, 0x2b, 0xd0, 0x7e, 0x3c, 0xc2, 0xa1, 0x06, 0x4c, 0x89, 0xec, 0xce, 0xfc, 0xbe, 0xa8, 0xf9,
0x4b, 0xb4, 0x05, 0x67, 0xf9, 0x09, 0xb6, 0x85, 0x51, 0xf3, 0xc3, 0x17, 0x53, 0x1d, 0x3e, 0x89,
0x34, 0xea, 0x13, 0xa5, 0xe3, 0xfa, 0x44, 0x03, 0xa6, 0x84, 0x9d, 0xb2, 0x58, 0x50, 0xd4, 0xfc,
0x25, 0xbd, 0x66, 0xd3, 0xea, 0x3b, 0xae, 0x67, 0xda, 0xdd, 0x46, 0x99, 0x7d, 0x0b, 0x00, 0xb4,
0xc5, 0x80, 0x40, 0x9f, 0x13, 0x26, 0x05, 0xef, 0x40, 0x51, 0x5a, 0x78, 0x26, 0xb5, 0x85, 0x4b,
0x9a, 0x78, 0x8c, 0xce, 0xc6, 0x62, 0xb4, 0xfa, 0x2f, 0x05, 0x2a, 0x2b, 0xf4, 0x48, 0xeb, 0x4e,
0x97, 0x65, 0x94, 0xeb, 0x50, 0x73, 0x71, 0xc7, 0x71, 0x8d, 0x36, 0xb6, 0x3d, 0xd7, 0xc4, 0xbc,
0x1b, 0xcd, 0x69, 0x55, 0x0e, 0xbd, 0xc7, 0x81, 0x14, 0x8d, 0x86, 0x5d, 0xe2, 0xe9, 0x56, 0xbf,
0xbd, 0x47, 0xdd, 0x3b, 0xc3, 0xd1, 0x24, 0x94, 0x79, 0xf7, 0x55, 0xa8, 0x04, 0x68, 0x9e, 0xc3,
0xf6, 0xcf, 0x69, 0x65, 0x09, 0xdb, 0x71, 0xd0, 0xf3, 0x50, 0x63, 0x3a, 0x6d, 0xf7, 0x9c, 0x6e,
0x9b, 0x76, 0x6e, 0x22, 0xd9, 0x54, 0x0c, 0x21, 0x16, 0xbd, 0xab, 0x28, 0x16, 0x31, 0x3f, 0xc3,
0x22, 0xdd, 0x48, 0xac, 0x6d, 0xf3, 0x33, 0x4c, 0x73, 0x7d, 0x95, 0xe6, 0xce, 0x4d, 0xc7, 0xc0,
0x3b, 0x27, 0xac, 0x34, 0x52, 0x4c, 0xed, 0x2e, 0x41, 0x49, 0x9e, 0x40, 0x1c, 0x29, 0x00, 0xa0,
0x55, 0xa8, 0xf9, 0x45, 0x68, 0x9b, 0xf7, 0x16, 0xb9, 0x91, 0x95, 0x5f, 0x28, 0xfb, 0x11, 0xad,
0xea, 0x93, 0xb1, 0xa5, 0xba, 0x0a, 0x95, 0xf0, 0x67, 0xba, 0xeb, 0x76, 0xdc, 0x50, 0x24, 0x80,
0x5a, 0xe3, 0xe6, 0xc0, 0xa2, 0x77, 0x2a, 0x02, 0x8b, 0xbf, 0x54, 0xbf, 0x50, 0xa0, 0x2a, 0x52,
0xf6, 0xb6, 0x9c, 0x2a, 0xb3, 0xa3, 0x29, 0xec, 0x68, 0xec, 0x6f, 0xf4, 0xed, 0xe8, 0x48, 0xea,
0xf9, 0xc4, 0x20, 0xc0, 0x98, 0xb0, 0xea, 0x38, 0x92, 0xaf, 0xd3, 0xf4, 0xb2, 0x8f, 0xa8, 0xa1,
0x89, 0xab, 0x61, 0x86, 0xd6, 0x80, 0x29, 0xdd, 0x30, 0x5c, 0x4c, 0x88, 0x90, 0xc3, 0x5f, 0xd2,
0x2f, 0x0f, 0xb0, 0x4b, 0x7c, 0x93, 0xcf, 0x6a, 0xfe, 0x12, 0xbd, 0x05, 0x45, 0x59, 0x4e, 0x67,
0x93, 0x4a, 0xa8, 0xb0, 0x9c, 0xa2, 0xf7, 0x92, 0x14, 0xea, 0xd7, 0x19, 0xa8, 0x09, 0x85, 0x2d,
0x89, 0x9c, 0x3a, 0xde, 0xf9, 0x96, 0xa0, 0xb2, 0x17, 0xf8, 0xfe, 0xb8, 0x19, 0x4b, 0x38, 0x44,
0x44, 0x68, 0x26, 0x39, 0x60, 0x34, 0xab, 0xe7, 0x4e, 0x95, 0xd5, 0xf3, 0xc7, 0x8c, 0x60, 0xea,
0x0f, 0xa0, 0x1c, 0xfa, 0xc2, 0x42, 0x2f, 0x9f, 0xba, 0x08, 0x55, 0xf8, 0x4b, 0x74, 0x37, 0x28,
0x5a, 0xb8, 0x0e, 0x2e, 0x24, 0x6c, 0x12, 0xab, 0x57, 0xd4, 0xdf, 0x2b, 0x50, 0x10, 0x9c, 0xaf,
0x40, 0x59, 0x44, 0x13, 0x56, 0xd0, 0x71, 0xee, 0x20, 0x40, 0xb4, 0xa2, 0x7b, 0x7c, 0xe1, 0xe4,
0x02, 0x14, 0x63, 0x81, 0x64, 0x4a, 0xc4, 0x7b, 0xff, 0x53, 0x28, 0x7a, 0xd0, 0x4f, 0x2c, 0x70,
0x7c, 0xa3, 0xb0, 0x89, 0xb1, 0x86, 0x3b, 0xce, 0x03, 0xec, 0x1e, 0x9d, 0x7e, 0x2e, 0xf7, 0x66,
0xc8, 0x52, 0x53, 0x36, 0x7e, 0x92, 0x00, 0xbd, 0x19, 0xa8, 0x3b, 0x9b, 0x34, 0x96, 0x08, 0x87,
0x0e, 0x61, 0x67, 0x81, 0xda, 0x7f, 0xc6, 0x27, 0x8c, 0xd1, 0xa3, 0x9c, 0xb4, 0x60, 0x79, 0x2c,
0xfd, 0x84, 0xfa, 0x0b, 0x05, 0x2e, 0xac, 0x61, 0x6f, 0x35, 0xda, 0xb5, 0x3f, 0x6b, 0xa9, 0x2c,
0x68, 0x26, 0x09, 0x75, 0x9a, 0x5b, 0x6f, 0x42, 0x51, 0xce, 0x1f, 0xf8, 0xec, 0x57, 0xae, 0xd5,
0x1f, 0x2b, 0xd0, 0x10, 0xbb, 0xb0, 0x3d, 0x69, 0xad, 0xdc, 0xc3, 0x1e, 0x36, 0x9e, 0x76, 0x43,
0xfc, 0x77, 0x05, 0xea, 0xe1, 0x50, 0xce, 0xa2, 0xf1, 0x6b, 0x90, 0x67, 0x73, 0x07, 0x21, 0xc1,
0x44, 0x63, 0xe5, 0xd8, 0x34, 0x64, 0xb0, 0xfa, 0x6d, 0x47, 0x66, 0x1d, 0xb1, 0x0c, 0xf2, 0x49,
0xf6, 0xf8, 0xf9, 0x44, 0xe4, 0x57, 0x67, 0x40, 0xf9, 0xf2, 0xb9, 0x5e, 0x00, 0x50, 0xbf, 0xca,
0x40, 0x23, 0x68, 0x34, 0x9e, 0x7a, 0x40, 0x1f, 0x51, 0x86, 0x66, 0x1f, 0x53, 0x19, 0x9a, 0x3b,
0x6e, 0x10, 0xff, 0x5b, 0x06, 0x6a, 0x81, 0x3a, 0xb6, 0x7a, 0xba, 0x8d, 0x66, 0xa1, 0xd0, 0xef,
0xe9, 0xc1, 0xbc, 0x50, 0xac, 0xd0, 0xb6, 0xac, 0x4c, 0xa2, 0x0a, 0x78, 0x29, 0xe9, 0x72, 0x46,
0x68, 0x58, 0x8b, 0xb1, 0xa0, 0x0d, 0x1c, 0x6f, 0x01, 0x58, 0x1b, 0x2e, 0xaa, 0x21, 0x6e, 0x05,
0xb4, 0x03, 0xbf, 0x05, 0x48, 0x5c, 0x5d, 0xdb, 0xb4, 0xdb, 0x04, 0x77, 0x1c, 0xdb, 0xe0, 0x97,
0x9a, 0xd7, 0xea, 0xe2, 0x4b, 0xcb, 0xde, 0xe6, 0x70, 0xf4, 0x1a, 0xe4, 0xbc, 0xa3, 0x3e, 0x0f,
0xcf, 0xb5, 0xc4, 0xb0, 0x17, 0xc8, 0xb5, 0x73, 0xd4, 0xc7, 0x1a, 0x43, 0x47, 0x73, 0x00, 0x94,
0x95, 0xe7, 0xea, 0x0f, 0x70, 0xcf, 0xff, 0xa5, 0x33, 0x80, 0x50, 0x33, 0xf5, 0x27, 0x19, 0x53,
0x3c, 0x27, 0x88, 0xa5, 0xfa, 0x97, 0x0c, 0xd4, 0x03, 0x96, 0x1a, 0x26, 0x83, 0x9e, 0x37, 0x52,
0x7f, 0xe3, 0xdb, 0xb7, 0x49, 0x99, 0xfe, 0x5d, 0x28, 0x8b, 0xa9, 0xca, 0x31, 0x2e, 0x1a, 0x38,
0xc9, 0xfa, 0x18, 0xcb, 0xcb, 0x3f, 0x26, 0xcb, 0x2b, 0x1c, 0xd7, 0xf2, 0xb6, 0x61, 0xd6, 0x0f,
0x68, 0x01, 0xc2, 0x06, 0xf6, 0xf4, 0x31, 0x95, 0xc4, 0x15, 0x28, 0xf3, 0x44, 0xc5, 0x33, 0x34,
0x2f, 0xae, 0x61, 0x57, 0xf6, 0xa4, 0xea, 0x0f, 0xe1, 0x1c, 0x0b, 0x08, 0xf1, 0xe1, 0x6b, 0x9a,
0x49, 0xb8, 0x2a, 0x4b, 0x77, 0x5a, 0xa6, 0x73, 0xeb, 0x2e, 0x69, 0x11, 0x98, 0xba, 0x0e, 0xcf,
0xc5, 0xf8, 0x9f, 0x22, 0xe0, 0xd3, 0x1a, 0x67, 0x76, 0x3b, 0xfa, 0x43, 0xe6, 0xc9, 0xd3, 0xda,
0x65, 0x39, 0x6b, 0x6d, 0x9b, 0x46, 0xdc, 0xbe, 0x0c, 0xf4, 0x0e, 0x94, 0x6c, 0x7c, 0xd8, 0x0e,
0x47, 0xd5, 0x14, 0x23, 0xb5, 0xa2, 0x8d, 0x0f, 0xd9, 0x5f, 0xea, 0x26, 0x9c, 0x1f, 0x12, 0xf5,
0x34, 0x67, 0xff, 0xab, 0x02, 0x17, 0x56, 0x5c, 0xa7, 0xff, 0x91, 0xe9, 0x7a, 0x03, 0xbd, 0x17,
0xfd, 0x29, 0xe3, 0xc9, 0x34, 0x5e, 0xef, 0x87, 0xf2, 0x2b, 0x0f, 0xb8, 0xb7, 0x12, 0xcc, 0x75,
0x58, 0x28, 0x71, 0xe8, 0x50, 0x36, 0xfe, 0x4f, 0x36, 0x49, 0x78, 0x81, 0x37, 0x21, 0x8b, 0xa4,
0x29, 0x3f, 0x12, 0xe7, 0x34, 0xd9, 0x93, 0xce, 0x69, 0x46, 0x78, 0x7e, 0xee, 0x31, 0x79, 0xfe,
0x71, 0x1b, 0x07, 0xf4, 0x3e, 0x44, 0x67, 0x68, 0x2c, 0xe4, 0x9e, 0x68, 0xf8, 0xb6, 0x04, 0x10,
0xcc, 0x93, 0xc4, 0x3b, 0x94, 0x34, 0x6c, 0x42, 0x54, 0xf4, 0xb6, 0x64, 0x94, 0x65, 0x73, 0xe0,
0xc8, 0x84, 0xe3, 0x03, 0x68, 0x26, 0x59, 0xe9, 0x69, 0x2c, 0xff, 0xcf, 0x19, 0x80, 0x16, 0x9b,
0xe7, 0xec, 0xe8, 0xe4, 0xe0, 0x64, 0xa5, 0xe2, 0x35, 0xa8, 0x06, 0x06, 0x13, 0xf8, 0x7b, 0xd8,
0x8a, 0x0c, 0xea, 0x12, 0xb2, 0x62, 0xa5, 0x38, 0x43, 0x55, 0xac, 0xc1, 0xf8, 0x84, 0xbc, 0x86,
0x1b, 0x45, 0x2c, 0xe8, 0xa1, 0x8b, 0x50, 0x72, 0x9d, 0xc3, 0x36, 0x75, 0x33, 0x83, 0xe5, 0xd6,
0xa2, 0x56, 0x74, 0x9d, 0x43, 0xea, 0x7c, 0x06, 0x3a, 0x0f, 0x53, 0x9e, 0x4e, 0x0e, 0x28, 0xff,
0x02, 0x4f, 0x77, 0x74, 0xd9, 0x32, 0xd0, 0x39, 0xc8, 0xef, 0x99, 0x3d, 0x4c, 0x1a, 0x53, 0x8c,
0x25, 0x5f, 0xa0, 0xd7, 0xfd, 0x17, 0x07, 0xc5, 0xd4, 0xbf, 0x98, 0xf2, 0x47, 0x07, 0xdf, 0x28,
0x30, 0x1d, 0x68, 0x8d, 0x05, 0x20, 0x1a, 0xd3, 0x58, 0x3c, 0x5b, 0x76, 0x0c, 0x1e, 0x2a, 0x6a,
0x23, 0x7e, 0x14, 0xe1, 0x84, 0x3c, 0x6a, 0x05, 0x24, 0xe3, 0x0a, 0x6e, 0x7a, 0x2e, 0x7a, 0x68,
0xd3, 0xf0, 0x7f, 0x93, 0x29, 0xb8, 0xce, 0x61, 0xcb, 0x90, 0xda, 0xe0, 0x0f, 0xaf, 0x78, 0x79,
0x49, 0xb5, 0xb1, 0xcc, 0xde, 0x5e, 0x5d, 0x83, 0x2a, 0x76, 0x5d, 0xc7, 0x6d, 0x5b, 0x98, 0x10,
0xbd, 0xcb, 0x4b, 0x91, 0x92, 0x56, 0x61, 0xc0, 0x0d, 0x0e, 0x53, 0xff, 0x97, 0x81, 0x5a, 0x70,
0x14, 0xff, 0x97, 0x18, 0xd3, 0xf0, 0x7f, 0x89, 0x31, 0x0d, 0x1a, 0xcc, 0x5d, 0x1e, 0x0a, 0x43,
0xc1, 0x5c, 0x40, 0x5a, 0x06, 0xcd, 0x83, 0xd4, 0xc1, 0x6c, 0xc7, 0xc0, 0xc1, 0xc5, 0x82, 0x0f,
0x12, 0xf7, 0x1a, 0xb1, 0x8f, 0x5c, 0x0a, 0xfb, 0xc8, 0xa7, 0xb0, 0x8f, 0x42, 0x82, 0x7d, 0xcc,
0x42, 0x61, 0x77, 0xd0, 0x39, 0xc0, 0x9e, 0x28, 0x8f, 0xc4, 0x2a, 0x6a, 0x37, 0xc5, 0x98, 0xdd,
0x48, 0xf3, 0x28, 0x85, 0xcd, 0xe3, 0x22, 0x94, 0xf8, 0xcf, 0x01, 0x6d, 0x8f, 0xb0, 0xb9, 0x68,
0x56, 0x2b, 0x72, 0xc0, 0x0e, 0x41, 0x6f, 0xf8, 0x4d, 0x41, 0x39, 0xc9, 0xd1, 0x59, 0xc4, 0x89,
0x59, 0x88, 0x68, 0x09, 0xd4, 0x4f, 0x00, 0x05, 0x5f, 0x4e, 0xd7, 0xa4, 0xc5, 0x54, 0x9f, 0x89,
0xab, 0x5e, 0xfd, 0x83, 0x02, 0x33, 0xe1, 0xcd, 0x4e, 0x9a, 0xd0, 0xde, 0x81, 0x32, 0x9f, 0xfa,
0xb6, 0xa9, 0x43, 0x89, 0x36, 0xed, 0xf2, 0xd8, 0x33, 0x6b, 0x60, 0x06, 0x71, 0xe5, 0x1a, 0x54,
0x0f, 0x1d, 0xf7, 0xc0, 0xb4, 0xbb, 0x6d, 0x2a, 0x99, 0x6f, 0xc6, 0x15, 0x01, 0xdc, 0xa4, 0x30,
0xf5, 0x4b, 0x05, 0xe6, 0x3e, 0xec, 0x1b, 0xba, 0x87, 0x43, 0x99, 0xfd, 0xb4, 0x4f, 0x32, 0x5e,
0xf3, 0x5f, 0x45, 0x64, 0xd2, 0x4d, 0x2e, 0x39, 0xf6, 0xcd, 0x5f, 0x2a, 0x30, 0x33, 0xd4, 0xd4,
0xa1, 0x1a, 0xc0, 0x87, 0x76, 0x47, 0x74, 0xbb, 0xf5, 0x33, 0xa8, 0x02, 0x45, 0xbf, 0xf7, 0xad,
0x2b, 0xa8, 0x0c, 0x53, 0x3b, 0x0e, 0xc3, 0xae, 0x67, 0x50, 0x1d, 0x2a, 0x9c, 0x70, 0xd0, 0xe9,
0x60, 0x42, 0xea, 0x59, 0x09, 0x59, 0xd5, 0xcd, 0xde, 0xc0, 0xc5, 0xf5, 0x1c, 0xaa, 0x42, 0x69,
0xc7, 0xd1, 0x70, 0x0f, 0xeb, 0x04, 0xd7, 0xf3, 0x08, 0x41, 0x4d, 0x2c, 0x7c, 0xa2, 0x42, 0x08,
0xe6, 0x93, 0x4d, 0xdd, 0xdc, 0x0b, 0x77, 0x49, 0xb4, 0x75, 0x40, 0xe7, 0xe1, 0xec, 0x87, 0xb6,
0x81, 0xf7, 0x4c, 0x1b, 0x1b, 0xc1, 0xa7, 0xfa, 0x19, 0x74, 0x16, 0xa6, 0x5b, 0xb6, 0x8d, 0xdd,
0x10, 0x50, 0xa1, 0xc0, 0x0d, 0xec, 0x76, 0x71, 0x08, 0x98, 0x41, 0x33, 0x50, 0xdd, 0x30, 0x1f,
0x86, 0x40, 0xd9, 0xc5, 0x7f, 0x3c, 0x07, 0xa5, 0x15, 0xdd, 0xd3, 0x97, 0x1d, 0xc7, 0x35, 0x50,
0x1f, 0x10, 0x7b, 0xc0, 0x63, 0xf5, 0x1d, 0x5b, 0xbe, 0x74, 0x43, 0xaf, 0x8c, 0x48, 0x70, 0xc3,
0xa8, 0xe2, 0x0a, 0x9b, 0x37, 0x46, 0x50, 0xc4, 0xd0, 0xd5, 0x33, 0xc8, 0x62, 0x3b, 0xd2, 0xd6,
0x6b, 0xc7, 0xec, 0x1c, 0xf8, 0x3f, 0xd5, 0x8e, 0xd9, 0x31, 0x86, 0xea, 0xef, 0x18, 0x7b, 0x40,
0x27, 0x16, 0xfc, 0x95, 0x95, 0xef, 0x7f, 0xea, 0x19, 0xf4, 0x29, 0x9c, 0x5b, 0xc3, 0x21, 0x9b,
0xf3, 0x37, 0x5c, 0x1c, 0xbd, 0xe1, 0x10, 0xf2, 0x31, 0xb7, 0x5c, 0x87, 0x3c, 0x1b, 0xa0, 0xa0,
0x24, 0xb3, 0x0c, 0x3f, 0xf7, 0x6e, 0xce, 0x8f, 0x46, 0x90, 0xdc, 0x3e, 0x81, 0xe9, 0xd8, 0x73,
0x56, 0xf4, 0x62, 0x02, 0x59, 0xf2, 0xc3, 0xe4, 0xe6, 0xcd, 0x34, 0xa8, 0x72, 0xaf, 0x2e, 0xd4,
0xa2, 0xcf, 0x7f, 0xd0, 0x42, 0x02, 0x7d, 0xe2, 0x53, 0xc4, 0xe6, 0x8b, 0x29, 0x30, 0xe5, 0x46,
0x16, 0xd4, 0xe3, 0xcf, 0x2b, 0xd1, 0xcd, 0xb1, 0x0c, 0xa2, 0xe6, 0xf6, 0x52, 0x2a, 0x5c, 0xb9,
0xdd, 0x11, 0x33, 0x82, 0xa1, 0xe7, 0x7d, 0xe8, 0x76, 0x32, 0x9b, 0x51, 0xef, 0x0e, 0x9b, 0x77,
0x52, 0xe3, 0xcb, 0xad, 0x7f, 0xc4, 0x07, 0xb7, 0x49, 0x4f, 0xe4, 0xd0, 0xab, 0xc9, 0xec, 0xc6,
0xbc, 0xed, 0x6b, 0x2e, 0x1e, 0x87, 0x44, 0x0a, 0xf1, 0x39, 0x9b, 0xb8, 0x26, 0x3c, 0x33, 0x8b,
0xfb, 0x9d, 0xcf, 0x6f, 0xf4, 0xfb, 0xb9, 0xe6, 0xab, 0xc7, 0xa0, 0x90, 0x02, 0x38, 0xf1, 0x07,
0xac, 0xbe, 0x1b, 0xde, 0x99, 0x68, 0x35, 0x27, 0xf3, 0xc1, 0x8f, 0x61, 0x3a, 0xf6, 0xa3, 0x78,
0xa2, 0xd7, 0x24, 0xff, 0x70, 0xde, 0x1c, 0x97, 0xa6, 0xb9, 0x4b, 0xc6, 0x06, 0xd8, 0x68, 0x84,
0xf5, 0x27, 0x0c, 0xb9, 0x9b, 0x37, 0xd3, 0xa0, 0xca, 0x83, 0x10, 0x16, 0x2e, 0x63, 0x43, 0x60,
0x74, 0x2b, 0x99, 0x47, 0xf2, 0x00, 0xbb, 0xf9, 0x72, 0x4a, 0x6c, 0xb9, 0x69, 0x1b, 0x60, 0x0d,
0x7b, 0x1b, 0xd8, 0x73, 0xa9, 0x8d, 0xdc, 0x48, 0x54, 0x79, 0x80, 0xe0, 0x6f, 0xf3, 0xc2, 0x44,
0x3c, 0xb9, 0xc1, 0xf7, 0x00, 0xf9, 0x29, 0x36, 0xf4, 0x24, 0xe3, 0xda, 0xd8, 0x71, 0x1a, 0x9f,
0x7d, 0x4d, 0xba, 0x9b, 0x4f, 0xa1, 0xbe, 0xa1, 0xdb, 0xb4, 0x91, 0x0a, 0xf8, 0xde, 0x4a, 0x14,
0x2c, 0x8e, 0x36, 0x42, 0x5b, 0x23, 0xb1, 0xe5, 0x61, 0x0e, 0x65, 0x0e, 0xd5, 0xa5, 0x0b, 0xe2,
0x78, 0x6c, 0x09, 0xb4, 0x11, 0x43, 0x1c, 0x11, 0x5b, 0xc6, 0xe0, 0xcb, 0x8d, 0x1f, 0x29, 0xec,
0x99, 0x74, 0x0c, 0xe1, 0xbe, 0xe9, 0xed, 0x6f, 0xf5, 0x74, 0x9b, 0xa4, 0x11, 0x81, 0x21, 0x1e,
0x43, 0x04, 0x81, 0x2f, 0x45, 0x30, 0xa0, 0x1a, 0x99, 0x56, 0xa1, 0xa4, 0x77, 0x15, 0x49, 0xf3,
0xb2, 0xe6, 0xc2, 0x64, 0x44, 0xb9, 0xcb, 0x3e, 0x54, 0x7d, 0x7b, 0xe5, 0xca, 0x7d, 0x71, 0x94,
0xa4, 0x01, 0xce, 0x08, 0x77, 0x4b, 0x46, 0x0d, 0xbb, 0xdb, 0x70, 0x33, 0x8e, 0xd2, 0x0d, 0x71,
0xc6, 0xb9, 0xdb, 0xe8, 0x0e, 0x9f, 0xc7, 0x93, 0xd8, 0xe0, 0x2b, 0x39, 0x58, 0x25, 0xce, 0xf1,
0x12, 0xe3, 0xc9, 0x88, 0x39, 0x9a, 0x7a, 0x06, 0xdd, 0x87, 0x02, 0xaf, 0xe6, 0xd1, 0xf3, 0xe3,
0x0b, 0x7d, 0xc1, 0xfd, 0xfa, 0x04, 0x2c, 0xc9, 0xf8, 0x00, 0xce, 0x8f, 0x28, 0xf3, 0x13, 0xf3,
0xdc, 0xf8, 0x96, 0x60, 0x82, 0x97, 0x2f, 0xfe, 0x36, 0x0f, 0x45, 0xff, 0xc7, 0xfa, 0x67, 0x50,
0xc3, 0x3e, 0x83, 0xa2, 0xf2, 0x63, 0x98, 0x8e, 0xbd, 0xfa, 0x4d, 0xb4, 0x91, 0xe4, 0x97, 0xc1,
0x93, 0x82, 0xe6, 0x7d, 0xf1, 0xbf, 0x80, 0x32, 0xbf, 0xbc, 0x30, 0xaa, 0x30, 0x8d, 0xa7, 0x96,
0x09, 0x8c, 0x9f, 0x78, 0x22, 0xd9, 0x04, 0x08, 0x05, 0xfa, 0xf1, 0xbf, 0xc7, 0xd0, 0xd8, 0x35,
0x49, 0xe0, 0x8d, 0x63, 0xba, 0xc7, 0x78, 0x76, 0x4b, 0x77, 0xbf, 0xff, 0x6a, 0xd7, 0xf4, 0xf6,
0x07, 0xbb, 0xf4, 0xcb, 0x1d, 0x8e, 0xfa, 0xb2, 0xe9, 0x88, 0xbf, 0xee, 0xf8, 0x06, 0x72, 0x87,
0x51, 0xdf, 0xa1, 0x7b, 0xf4, 0x77, 0x77, 0x0b, 0x6c, 0x75, 0xf7, 0xff, 0x01, 0x00, 0x00, 0xff,
0xff, 0xa1, 0x07, 0xba, 0xab, 0x85, 0x3a, 0x00, 0x00,
// 3412 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3b, 0x4b, 0x6f, 0x1b, 0xd7,
0xd5, 0x1e, 0xbe, 0x44, 0x1e, 0x3e, 0x44, 0x5f, 0x3b, 0x32, 0x4d, 0xdb, 0xb2, 0x3c, 0x8e, 0x1d,
0xc5, 0x71, 0xec, 0x44, 0xfe, 0x82, 0x04, 0x5f, 0x5e, 0x88, 0x24, 0x4b, 0x21, 0x3e, 0x49, 0x9f,
0x32, 0x52, 0xe2, 0xa2, 0x29, 0x4a, 0x8c, 0x38, 0x57, 0xd4, 0x44, 0x9c, 0x19, 0x66, 0xee, 0xd0,
0xb2, 0xb2, 0x89, 0xd1, 0x00, 0x05, 0x52, 0x14, 0x7d, 0xa0, 0xab, 0x02, 0x5d, 0x14, 0x05, 0x0a,
0xf4, 0xb1, 0x29, 0x10, 0x74, 0xd1, 0x16, 0xdd, 0x74, 0x15, 0xb4, 0x8b, 0xfe, 0x8c, 0xee, 0xda,
0xbf, 0x50, 0xdc, 0xc7, 0xdc, 0x79, 0x70, 0x48, 0x8e, 0x44, 0x3b, 0xde, 0xe9, 0x9e, 0x39, 0xe7,
0xdc, 0x73, 0xcf, 0x3d, 0x6f, 0x5e, 0x41, 0xdd, 0xd0, 0x3d, 0xbd, 0xdd, 0x71, 0x1c, 0xd7, 0xb8,
0xd3, 0x77, 0x1d, 0xcf, 0x41, 0x67, 0x2d, 0xb3, 0xf7, 0x70, 0x40, 0xf8, 0xea, 0x0e, 0xfd, 0xdc,
0xac, 0x74, 0x1c, 0xcb, 0x72, 0x6c, 0x0e, 0x6a, 0xd6, 0x4c, 0xdb, 0xc3, 0xae, 0xad, 0xf7, 0xc4,
0xba, 0x12, 0x26, 0x68, 0x56, 0x48, 0xe7, 0x00, 0x5b, 0x3a, 0x5f, 0xa9, 0x33, 0x90, 0xbf, 0x6f,
0xf5, 0xbd, 0x63, 0xf5, 0x11, 0x54, 0xd6, 0x7a, 0x03, 0x72, 0xa0, 0xe1, 0x4f, 0x07, 0x98, 0x78,
0xe8, 0x15, 0xc8, 0xed, 0xe9, 0x04, 0x37, 0x94, 0x05, 0x65, 0xb1, 0xbc, 0x74, 0xf9, 0x4e, 0x64,
0x53, 0xb1, 0xdd, 0x26, 0xe9, 0x2e, 0xeb, 0x04, 0x6b, 0x0c, 0x13, 0x21, 0xc8, 0x19, 0x7b, 0xad,
0xd5, 0x46, 0x66, 0x41, 0x59, 0xcc, 0x6a, 0xec, 0x6f, 0xa4, 0x42, 0xa5, 0xe3, 0xf4, 0x7a, 0xb8,
0xe3, 0x99, 0x8e, 0xdd, 0x5a, 0x6d, 0xe4, 0xd8, 0xb7, 0x08, 0x4c, 0xfd, 0x85, 0x02, 0x55, 0xb1,
0x35, 0xe9, 0x3b, 0x36, 0xc1, 0xe8, 0x1e, 0x14, 0x88, 0xa7, 0x7b, 0x03, 0x22, 0x76, 0xbf, 0x94,
0xb8, 0xfb, 0x0e, 0x43, 0xd1, 0x04, 0x6a, 0xaa, 0xed, 0xb3, 0xc3, 0xdb, 0xa3, 0x79, 0x00, 0x82,
0xbb, 0x16, 0xb6, 0xbd, 0xd6, 0x2a, 0x69, 0xe4, 0x16, 0xb2, 0x8b, 0x59, 0x2d, 0x04, 0x51, 0x7f,
0xaa, 0x40, 0x7d, 0xc7, 0x5f, 0xfa, 0xda, 0x39, 0x0f, 0xf9, 0x8e, 0x33, 0xb0, 0x3d, 0x26, 0x60,
0x55, 0xe3, 0x0b, 0x74, 0x0d, 0x2a, 0x9d, 0x03, 0xdd, 0xb6, 0x71, 0xaf, 0x6d, 0xeb, 0x16, 0x66,
0xa2, 0x94, 0xb4, 0xb2, 0x80, 0x6d, 0xe9, 0x16, 0x4e, 0x25, 0xd1, 0x02, 0x94, 0xfb, 0xba, 0xeb,
0x99, 0x11, 0x9d, 0x85, 0x41, 0xea, 0x2f, 0x15, 0x98, 0x7b, 0x8f, 0x10, 0xb3, 0x6b, 0x0f, 0x49,
0x36, 0x07, 0x05, 0xdb, 0x31, 0x70, 0x6b, 0x95, 0x89, 0x96, 0xd5, 0xc4, 0x0a, 0x5d, 0x82, 0x52,
0x1f, 0x63, 0xb7, 0xed, 0x3a, 0x3d, 0x5f, 0xb0, 0x22, 0x05, 0x68, 0x4e, 0x0f, 0xa3, 0x0f, 0xe0,
0x2c, 0x89, 0x31, 0x22, 0x8d, 0xec, 0x42, 0x76, 0xb1, 0xbc, 0x74, 0xfd, 0xce, 0x90, 0xb9, 0xdd,
0x89, 0x6f, 0xaa, 0x0d, 0x53, 0xab, 0x8f, 0x33, 0x70, 0x4e, 0xe2, 0x71, 0x59, 0xe9, 0xdf, 0x54,
0x73, 0x04, 0x77, 0xa5, 0x78, 0x7c, 0x91, 0x46, 0x73, 0x52, 0xe5, 0xd9, 0xb0, 0xca, 0x53, 0x18,
0x58, 0x5c, 0x9f, 0xf9, 0x21, 0x7d, 0xa2, 0xab, 0x50, 0xc6, 0x8f, 0xfa, 0xa6, 0x8b, 0xdb, 0x9e,
0x69, 0xe1, 0x46, 0x61, 0x41, 0x59, 0xcc, 0x69, 0xc0, 0x41, 0xbb, 0xa6, 0x15, 0xb6, 0xc8, 0x99,
0xd4, 0x16, 0xa9, 0xfe, 0x4a, 0x81, 0x0b, 0x43, 0xb7, 0x24, 0x4c, 0x5c, 0x83, 0x3a, 0x3b, 0x79,
0xa0, 0x19, 0x6a, 0xec, 0x54, 0xe1, 0x37, 0xc7, 0x29, 0x3c, 0x40, 0xd7, 0x86, 0xe8, 0x43, 0x42,
0x66, 0xd2, 0x0b, 0x79, 0x08, 0x17, 0xd6, 0xb1, 0x27, 0x36, 0xa0, 0xdf, 0x30, 0x39, 0x7d, 0x08,
0x88, 0xfa, 0x52, 0x66, 0xc8, 0x97, 0xfe, 0x90, 0x91, 0xbe, 0xc4, 0xb6, 0x6a, 0xd9, 0xfb, 0x0e,
0xba, 0x0c, 0x25, 0x89, 0x22, 0xac, 0x22, 0x00, 0xa0, 0xd7, 0x21, 0x4f, 0x25, 0xe5, 0x26, 0x51,
0x5b, 0xba, 0x96, 0x7c, 0xa6, 0x10, 0x4f, 0x8d, 0xe3, 0xa3, 0x16, 0xd4, 0x88, 0xa7, 0xbb, 0x5e,
0xbb, 0xef, 0x10, 0x76, 0xcf, 0xcc, 0x70, 0xca, 0x4b, 0x6a, 0x94, 0x83, 0x8c, 0x95, 0x9b, 0xa4,
0xbb, 0x2d, 0x30, 0xb5, 0x2a, 0xa3, 0xf4, 0x97, 0xe8, 0x3e, 0x54, 0xb0, 0x6d, 0x04, 0x8c, 0x72,
0xa9, 0x19, 0x95, 0xb1, 0x6d, 0x48, 0x36, 0xc1, 0xfd, 0xe4, 0xd3, 0xdf, 0xcf, 0x0f, 0x15, 0x68,
0x0c, 0x5f, 0xd0, 0x34, 0x81, 0xf2, 0x4d, 0x4e, 0x84, 0xf9, 0x05, 0x8d, 0xf5, 0x70, 0x79, 0x49,
0x9a, 0x20, 0x51, 0x4d, 0x78, 0x2e, 0x90, 0x86, 0x7d, 0x79, 0x6a, 0xc6, 0xf2, 0x85, 0x02, 0x73,
0xf1, 0xbd, 0xa6, 0x39, 0xf7, 0xff, 0x40, 0xde, 0xb4, 0xf7, 0x1d, 0xff, 0xd8, 0xf3, 0x63, 0xfc,
0x8c, 0xee, 0xc5, 0x91, 0x55, 0x0b, 0x2e, 0xad, 0x63, 0xaf, 0x65, 0x13, 0xec, 0x7a, 0xcb, 0xa6,
0xdd, 0x73, 0xba, 0xdb, 0xba, 0x77, 0x30, 0x85, 0x8f, 0x44, 0xcc, 0x3d, 0x13, 0x33, 0x77, 0xf5,
0x37, 0x0a, 0x5c, 0x4e, 0xde, 0x4f, 0x1c, 0xbd, 0x09, 0xc5, 0x7d, 0x13, 0xf7, 0x0c, 0xaa, 0x33,
0x85, 0xe9, 0x4c, 0xae, 0xa9, 0xaf, 0xf4, 0x29, 0xb2, 0x38, 0xe1, 0xb5, 0x11, 0x06, 0xba, 0xe3,
0xb9, 0xa6, 0xdd, 0xdd, 0x30, 0x89, 0xa7, 0x71, 0xfc, 0x90, 0x3e, 0xb3, 0xe9, 0x2d, 0xf3, 0x07,
0x0a, 0xcc, 0xaf, 0x63, 0x6f, 0x45, 0x86, 0x5a, 0xfa, 0xdd, 0x24, 0x9e, 0xd9, 0x21, 0x4f, 0xb7,
0x88, 0x48, 0xc8, 0x99, 0xea, 0x8f, 0x15, 0xb8, 0x3a, 0x52, 0x18, 0xa1, 0x3a, 0x11, 0x4a, 0xfc,
0x40, 0x9b, 0x1c, 0x4a, 0xfe, 0x0f, 0x1f, 0x7f, 0xa4, 0xf7, 0x06, 0x78, 0x5b, 0x37, 0x5d, 0x1e,
0x4a, 0x4e, 0x19, 0x58, 0x7f, 0xaf, 0xc0, 0x95, 0x75, 0xec, 0x6d, 0xfb, 0x69, 0xe6, 0x19, 0x6a,
0x27, 0x45, 0x45, 0xf1, 0x23, 0x7e, 0x99, 0x89, 0xd2, 0x3e, 0x13, 0xf5, 0xcd, 0x33, 0x3f, 0x08,
0x39, 0xe4, 0x0a, 0xaf, 0x05, 0x84, 0xf2, 0xd4, 0xc7, 0x59, 0xa8, 0x7c, 0x24, 0xea, 0x03, 0x96,
0x46, 0xe2, 0x7a, 0x50, 0x92, 0xf5, 0x10, 0x2a, 0x29, 0x92, 0xaa, 0x8c, 0x75, 0xa8, 0x12, 0x8c,
0x0f, 0x4f, 0x93, 0x34, 0x2a, 0x94, 0x50, 0x06, 0xfb, 0x0d, 0x38, 0x3b, 0xb0, 0xf7, 0x69, 0x59,
0x8b, 0x0d, 0x71, 0x0a, 0x5e, 0x5d, 0x4e, 0x8e, 0x3c, 0xc3, 0x84, 0xe8, 0x7d, 0x98, 0x8d, 0xf3,
0xca, 0xa7, 0xe2, 0x15, 0x27, 0x43, 0x2d, 0xa8, 0x1b, 0xae, 0xd3, 0xef, 0x63, 0xa3, 0x4d, 0x7c,
0x56, 0x85, 0x74, 0xac, 0x04, 0x9d, 0xcf, 0x4a, 0xfd, 0x52, 0x81, 0xb9, 0x07, 0xba, 0xd7, 0x39,
0x58, 0xb5, 0xc4, 0xe5, 0x4c, 0x61, 0xda, 0x6f, 0x43, 0xe9, 0xa1, 0xb8, 0x08, 0x3f, 0x7e, 0x5d,
0x4d, 0x10, 0x28, 0x7c, 0xe5, 0x5a, 0x40, 0xa1, 0x7e, 0xad, 0xc0, 0x79, 0xd6, 0x44, 0xf8, 0xd2,
0x7d, 0xf3, 0x4e, 0x36, 0xa1, 0x91, 0x40, 0x37, 0xa1, 0x66, 0xe9, 0xee, 0xe1, 0x4e, 0x80, 0x93,
0x67, 0x38, 0x31, 0xa8, 0xfa, 0x08, 0x40, 0xac, 0x36, 0x49, 0xf7, 0x14, 0xf2, 0xbf, 0x01, 0x33,
0x62, 0x57, 0xe1, 0x6f, 0x93, 0x2e, 0xd6, 0x47, 0x57, 0xff, 0xae, 0x40, 0x2d, 0x88, 0xa0, 0xcc,
0xab, 0x6a, 0x90, 0x91, 0xbe, 0x94, 0x69, 0xad, 0xa2, 0xb7, 0xa1, 0xc0, 0xfb, 0x47, 0xc1, 0xfb,
0x46, 0x94, 0xb7, 0xe8, 0x2d, 0x43, 0x61, 0x98, 0x01, 0x34, 0x41, 0x44, 0x75, 0x24, 0xa3, 0x0e,
0xef, 0x30, 0xb2, 0x5a, 0x08, 0x82, 0x5a, 0x30, 0x1b, 0x2d, 0xda, 0x7c, 0x9f, 0x59, 0x18, 0x15,
0x6d, 0x56, 0x75, 0x4f, 0x67, 0xc1, 0xa6, 0x16, 0xa9, 0xd9, 0x88, 0xfa, 0xef, 0x3c, 0x94, 0x43,
0xa7, 0x1c, 0x3a, 0x49, 0xfc, 0x4a, 0x33, 0x93, 0xe3, 0x66, 0x76, 0xb8, 0x73, 0xb8, 0x01, 0x35,
0x93, 0xe5, 0xea, 0xb6, 0x30, 0x45, 0x16, 0x5c, 0x4b, 0x5a, 0x95, 0x43, 0x85, 0x5f, 0xa0, 0x79,
0x28, 0xdb, 0x03, 0xab, 0xed, 0xec, 0xb7, 0x5d, 0xe7, 0x88, 0x88, 0x16, 0xa4, 0x64, 0x0f, 0xac,
0xff, 0xdf, 0xd7, 0x9c, 0x23, 0x12, 0x54, 0xb9, 0x85, 0x13, 0x56, 0xb9, 0xf3, 0x50, 0xb6, 0xf4,
0x47, 0x94, 0x6b, 0xdb, 0x1e, 0x58, 0xac, 0x3b, 0xc9, 0x6a, 0x25, 0x4b, 0x7f, 0xa4, 0x39, 0x47,
0x5b, 0x03, 0x0b, 0x2d, 0x42, 0xbd, 0xa7, 0x13, 0xaf, 0x1d, 0x6e, 0x6f, 0x8a, 0xac, 0xbd, 0xa9,
0x51, 0xf8, 0xfd, 0xa0, 0xc5, 0x19, 0xae, 0x97, 0x4b, 0x53, 0xd4, 0xcb, 0x86, 0xd5, 0x0b, 0x18,
0x41, 0xfa, 0x7a, 0xd9, 0xb0, 0x7a, 0x92, 0xcd, 0x1b, 0x30, 0xb3, 0xc7, 0x2a, 0x20, 0xd2, 0x28,
0x8f, 0x8c, 0x50, 0x6b, 0xb4, 0xf8, 0xe1, 0x85, 0x92, 0xe6, 0xa3, 0xa3, 0xb7, 0xa0, 0xc4, 0x52,
0x0f, 0xa3, 0xad, 0xa4, 0xa2, 0x0d, 0x08, 0x28, 0xb5, 0x81, 0x7b, 0x9e, 0xce, 0xa8, 0xab, 0xe9,
0xa8, 0x25, 0x01, 0x7a, 0x05, 0xce, 0x75, 0x5c, 0xac, 0x7b, 0xd8, 0x58, 0x3e, 0x5e, 0x71, 0xac,
0xbe, 0xce, 0x8c, 0xa9, 0x51, 0x5b, 0x50, 0x16, 0x8b, 0x5a, 0xd2, 0x27, 0x1a, 0x18, 0x3a, 0x72,
0xb5, 0xe6, 0x3a, 0x56, 0x63, 0x96, 0x07, 0x86, 0x28, 0x14, 0x5d, 0x01, 0xf0, 0x43, 0xb7, 0xee,
0x35, 0xea, 0xec, 0x16, 0x4b, 0x02, 0xf2, 0x9e, 0xa7, 0x7e, 0x0e, 0xe7, 0x03, 0x0b, 0x09, 0xdd,
0xc6, 0xf0, 0xc5, 0x2a, 0xa7, 0xbd, 0xd8, 0xf1, 0xb5, 0xeb, 0x3f, 0x73, 0x30, 0xb7, 0xa3, 0x3f,
0xc4, 0x4f, 0xbf, 0x4c, 0x4e, 0x15, 0x8f, 0x37, 0xe0, 0x2c, 0xab, 0x8c, 0x97, 0x42, 0xf2, 0x8c,
0xc9, 0xc0, 0xe1, 0xeb, 0x1c, 0x26, 0x44, 0xef, 0xd2, 0xd2, 0x01, 0x77, 0x0e, 0xb7, 0x1d, 0x33,
0xc8, 0xbe, 0x57, 0x12, 0xf8, 0xac, 0x48, 0x2c, 0x2d, 0x4c, 0x81, 0xb6, 0x87, 0x43, 0x1b, 0xcf,
0xbb, 0x2f, 0x8c, 0xed, 0xbf, 0x02, 0xed, 0xc7, 0x23, 0x1c, 0x6a, 0xc0, 0x8c, 0xc8, 0xee, 0xcc,
0xef, 0x8b, 0x9a, 0xbf, 0x44, 0xdb, 0x70, 0x8e, 0x9f, 0x60, 0x47, 0x18, 0x35, 0x3f, 0x7c, 0x31,
0xd5, 0xe1, 0x93, 0x48, 0xa3, 0x3e, 0x51, 0x3a, 0xa9, 0x4f, 0x34, 0x60, 0x46, 0xd8, 0x29, 0x8b,
0x05, 0x45, 0xcd, 0x5f, 0xd2, 0x6b, 0x36, 0xad, 0xbe, 0xe3, 0x7a, 0xa6, 0xdd, 0x6d, 0x94, 0xd9,
0xb7, 0x00, 0x40, 0x5b, 0x0c, 0x08, 0xf4, 0x39, 0x61, 0x52, 0xf0, 0x0e, 0x14, 0xa5, 0x85, 0x67,
0x52, 0x5b, 0xb8, 0xa4, 0x89, 0xc7, 0xe8, 0x6c, 0x2c, 0x46, 0xab, 0xff, 0x50, 0xa0, 0xb2, 0x4a,
0x8f, 0xb4, 0xe1, 0x74, 0x59, 0x46, 0xb9, 0x01, 0x35, 0x17, 0x77, 0x1c, 0xd7, 0x68, 0x63, 0xdb,
0x73, 0x4d, 0xcc, 0xbb, 0xd1, 0x9c, 0x56, 0xe5, 0xd0, 0xfb, 0x1c, 0x48, 0xd1, 0x68, 0xd8, 0x25,
0x9e, 0x6e, 0xf5, 0xdb, 0xfb, 0xd4, 0xbd, 0x33, 0x1c, 0x4d, 0x42, 0x99, 0x77, 0x5f, 0x83, 0x4a,
0x80, 0xe6, 0x39, 0x6c, 0xff, 0x9c, 0x56, 0x96, 0xb0, 0x5d, 0x07, 0x3d, 0x0f, 0x35, 0xa6, 0xd3,
0x76, 0xcf, 0xe9, 0xb6, 0x69, 0xe7, 0x26, 0x92, 0x4d, 0xc5, 0x10, 0x62, 0xd1, 0xbb, 0x8a, 0x62,
0x11, 0xf3, 0x33, 0x2c, 0xd2, 0x8d, 0xc4, 0xda, 0x31, 0x3f, 0xc3, 0x34, 0xd7, 0x57, 0x69, 0xee,
0xdc, 0x72, 0x0c, 0xbc, 0x7b, 0xca, 0x4a, 0x23, 0xc5, 0xd4, 0xee, 0x32, 0x94, 0xe4, 0x09, 0xc4,
0x91, 0x02, 0x00, 0x5a, 0x83, 0x9a, 0x5f, 0x84, 0xb6, 0x79, 0x6f, 0x91, 0x1b, 0x59, 0xf9, 0x85,
0xb2, 0x1f, 0xd1, 0xaa, 0x3e, 0x19, 0x5b, 0xaa, 0x6b, 0x50, 0x09, 0x7f, 0xa6, 0xbb, 0xee, 0xc4,
0x0d, 0x45, 0x02, 0xa8, 0x35, 0x6e, 0x0d, 0x2c, 0x7a, 0xa7, 0x22, 0xb0, 0xf8, 0x4b, 0xf5, 0x0b,
0x05, 0xaa, 0x22, 0x65, 0xef, 0xc8, 0xa9, 0x32, 0x3b, 0x9a, 0xc2, 0x8e, 0xc6, 0xfe, 0x46, 0xff,
0x1b, 0x1d, 0x49, 0x3d, 0x9f, 0x18, 0x04, 0x18, 0x13, 0x56, 0x1d, 0x47, 0xf2, 0x75, 0x9a, 0x5e,
0xf6, 0x31, 0x35, 0x34, 0x71, 0x35, 0xcc, 0xd0, 0x1a, 0x30, 0xa3, 0x1b, 0x86, 0x8b, 0x09, 0x11,
0x72, 0xf8, 0x4b, 0xfa, 0xe5, 0x21, 0x76, 0x89, 0x6f, 0xf2, 0x59, 0xcd, 0x5f, 0xa2, 0xb7, 0xa0,
0x28, 0xcb, 0xe9, 0x6c, 0x52, 0x09, 0x15, 0x96, 0x53, 0xf4, 0x5e, 0x92, 0x42, 0xfd, 0x63, 0x06,
0x6a, 0x42, 0x61, 0xcb, 0x22, 0xa7, 0x8e, 0x77, 0xbe, 0x65, 0xa8, 0xec, 0x07, 0xbe, 0x3f, 0x6e,
0xc6, 0x12, 0x0e, 0x11, 0x11, 0x9a, 0x49, 0x0e, 0x18, 0xcd, 0xea, 0xb9, 0xa9, 0xb2, 0x7a, 0xfe,
0xa4, 0x11, 0x6c, 0xb8, 0xce, 0x2b, 0x24, 0xd4, 0x79, 0xea, 0x77, 0xa0, 0x1c, 0x62, 0xc0, 0x22,
0x34, 0x1f, 0xce, 0x08, 0x8d, 0xf9, 0x4b, 0x74, 0x2f, 0xa8, 0x6d, 0xb8, 0xaa, 0x2e, 0x26, 0xc8,
0x12, 0x2b, 0x6b, 0xd4, 0xdf, 0x2a, 0x50, 0x10, 0x9c, 0xaf, 0x42, 0x59, 0x04, 0x1d, 0x56, 0xf7,
0x71, 0xee, 0x20, 0x40, 0xb4, 0xf0, 0x7b, 0x72, 0x51, 0xe7, 0x22, 0x14, 0x63, 0xf1, 0x66, 0x46,
0xa4, 0x05, 0xff, 0x53, 0x28, 0xc8, 0xd0, 0x4f, 0x2c, 0xbe, 0x7c, 0xad, 0xb0, 0xc1, 0xb2, 0x86,
0x3b, 0xce, 0x43, 0xec, 0x1e, 0x4f, 0x3f, 0xbe, 0x7b, 0x33, 0x64, 0xd0, 0x29, 0xfb, 0x43, 0x49,
0x80, 0xde, 0x0c, 0xd4, 0x9d, 0x4d, 0x9a, 0x5e, 0x84, 0x23, 0x8c, 0x30, 0xc7, 0x40, 0xed, 0x3f,
0xe1, 0x83, 0xc8, 0xe8, 0x51, 0x4e, 0x5b, 0xd7, 0x3c, 0x91, 0xb6, 0x43, 0xfd, 0x99, 0x02, 0x17,
0xd7, 0xb1, 0xb7, 0x16, 0x6d, 0xee, 0x9f, 0xb5, 0x54, 0x16, 0x34, 0x93, 0x84, 0x9a, 0xe6, 0xd6,
0x9b, 0x50, 0x94, 0x63, 0x0a, 0x3e, 0x22, 0x96, 0x6b, 0xf5, 0xfb, 0x0a, 0x34, 0xc4, 0x2e, 0x6c,
0x4f, 0x5a, 0x52, 0xf7, 0xb0, 0x87, 0x8d, 0x6f, 0xba, 0x6f, 0xfe, 0xab, 0x02, 0xf5, 0x70, 0xc4,
0x67, 0x41, 0xfb, 0x35, 0xc8, 0xb3, 0xf1, 0x84, 0x90, 0x60, 0xa2, 0xb1, 0x72, 0x6c, 0x1a, 0x32,
0x58, 0x99, 0xb7, 0x2b, 0x93, 0x93, 0x58, 0x06, 0x69, 0x27, 0x7b, 0xf2, 0xb4, 0x23, 0xd2, 0xb0,
0x33, 0xa0, 0x7c, 0xf9, 0xf8, 0x2f, 0x00, 0xa8, 0x5f, 0x65, 0xa0, 0x11, 0xf4, 0x23, 0xdf, 0x78,
0xdc, 0x1f, 0x51, 0xad, 0x66, 0x9f, 0x50, 0xb5, 0x9a, 0x9b, 0x3e, 0xd6, 0xe7, 0x93, 0x62, 0xfd,
0x5f, 0x32, 0x50, 0x0b, 0xb4, 0xb6, 0xdd, 0xd3, 0x6d, 0x34, 0x07, 0x85, 0x7e, 0x4f, 0x0f, 0xa6,
0x8f, 0x62, 0x85, 0x76, 0x64, 0x9d, 0x13, 0xd5, 0xd3, 0x4b, 0x49, 0x77, 0x38, 0xe2, 0x22, 0xb4,
0x18, 0x0b, 0xda, 0x0e, 0xf2, 0x86, 0x82, 0x35, 0xf5, 0xa2, 0xb6, 0xe2, 0xc6, 0x42, 0xfb, 0xf9,
0xdb, 0x80, 0xc4, 0x0d, 0xb7, 0x4d, 0xbb, 0x4d, 0x70, 0xc7, 0xb1, 0x0d, 0x7e, 0xf7, 0x79, 0xad,
0x2e, 0xbe, 0xb4, 0xec, 0x1d, 0x0e, 0x47, 0xaf, 0x41, 0xce, 0x3b, 0xee, 0xf3, 0x28, 0x5e, 0x4b,
0x8c, 0x8e, 0x81, 0x5c, 0xbb, 0xc7, 0x7d, 0xac, 0x31, 0x74, 0x34, 0x0f, 0x40, 0x59, 0x79, 0xae,
0xfe, 0x50, 0xa4, 0xc4, 0x9c, 0x16, 0x82, 0x50, 0x6b, 0xf6, 0x75, 0x38, 0xc3, 0x53, 0x87, 0x58,
0xaa, 0x7f, 0xca, 0x40, 0x3d, 0x60, 0xa9, 0x61, 0x32, 0xe8, 0x79, 0x23, 0xf5, 0x37, 0xbe, 0x19,
0x9c, 0x54, 0x37, 0xbc, 0x0b, 0x65, 0x71, 0x9f, 0x27, 0xb0, 0x07, 0xe0, 0x24, 0x1b, 0x63, 0x0c,
0x34, 0xff, 0x84, 0x0c, 0xb4, 0x70, 0x42, 0x03, 0x55, 0x77, 0x60, 0xce, 0x8f, 0x7b, 0x01, 0xc2,
0x26, 0xf6, 0xf4, 0x31, 0x05, 0xc7, 0x55, 0x28, 0xf3, 0x7c, 0xc6, 0x13, 0x39, 0x2f, 0xd5, 0x61,
0x4f, 0x76, 0xb8, 0xea, 0x77, 0xe1, 0x3c, 0x8b, 0x1b, 0xf1, 0x51, 0x6e, 0x9a, 0xb9, 0xba, 0x2a,
0x1b, 0x01, 0x5a, 0xf4, 0x73, 0xeb, 0x2e, 0x69, 0x11, 0x98, 0xba, 0x01, 0xcf, 0xc5, 0xf8, 0x4f,
0x91, 0x17, 0x68, 0x29, 0x34, 0xb7, 0x13, 0xfd, 0x59, 0xf4, 0xf4, 0xd9, 0xef, 0x8a, 0x9c, 0xdc,
0xb6, 0x4d, 0x23, 0x6e, 0x5f, 0x06, 0x7a, 0x07, 0x4a, 0x36, 0x3e, 0x6a, 0x87, 0x83, 0x6f, 0x8a,
0x01, 0x5d, 0xd1, 0xc6, 0x47, 0xec, 0x2f, 0x75, 0x0b, 0x2e, 0x0c, 0x89, 0x3a, 0xcd, 0xd9, 0xff,
0xac, 0xc0, 0xc5, 0x55, 0xd7, 0xe9, 0x7f, 0x64, 0xba, 0xde, 0x40, 0xef, 0x45, 0x7f, 0x18, 0x79,
0x3a, 0x6d, 0xdc, 0xfb, 0xa1, 0x34, 0xcc, 0xe3, 0xf2, 0xed, 0x04, 0x73, 0x1d, 0x16, 0x4a, 0x1c,
0x3a, 0x94, 0xb4, 0xff, 0x95, 0x4d, 0x12, 0x5e, 0xe0, 0x4d, 0x48, 0x36, 0x69, 0xaa, 0x94, 0xc4,
0xa9, 0x4f, 0xf6, 0xb4, 0x53, 0x9f, 0x11, 0x9e, 0x9f, 0x7b, 0x42, 0x9e, 0x7f, 0xe2, 0x36, 0xe4,
0x7d, 0x88, 0x4e, 0xe4, 0x58, 0xc8, 0x3d, 0xd5, 0x28, 0x6f, 0x19, 0x20, 0x98, 0x4e, 0x89, 0x57,
0x2d, 0x69, 0xd8, 0x84, 0xa8, 0xe8, 0x6d, 0xc9, 0x28, 0xcb, 0xa6, 0xca, 0x91, 0x79, 0xc9, 0x07,
0xd0, 0x4c, 0xb2, 0xd2, 0x69, 0x2c, 0xff, 0xab, 0x0c, 0x40, 0x8b, 0x4d, 0x87, 0x76, 0x75, 0x72,
0x78, 0xba, 0x8a, 0xf2, 0x3a, 0x54, 0x03, 0x83, 0x09, 0xfc, 0x3d, 0x6c, 0x45, 0x06, 0x75, 0x09,
0x59, 0xd8, 0x52, 0x9c, 0xa1, 0x62, 0xd7, 0x60, 0x7c, 0x42, 0x5e, 0xc3, 0x8d, 0x22, 0x16, 0xf4,
0xd0, 0x25, 0x28, 0xb9, 0xce, 0x51, 0x9b, 0xba, 0x99, 0xc1, 0x72, 0x6b, 0x51, 0x2b, 0xba, 0xce,
0x11, 0x75, 0x3e, 0x03, 0x5d, 0x80, 0x19, 0x4f, 0x27, 0x87, 0x94, 0x7f, 0x81, 0xa7, 0x3b, 0xba,
0x6c, 0x19, 0xe8, 0x3c, 0xe4, 0xf7, 0xcd, 0x1e, 0x26, 0x8d, 0x19, 0xc6, 0x92, 0x2f, 0xd0, 0xeb,
0xfe, 0xfb, 0x85, 0x62, 0xea, 0xdf, 0x5f, 0xf9, 0x13, 0x86, 0xaf, 0x15, 0x98, 0x0d, 0xb4, 0xc6,
0x02, 0x10, 0x8d, 0x69, 0x2c, 0x9e, 0xad, 0x38, 0x06, 0x0f, 0x15, 0xb5, 0x11, 0x3f, 0xb1, 0x70,
0x42, 0x1e, 0xb5, 0x02, 0x92, 0x71, 0x75, 0x39, 0x3d, 0x17, 0x3d, 0xb4, 0x69, 0xf8, 0xbf, 0xf0,
0x14, 0x5c, 0xe7, 0xa8, 0x65, 0x48, 0x6d, 0xf0, 0x67, 0x5c, 0xbc, 0x0a, 0xa5, 0xda, 0x58, 0x61,
0x2f, 0xb9, 0xae, 0x43, 0x15, 0xbb, 0xae, 0xe3, 0xb6, 0x2d, 0x4c, 0x88, 0xde, 0xc5, 0xa2, 0xe8,
0xaa, 0x30, 0xe0, 0x26, 0x87, 0xa9, 0xff, 0xc9, 0x40, 0x2d, 0x38, 0x8a, 0xff, 0xbb, 0x8e, 0x69,
0xf8, 0xbf, 0xeb, 0x98, 0x06, 0x0d, 0xe6, 0x2e, 0x0f, 0x85, 0xa1, 0x60, 0x2e, 0x20, 0x2d, 0x83,
0xe6, 0x41, 0xea, 0x60, 0xb6, 0x63, 0xe0, 0xe0, 0x62, 0xc1, 0x07, 0x89, 0x7b, 0x8d, 0xd8, 0x47,
0x2e, 0x85, 0x7d, 0xe4, 0x53, 0xd8, 0x47, 0x21, 0xc1, 0x3e, 0xe6, 0xa0, 0xb0, 0x37, 0xe8, 0x1c,
0x62, 0x4f, 0x94, 0x47, 0x62, 0x15, 0xb5, 0x9b, 0x62, 0xcc, 0x6e, 0xa4, 0x79, 0x94, 0xc2, 0xe6,
0x71, 0x09, 0x4a, 0xfc, 0xc7, 0x85, 0xb6, 0x47, 0xd8, 0x94, 0x35, 0xab, 0x15, 0x39, 0x60, 0x97,
0xa0, 0x37, 0xfc, 0xde, 0xa1, 0x9c, 0xe4, 0xe8, 0x2c, 0xe2, 0xc4, 0x2c, 0x44, 0x74, 0x0e, 0xea,
0x27, 0x80, 0x82, 0x2f, 0xd3, 0xf5, 0x72, 0x31, 0xd5, 0x67, 0xe2, 0xaa, 0x57, 0x7f, 0xa7, 0xc0,
0xd9, 0xf0, 0x66, 0xa7, 0x4d, 0x68, 0xef, 0x40, 0x99, 0xcf, 0x90, 0xdb, 0xd4, 0xa1, 0x44, 0x37,
0x77, 0x65, 0xec, 0x99, 0x35, 0x30, 0x83, 0xb8, 0x72, 0x1d, 0xaa, 0x47, 0x8e, 0x7b, 0x68, 0xda,
0xdd, 0x36, 0x95, 0xcc, 0x37, 0xe3, 0x8a, 0x00, 0x6e, 0x51, 0x98, 0xfa, 0xa5, 0x02, 0xf3, 0x1f,
0xf6, 0x0d, 0xdd, 0xc3, 0xa1, 0xcc, 0x3e, 0xed, 0x03, 0x8f, 0xd7, 0xfc, 0x37, 0x16, 0x99, 0x74,
0x73, 0x50, 0x8e, 0x7d, 0xeb, 0xe7, 0x0a, 0x9c, 0x1d, 0xea, 0xfd, 0x50, 0x0d, 0xe0, 0x43, 0xbb,
0x23, 0x9a, 0xe2, 0xfa, 0x19, 0x54, 0x81, 0xa2, 0xdf, 0x22, 0xd7, 0x15, 0x54, 0x86, 0x99, 0x5d,
0x87, 0x61, 0xd7, 0x33, 0xa8, 0x0e, 0x15, 0x4e, 0x38, 0xe8, 0x74, 0x30, 0x21, 0xf5, 0xac, 0x84,
0xac, 0xe9, 0x66, 0x6f, 0xe0, 0xe2, 0x7a, 0x0e, 0x55, 0xa1, 0xb4, 0xeb, 0x68, 0xb8, 0x87, 0x75,
0x82, 0xeb, 0x79, 0x84, 0xa0, 0x26, 0x16, 0x3e, 0x51, 0x21, 0x04, 0xf3, 0xc9, 0x66, 0x6e, 0xed,
0x87, 0xbb, 0x24, 0xda, 0x3a, 0xa0, 0x0b, 0x70, 0xee, 0x43, 0xdb, 0xc0, 0xfb, 0xa6, 0x8d, 0x8d,
0xe0, 0x53, 0xfd, 0x0c, 0x3a, 0x07, 0xb3, 0x2d, 0xdb, 0xc6, 0x6e, 0x08, 0xa8, 0x50, 0xe0, 0x26,
0x76, 0xbb, 0x38, 0x04, 0xcc, 0xa0, 0xb3, 0x50, 0xdd, 0x34, 0x1f, 0x85, 0x40, 0xd9, 0xa5, 0xbf,
0x3d, 0x07, 0xa5, 0x55, 0xdd, 0xd3, 0x57, 0x1c, 0xc7, 0x35, 0x50, 0x1f, 0x10, 0x7b, 0x0e, 0x64,
0xf5, 0x1d, 0x5b, 0xbe, 0x9b, 0x43, 0xaf, 0x8c, 0x48, 0x70, 0xc3, 0xa8, 0xe2, 0x0a, 0x9b, 0x37,
0x47, 0x50, 0xc4, 0xd0, 0xd5, 0x33, 0xc8, 0x62, 0x3b, 0xd2, 0xd6, 0x6b, 0xd7, 0xec, 0x1c, 0xfa,
0x3f, 0xfc, 0x8e, 0xd9, 0x31, 0x86, 0xea, 0xef, 0x18, 0x7b, 0x8e, 0x27, 0x16, 0xfc, 0xcd, 0x96,
0xef, 0x7f, 0xea, 0x19, 0xf4, 0x29, 0x9c, 0x5f, 0xc7, 0x21, 0x9b, 0xf3, 0x37, 0x5c, 0x1a, 0xbd,
0xe1, 0x10, 0xf2, 0x09, 0xb7, 0xdc, 0x80, 0x3c, 0x9b, 0xb3, 0xa0, 0x24, 0xb3, 0x0c, 0x3f, 0x1e,
0x6f, 0x2e, 0x8c, 0x46, 0x90, 0xdc, 0x3e, 0x81, 0xd9, 0xd8, 0xe3, 0x58, 0xf4, 0x62, 0x02, 0x59,
0xf2, 0x33, 0xe7, 0xe6, 0xad, 0x34, 0xa8, 0x72, 0xaf, 0x2e, 0xd4, 0xa2, 0x8f, 0x89, 0xd0, 0x62,
0x02, 0x7d, 0xe2, 0xc3, 0xc6, 0xe6, 0x8b, 0x29, 0x30, 0xe5, 0x46, 0x16, 0xd4, 0xe3, 0x8f, 0x35,
0xd1, 0xad, 0xb1, 0x0c, 0xa2, 0xe6, 0xf6, 0x52, 0x2a, 0x5c, 0xb9, 0xdd, 0x31, 0x33, 0x82, 0xa1,
0xc7, 0x82, 0xe8, 0x4e, 0x32, 0x9b, 0x51, 0xaf, 0x18, 0x9b, 0x77, 0x53, 0xe3, 0xcb, 0xad, 0xbf,
0xc7, 0xe7, 0xbb, 0x49, 0x0f, 0xee, 0xd0, 0xab, 0xc9, 0xec, 0xc6, 0xbc, 0x14, 0x6c, 0x2e, 0x9d,
0x84, 0x44, 0x0a, 0xf1, 0x39, 0x1b, 0xcc, 0x26, 0x3c, 0x5a, 0x8b, 0xfb, 0x9d, 0xcf, 0x6f, 0xf4,
0x6b, 0xbc, 0xe6, 0xab, 0x27, 0xa0, 0x90, 0x02, 0x38, 0xf1, 0xe7, 0xb0, 0xbe, 0x1b, 0xde, 0x9d,
0x68, 0x35, 0xa7, 0xf3, 0xc1, 0x8f, 0x61, 0x36, 0xf6, 0x13, 0x7b, 0xa2, 0xd7, 0x24, 0xff, 0x0c,
0xdf, 0x1c, 0x97, 0xa6, 0xb9, 0x4b, 0xc6, 0xe6, 0xdc, 0x68, 0x84, 0xf5, 0x27, 0xcc, 0xc2, 0x9b,
0xb7, 0xd2, 0xa0, 0xca, 0x83, 0x10, 0x16, 0x2e, 0x63, 0xb3, 0x62, 0x74, 0x3b, 0x99, 0x47, 0xf2,
0x9c, 0xbb, 0xf9, 0x72, 0x4a, 0x6c, 0xb9, 0x69, 0x1b, 0x60, 0x1d, 0x7b, 0x9b, 0xd8, 0x73, 0xa9,
0x8d, 0xdc, 0x4c, 0x54, 0x79, 0x80, 0xe0, 0x6f, 0xf3, 0xc2, 0x44, 0x3c, 0xb9, 0xc1, 0xb7, 0x00,
0xf9, 0x29, 0x36, 0xf4, 0xc0, 0xe3, 0xfa, 0xd8, 0x71, 0x1a, 0x9f, 0x7d, 0x4d, 0xba, 0x9b, 0x4f,
0xa1, 0xbe, 0xa9, 0xdb, 0xb4, 0x91, 0x0a, 0xf8, 0xde, 0x4e, 0x14, 0x2c, 0x8e, 0x36, 0x42, 0x5b,
0x23, 0xb1, 0xe5, 0x61, 0x8e, 0x64, 0x0e, 0xd5, 0xa5, 0x0b, 0xe2, 0x78, 0x6c, 0x09, 0xb4, 0x11,
0x43, 0x1c, 0x11, 0x5b, 0xc6, 0xe0, 0xcb, 0x8d, 0x1f, 0x2b, 0xec, 0xd1, 0x75, 0x0c, 0xe1, 0x81,
0xe9, 0x1d, 0x6c, 0xf7, 0x74, 0x9b, 0xa4, 0x11, 0x81, 0x21, 0x9e, 0x40, 0x04, 0x81, 0x2f, 0x45,
0x30, 0xa0, 0x1a, 0x99, 0x56, 0xa1, 0xa4, 0x57, 0x1a, 0x49, 0xf3, 0xb2, 0xe6, 0xe2, 0x64, 0x44,
0xb9, 0xcb, 0x01, 0x54, 0x7d, 0x7b, 0xe5, 0xca, 0x7d, 0x71, 0x94, 0xa4, 0x01, 0xce, 0x08, 0x77,
0x4b, 0x46, 0x0d, 0xbb, 0xdb, 0x70, 0x33, 0x8e, 0xd2, 0x0d, 0x71, 0xc6, 0xb9, 0xdb, 0xe8, 0x0e,
0x9f, 0xc7, 0x93, 0xd8, 0xe0, 0x2b, 0x39, 0x58, 0x25, 0xce, 0xf1, 0x12, 0xe3, 0xc9, 0x88, 0x39,
0x9a, 0x7a, 0x06, 0x3d, 0x80, 0x02, 0xaf, 0xe6, 0xd1, 0xf3, 0xe3, 0x0b, 0x7d, 0xc1, 0xfd, 0xc6,
0x04, 0x2c, 0xc9, 0xf8, 0x10, 0x2e, 0x8c, 0x28, 0xf3, 0x13, 0xf3, 0xdc, 0xf8, 0x96, 0x60, 0x82,
0x97, 0x2f, 0xfd, 0x3a, 0x0f, 0x45, 0xff, 0xa7, 0xff, 0x67, 0x50, 0xc3, 0x3e, 0x83, 0xa2, 0xf2,
0x63, 0x98, 0x8d, 0xbd, 0x21, 0x4e, 0xb4, 0x91, 0xe4, 0x77, 0xc6, 0x93, 0x82, 0xe6, 0x03, 0xf1,
0x9f, 0x85, 0x32, 0xbf, 0xbc, 0x30, 0xaa, 0x30, 0x8d, 0xa7, 0x96, 0x09, 0x8c, 0x9f, 0x7a, 0x22,
0xd9, 0x02, 0x08, 0x05, 0xfa, 0xf1, 0xbf, 0xc7, 0xd0, 0xd8, 0x35, 0x49, 0xe0, 0xcd, 0x13, 0xba,
0xc7, 0x78, 0x76, 0xcb, 0xf7, 0xbe, 0xfd, 0x6a, 0xd7, 0xf4, 0x0e, 0x06, 0x7b, 0xf4, 0xcb, 0x5d,
0x8e, 0xfa, 0xb2, 0xe9, 0x88, 0xbf, 0xee, 0xfa, 0x06, 0x72, 0x97, 0x51, 0xdf, 0xa5, 0x7b, 0xf4,
0xf7, 0xf6, 0x0a, 0x6c, 0x75, 0xef, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd1, 0xa4, 0x8b, 0x98,
0xd3, 0x3a, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -343,6 +343,8 @@ message LoadPartitionsRequest {
string collection_name = 3;
// The partition names you want to load
repeated string partition_names = 4;
// The replicas number you would load, 1 by default
int32 replica_number = 5;
}
/*
@ -755,6 +757,7 @@ message LoadBalanceRequest {
int64 src_nodeID = 2;
repeated int64 dst_nodeIDs = 3;
repeated int64 sealed_segmentIDs = 4;
string collectionName = 5;
}
message ManualCompactionRequest {
@ -857,8 +860,6 @@ message ShardReplica {
repeated int64 node_ids = 4;
}
service ProxyService {
rpc RegisterLink(RegisterLinkRequest) returns (RegisterLinkResponse) {}
}
@ -911,3 +912,4 @@ message ListCredUsersRequest {
// Not useful for now
common.MsgBase base = 1;
}

View File

@ -1417,7 +1417,9 @@ type LoadPartitionsRequest struct {
// The collection name in milvus
CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
// The partition names you want to load
PartitionNames []string `protobuf:"bytes,4,rep,name=partition_names,json=partitionNames,proto3" json:"partition_names,omitempty"`
PartitionNames []string `protobuf:"bytes,4,rep,name=partition_names,json=partitionNames,proto3" json:"partition_names,omitempty"`
// The replicas number you would load, 1 by default
ReplicaNumber int32 `protobuf:"varint,5,opt,name=replica_number,json=replicaNumber,proto3" json:"replica_number,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1476,6 +1478,13 @@ func (m *LoadPartitionsRequest) GetPartitionNames() []string {
return nil
}
func (m *LoadPartitionsRequest) GetReplicaNumber() int32 {
if m != nil {
return m.ReplicaNumber
}
return 0
}
//
// Release specific partitions data of one collection from query nodes.
// Then you can not get these data as result when you do vector search on this collection.
@ -4474,6 +4483,7 @@ type LoadBalanceRequest struct {
SrcNodeID int64 `protobuf:"varint,2,opt,name=src_nodeID,json=srcNodeID,proto3" json:"src_nodeID,omitempty"`
DstNodeIDs []int64 `protobuf:"varint,3,rep,packed,name=dst_nodeIDs,json=dstNodeIDs,proto3" json:"dst_nodeIDs,omitempty"`
SealedSegmentIDs []int64 `protobuf:"varint,4,rep,packed,name=sealed_segmentIDs,json=sealedSegmentIDs,proto3" json:"sealed_segmentIDs,omitempty"`
CollectionName string `protobuf:"bytes,5,opt,name=collectionName,proto3" json:"collectionName,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -4532,6 +4542,13 @@ func (m *LoadBalanceRequest) GetSealedSegmentIDs() []int64 {
return nil
}
func (m *LoadBalanceRequest) GetCollectionName() string {
if m != nil {
return m.CollectionName
}
return ""
}
type ManualCompactionRequest struct {
CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
Timetravel uint64 `protobuf:"varint,2,opt,name=timetravel,proto3" json:"timetravel,omitempty"`
@ -5834,273 +5851,273 @@ func init() {
func init() { proto.RegisterFile("milvus.proto", fileDescriptor_02345ba45cc0e303) }
var fileDescriptor_02345ba45cc0e303 = []byte{
// 4247 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7c, 0x4b, 0x8f, 0x1c, 0xc9,
0x71, 0xf0, 0x54, 0xf7, 0xf4, 0x2b, 0xba, 0x7b, 0xa6, 0x99, 0xf3, 0xea, 0x6d, 0x92, 0xcb, 0x61,
0xed, 0x6b, 0x96, 0xd4, 0x92, 0xda, 0xe1, 0x6a, 0x57, 0xdf, 0xae, 0x3e, 0xaf, 0x48, 0x8e, 0x97,
0x1c, 0x2c, 0x49, 0x8f, 0x6a, 0x76, 0x25, 0xc8, 0x0b, 0xa2, 0x91, 0x53, 0x95, 0xd3, 0x53, 0x66,
0x75, 0x55, 0xab, 0x32, 0x9b, 0xc3, 0xd9, 0x93, 0x00, 0x19, 0x7e, 0x40, 0xeb, 0x15, 0x0c, 0x0b,
0xb6, 0x75, 0xb0, 0x61, 0xf8, 0x71, 0xf0, 0xc1, 0x86, 0x25, 0x19, 0xb6, 0xe0, 0x8b, 0x7d, 0xf0,
0xc1, 0x07, 0x03, 0x7e, 0x5c, 0x0c, 0xc3, 0x17, 0xff, 0x01, 0x1f, 0x0c, 0xf8, 0xe8, 0x83, 0x91,
0x8f, 0xaa, 0xae, 0xaa, 0xce, 0xea, 0xe9, 0x61, 0x8b, 0x9a, 0xe1, 0xad, 0x2b, 0x32, 0x22, 0x33,
0x32, 0x32, 0x32, 0x32, 0x32, 0x22, 0xb2, 0xa1, 0xd1, 0x77, 0xbd, 0xc7, 0x43, 0x7a, 0x6d, 0x10,
0x06, 0x2c, 0x40, 0x4b, 0xc9, 0xaf, 0x6b, 0xf2, 0xa3, 0xd3, 0xb0, 0x83, 0x7e, 0x3f, 0xf0, 0x25,
0xb0, 0xd3, 0xa0, 0xf6, 0x01, 0xe9, 0x63, 0xf9, 0x65, 0xfe, 0xbe, 0x01, 0xe8, 0x76, 0x48, 0x30,
0x23, 0x37, 0x3d, 0x17, 0x53, 0x8b, 0x7c, 0x6b, 0x48, 0x28, 0x43, 0x5f, 0x84, 0xf9, 0x3d, 0x4c,
0x49, 0xdb, 0x58, 0x37, 0x36, 0xea, 0x9b, 0x17, 0xae, 0xa5, 0xba, 0x55, 0xdd, 0xdd, 0xa7, 0xbd,
0x5b, 0x98, 0x12, 0x4b, 0x60, 0xa2, 0x35, 0xa8, 0x38, 0x7b, 0x5d, 0x1f, 0xf7, 0x49, 0xbb, 0xb0,
0x6e, 0x6c, 0xd4, 0xac, 0xb2, 0xb3, 0xf7, 0x00, 0xf7, 0x09, 0x7a, 0x0d, 0x16, 0xed, 0xc0, 0xf3,
0x88, 0xcd, 0xdc, 0xc0, 0x97, 0x08, 0x45, 0x81, 0xb0, 0x30, 0x02, 0x0b, 0xc4, 0x65, 0x28, 0x61,
0xce, 0x43, 0x7b, 0x5e, 0x34, 0xcb, 0x0f, 0x93, 0x42, 0x6b, 0x2b, 0x0c, 0x06, 0xcf, 0x8a, 0xbb,
0x78, 0xd0, 0x62, 0x72, 0xd0, 0xdf, 0x33, 0xe0, 0xdc, 0x4d, 0x8f, 0x91, 0xf0, 0x8c, 0x0a, 0xe5,
0x77, 0x0b, 0xb0, 0x26, 0x57, 0xed, 0x76, 0x8c, 0x7e, 0x9a, 0x5c, 0xae, 0x42, 0x59, 0x6a, 0x95,
0x60, 0xb3, 0x61, 0xa9, 0x2f, 0x74, 0x11, 0x80, 0x1e, 0xe0, 0xd0, 0xa1, 0x5d, 0x7f, 0xd8, 0x6f,
0x97, 0xd6, 0x8d, 0x8d, 0x92, 0x55, 0x93, 0x90, 0x07, 0xc3, 0x3e, 0xb2, 0xe0, 0x9c, 0x1d, 0xf8,
0xd4, 0xa5, 0x8c, 0xf8, 0xf6, 0x51, 0xd7, 0x23, 0x8f, 0x89, 0xd7, 0x2e, 0xaf, 0x1b, 0x1b, 0x0b,
0x9b, 0xaf, 0x68, 0xf9, 0xbe, 0x3d, 0xc2, 0xbe, 0xc7, 0x91, 0xad, 0x96, 0x9d, 0x81, 0x98, 0xdf,
0x35, 0x60, 0x85, 0x2b, 0xcc, 0x99, 0x10, 0x8c, 0xf9, 0xa7, 0x06, 0x2c, 0xdf, 0xc5, 0xf4, 0x6c,
0xac, 0xd2, 0x45, 0x00, 0xe6, 0xf6, 0x49, 0x97, 0x32, 0xdc, 0x1f, 0x88, 0x95, 0x9a, 0xb7, 0x6a,
0x1c, 0xb2, 0xcb, 0x01, 0xe6, 0x37, 0xa1, 0x71, 0x2b, 0x08, 0x3c, 0x8b, 0xd0, 0x41, 0xe0, 0x53,
0x82, 0x6e, 0x40, 0x99, 0x32, 0xcc, 0x86, 0x54, 0x31, 0x79, 0x5e, 0xcb, 0xe4, 0xae, 0x40, 0xb1,
0x14, 0x2a, 0xd7, 0xd7, 0xc7, 0xd8, 0x1b, 0x4a, 0x1e, 0xab, 0x96, 0xfc, 0x30, 0x3f, 0x81, 0x85,
0x5d, 0x16, 0xba, 0x7e, 0xef, 0xa7, 0xd8, 0x79, 0x2d, 0xea, 0xfc, 0x5f, 0x0d, 0x78, 0x61, 0x8b,
0x50, 0x3b, 0x74, 0xf7, 0xce, 0xc8, 0x76, 0x30, 0xa1, 0x31, 0x82, 0x6c, 0x6f, 0x09, 0x51, 0x17,
0xad, 0x14, 0x2c, 0xb3, 0x18, 0xa5, 0xec, 0x62, 0x7c, 0xbb, 0x04, 0x1d, 0xdd, 0xa4, 0x66, 0x11,
0xdf, 0xff, 0x8f, 0x77, 0x69, 0x41, 0x10, 0x65, 0xf6, 0x98, 0x3a, 0x17, 0x46, 0xa3, 0xed, 0x0a,
0x40, 0xbc, 0x99, 0xb3, 0xb3, 0x2a, 0x6a, 0x66, 0xb5, 0x09, 0x2b, 0x8f, 0xdd, 0x90, 0x0d, 0xb1,
0xd7, 0xb5, 0x0f, 0xb0, 0xef, 0x13, 0x4f, 0xc8, 0x89, 0x9b, 0xaf, 0xe2, 0x46, 0xcd, 0x5a, 0x52,
0x8d, 0xb7, 0x65, 0x1b, 0x17, 0x16, 0x45, 0x6f, 0xc1, 0xea, 0xe0, 0xe0, 0x88, 0xba, 0xf6, 0x18,
0x51, 0x49, 0x10, 0x2d, 0x47, 0xad, 0x29, 0xaa, 0xab, 0x70, 0xce, 0x16, 0x16, 0xd0, 0xe9, 0x72,
0xa9, 0x49, 0x31, 0x96, 0x85, 0x18, 0x5b, 0xaa, 0xe1, 0xa3, 0x08, 0xce, 0xd9, 0x8a, 0x90, 0x87,
0xcc, 0x4e, 0x10, 0x54, 0x04, 0xc1, 0x92, 0x6a, 0xfc, 0x98, 0xd9, 0x23, 0x9a, 0xb4, 0xed, 0xaa,
0x66, 0x6d, 0x57, 0x1b, 0x2a, 0xc2, 0x16, 0x13, 0xda, 0xae, 0x09, 0x36, 0xa3, 0x4f, 0xb4, 0x0d,
0x8b, 0x94, 0xe1, 0x90, 0x75, 0x07, 0x01, 0x75, 0xb9, 0x5c, 0x68, 0x1b, 0xd6, 0x8b, 0x1b, 0xf5,
0xcd, 0x75, 0xed, 0x22, 0x7d, 0x48, 0x8e, 0xb6, 0x30, 0xc3, 0x3b, 0xd8, 0x0d, 0xad, 0x05, 0x41,
0xb8, 0x13, 0xd1, 0xe9, 0x0d, 0x64, 0x7d, 0x26, 0x03, 0xa9, 0xd3, 0xe2, 0x86, 0xd6, 0x76, 0xfd,
0xc8, 0x80, 0x95, 0x7b, 0x01, 0x76, 0xce, 0xc6, 0x9e, 0x7a, 0x05, 0x16, 0x42, 0x32, 0xf0, 0x5c,
0x1b, 0xf3, 0xf5, 0xd8, 0x23, 0xa1, 0xd8, 0x55, 0x25, 0xab, 0xa9, 0xa0, 0x0f, 0x04, 0xd0, 0xfc,
0xdc, 0x80, 0xb6, 0x45, 0x3c, 0x82, 0xe9, 0xd9, 0xb0, 0x05, 0xe6, 0xf7, 0x0d, 0x78, 0xf1, 0x0e,
0x61, 0x89, 0x5d, 0xc5, 0x30, 0x73, 0x29, 0x73, 0xed, 0xd3, 0xf4, 0x2b, 0xcc, 0xef, 0x19, 0x70,
0x29, 0x97, 0xad, 0x59, 0x8c, 0xcc, 0x3b, 0x50, 0xe2, 0xbf, 0x68, 0xbb, 0x20, 0x74, 0xfe, 0x72,
0x9e, 0xce, 0x7f, 0x9d, 0xdb, 0x6e, 0xa1, 0xf4, 0x12, 0xdf, 0xfc, 0x4f, 0x03, 0x56, 0x77, 0x0f,
0x82, 0xc3, 0x11, 0x4b, 0xcf, 0x42, 0x40, 0x69, 0xb3, 0x5b, 0xcc, 0x98, 0x5d, 0xf4, 0x26, 0xcc,
0xb3, 0xa3, 0x01, 0x11, 0xba, 0xb5, 0xb0, 0x79, 0xf1, 0x9a, 0xc6, 0x9d, 0xbe, 0xc6, 0x99, 0xfc,
0xe8, 0x68, 0x40, 0x2c, 0x81, 0x8a, 0x5e, 0x87, 0x56, 0x46, 0xe4, 0x91, 0xe1, 0x5a, 0x4c, 0xcb,
0x9c, 0x9a, 0x3f, 0x29, 0xc0, 0xda, 0xd8, 0x14, 0x67, 0x11, 0xb6, 0x6e, 0xec, 0x82, 0x76, 0x6c,
0xbe, 0x7f, 0x12, 0xa8, 0xae, 0xc3, 0x3d, 0xde, 0xe2, 0x46, 0xd1, 0x6a, 0x26, 0xec, 0xb7, 0x43,
0xd1, 0x1b, 0x80, 0xc6, 0xcc, 0xaa, 0xb4, 0xde, 0xf3, 0xd6, 0xb9, 0xac, 0x5d, 0x15, 0xb6, 0x5b,
0x6b, 0x58, 0xa5, 0x08, 0xe6, 0xad, 0x65, 0x8d, 0x65, 0xa5, 0xe8, 0x4d, 0x58, 0x76, 0xfd, 0xfb,
0xa4, 0x1f, 0x84, 0x47, 0xdd, 0x01, 0x09, 0x6d, 0xe2, 0x33, 0xdc, 0x23, 0xb4, 0x5d, 0x16, 0x1c,
0x2d, 0x45, 0x6d, 0x3b, 0xa3, 0x26, 0xf3, 0xc7, 0x06, 0xac, 0x4a, 0x8f, 0x77, 0x07, 0x87, 0xcc,
0x3d, 0x03, 0xd6, 0x68, 0x10, 0xf1, 0x21, 0xf1, 0xa4, 0x7f, 0xde, 0x8c, 0xa1, 0x62, 0x97, 0xfd,
0xd0, 0x80, 0x65, 0xee, 0x8c, 0x3e, 0x4f, 0x3c, 0xff, 0x85, 0x01, 0x4b, 0x77, 0x31, 0x7d, 0x9e,
0x58, 0xfe, 0x4b, 0x75, 0x52, 0xc5, 0x3c, 0x9f, 0xea, 0x95, 0xed, 0x35, 0x58, 0x4c, 0x33, 0x1d,
0x79, 0x3f, 0x0b, 0x29, 0xae, 0xa9, 0xf9, 0xd7, 0xa3, 0xb3, 0xea, 0x39, 0xe3, 0xfc, 0x6f, 0x0c,
0xb8, 0x78, 0x87, 0xb0, 0x98, 0xeb, 0x33, 0x71, 0xa6, 0x4d, 0xab, 0x2d, 0x9f, 0xcb, 0x13, 0x59,
0xcb, 0xfc, 0xa9, 0x9c, 0x7c, 0xdf, 0x2d, 0xc0, 0x0a, 0x3f, 0x16, 0xce, 0x86, 0x12, 0x4c, 0x73,
0x79, 0xd1, 0x28, 0x4a, 0x49, 0xa7, 0x28, 0xf1, 0x79, 0x5a, 0x9e, 0xfa, 0x3c, 0x35, 0x7f, 0x54,
0x90, 0x7e, 0x40, 0x52, 0x1a, 0xb3, 0x2c, 0x8b, 0x86, 0xd7, 0x82, 0x96, 0x57, 0x13, 0x1a, 0x31,
0x64, 0x7b, 0x2b, 0x3a, 0x1f, 0x53, 0xb0, 0x33, 0x7b, 0x3c, 0x7e, 0x66, 0xc0, 0x6a, 0x74, 0x5d,
0xdc, 0x25, 0xbd, 0x3e, 0xf1, 0xd9, 0xd3, 0xeb, 0x50, 0x56, 0x03, 0x0a, 0x1a, 0x0d, 0xb8, 0x00,
0x35, 0x2a, 0xc7, 0x89, 0x6f, 0x82, 0x23, 0x80, 0xf9, 0xb7, 0x06, 0xac, 0x8d, 0xb1, 0x33, 0xcb,
0x22, 0xb6, 0xa1, 0xe2, 0xfa, 0x0e, 0x79, 0x12, 0x73, 0x13, 0x7d, 0xf2, 0x96, 0xbd, 0xa1, 0xeb,
0x39, 0x31, 0x1b, 0xd1, 0x27, 0xba, 0x0c, 0x0d, 0xe2, 0xe3, 0x3d, 0x8f, 0x74, 0x05, 0xae, 0x50,
0xe4, 0xaa, 0x55, 0x97, 0xb0, 0x6d, 0x0e, 0xe2, 0xc4, 0xfb, 0x2e, 0x11, 0xc4, 0x25, 0x49, 0xac,
0x3e, 0xcd, 0xdf, 0x30, 0x60, 0x89, 0x6b, 0xa1, 0xe2, 0x9e, 0x3e, 0x5b, 0x69, 0xae, 0x43, 0x3d,
0xa1, 0x66, 0x6a, 0x22, 0x49, 0x90, 0xf9, 0x08, 0x96, 0xd3, 0xec, 0xcc, 0x22, 0xcd, 0x17, 0x01,
0xe2, 0xb5, 0x92, 0xbb, 0xa1, 0x68, 0x25, 0x20, 0xe6, 0x67, 0x85, 0x28, 0x28, 0x2c, 0xc4, 0x74,
0xca, 0x31, 0x2b, 0xb1, 0x24, 0x49, 0x7b, 0x5e, 0x13, 0x10, 0xd1, 0xbc, 0x05, 0x0d, 0xf2, 0x84,
0x85, 0xb8, 0x3b, 0xc0, 0x21, 0xee, 0xcb, 0x6d, 0x35, 0x95, 0xe9, 0xad, 0x0b, 0xb2, 0x1d, 0x41,
0xc5, 0x07, 0x11, 0x2a, 0x22, 0x07, 0x29, 0xcb, 0x41, 0x04, 0x44, 0x1c, 0x18, 0xff, 0xc0, 0xbd,
0x38, 0xa5, 0xcd, 0x67, 0x5d, 0x20, 0xe9, 0xa9, 0x94, 0xb2, 0x53, 0xf9, 0x13, 0x03, 0x5a, 0x62,
0x0a, 0x72, 0x3e, 0x03, 0xde, 0x6d, 0x86, 0xc6, 0xc8, 0xd0, 0x4c, 0xd8, 0x7b, 0xff, 0x0f, 0xca,
0x4a, 0xee, 0xc5, 0x69, 0xe5, 0xae, 0x08, 0x8e, 0x99, 0x86, 0xf9, 0x87, 0x06, 0xac, 0x64, 0x44,
0x3e, 0x8b, 0xc2, 0x7f, 0x04, 0x48, 0xce, 0xd0, 0x19, 0x4d, 0x3b, 0x3a, 0xa7, 0x5f, 0xd1, 0x1e,
0x4a, 0x59, 0x21, 0x59, 0xe7, 0xdc, 0x0c, 0x84, 0x9a, 0xff, 0x6c, 0xc0, 0x85, 0x3b, 0x84, 0x09,
0xd4, 0x5b, 0xdc, 0xe8, 0xec, 0x84, 0x41, 0x2f, 0x24, 0x94, 0x3e, 0xbf, 0xfa, 0xf1, 0xdb, 0xd2,
0xb1, 0xd3, 0x4d, 0x69, 0x16, 0xf9, 0x5f, 0x86, 0x86, 0x18, 0x83, 0x38, 0xdd, 0x30, 0x38, 0xa4,
0x4a, 0x8f, 0xea, 0x0a, 0x66, 0x05, 0x87, 0x42, 0x21, 0x58, 0xc0, 0xb0, 0x27, 0x11, 0xd4, 0x89,
0x22, 0x20, 0xbc, 0x59, 0xec, 0xc1, 0x88, 0x31, 0xde, 0x39, 0x79, 0x7e, 0x65, 0xfc, 0xc7, 0x06,
0xac, 0x64, 0xa6, 0x32, 0x8b, 0x6c, 0xbf, 0x24, 0xdd, 0x4e, 0x39, 0x99, 0x85, 0xcd, 0x4b, 0x5a,
0x9a, 0xc4, 0x60, 0x12, 0x1b, 0x5d, 0x82, 0xfa, 0x3e, 0x76, 0xbd, 0x6e, 0x48, 0x30, 0x0d, 0x7c,
0x35, 0x51, 0xe0, 0x20, 0x4b, 0x40, 0xcc, 0xbf, 0x37, 0x64, 0xe6, 0xed, 0x39, 0xb7, 0x78, 0x7f,
0x54, 0x80, 0xe6, 0xb6, 0x4f, 0x49, 0xc8, 0xce, 0xfe, 0xd5, 0x04, 0xbd, 0x0f, 0x75, 0x31, 0x31,
0xda, 0x75, 0x30, 0xc3, 0xea, 0x34, 0x7b, 0x51, 0x1b, 0xa6, 0xff, 0x80, 0xe3, 0x6d, 0x61, 0x86,
0x2d, 0x29, 0x1d, 0xca, 0x7f, 0xa3, 0xf3, 0x50, 0x3b, 0xc0, 0xf4, 0xa0, 0xfb, 0x88, 0x1c, 0x49,
0x7f, 0xb1, 0x69, 0x55, 0x39, 0xe0, 0x43, 0x72, 0x44, 0xd1, 0x0b, 0x50, 0xf5, 0x87, 0x7d, 0xb9,
0xc1, 0x2a, 0xeb, 0xc6, 0x46, 0xd3, 0xaa, 0xf8, 0xc3, 0xbe, 0xd8, 0x5e, 0xff, 0x58, 0x80, 0x85,
0xfb, 0x43, 0x7e, 0x11, 0x12, 0x49, 0x86, 0xa1, 0xc7, 0x9e, 0x4e, 0x19, 0xaf, 0x40, 0x51, 0xba,
0x14, 0x9c, 0xa2, 0xad, 0x65, 0x7c, 0x7b, 0x8b, 0x5a, 0x1c, 0x49, 0x04, 0xd8, 0x87, 0xb6, 0xad,
0xbc, 0xb3, 0xa2, 0x60, 0xb6, 0xc6, 0x21, 0xd2, 0x37, 0x3b, 0x0f, 0x35, 0x12, 0x86, 0xb1, 0xef,
0x26, 0xa6, 0x42, 0xc2, 0x50, 0x36, 0x9a, 0xd0, 0xc0, 0xf6, 0x23, 0x3f, 0x38, 0xf4, 0x88, 0xd3,
0x23, 0x8e, 0x58, 0xf6, 0xaa, 0x95, 0x82, 0x49, 0xc5, 0xe0, 0x0b, 0xdf, 0xb5, 0x7d, 0x26, 0x4e,
0xf5, 0x22, 0x57, 0x0c, 0x0e, 0xb9, 0xed, 0x33, 0xde, 0xec, 0x10, 0x8f, 0x30, 0x22, 0x9a, 0x2b,
0xb2, 0x59, 0x42, 0x54, 0xf3, 0x70, 0x10, 0x53, 0x57, 0x65, 0xb3, 0x84, 0xf0, 0xe6, 0x0b, 0x50,
0x1b, 0x65, 0x11, 0x6a, 0xa3, 0x30, 0xa2, 0x00, 0x98, 0xff, 0x61, 0x40, 0x73, 0x4b, 0x74, 0xf5,
0x1c, 0x28, 0x1d, 0x82, 0x79, 0xf2, 0x64, 0x10, 0xaa, 0xad, 0x23, 0x7e, 0x4f, 0xd4, 0x23, 0xf3,
0x31, 0xb4, 0x76, 0x3c, 0x6c, 0x93, 0x83, 0xc0, 0x73, 0x48, 0x28, 0xce, 0x76, 0xd4, 0x82, 0x22,
0xc3, 0x3d, 0xe5, 0x3c, 0xf0, 0x9f, 0xe8, 0xcb, 0xea, 0xea, 0x27, 0xcd, 0xd2, 0xcb, 0xda, 0x53,
0x36, 0xd1, 0x4d, 0x22, 0xa2, 0xba, 0x0a, 0x65, 0x91, 0xd9, 0x93, 0x6e, 0x45, 0xc3, 0x52, 0x5f,
0xe6, 0xc3, 0xd4, 0xb8, 0x77, 0xc2, 0x60, 0x38, 0x40, 0xdb, 0xd0, 0x18, 0x8c, 0x60, 0x5c, 0x57,
0xf3, 0xcf, 0xf4, 0x2c, 0xd3, 0x56, 0x8a, 0xd4, 0xfc, 0xaf, 0x22, 0x34, 0x77, 0x09, 0x0e, 0xed,
0x83, 0xe7, 0x21, 0x06, 0xc3, 0x25, 0xee, 0x50, 0x4f, 0xad, 0x1a, 0xff, 0x89, 0xae, 0xc2, 0xb9,
0xc4, 0x84, 0xba, 0x3d, 0x2e, 0x20, 0xa1, 0xf7, 0x0d, 0xab, 0x35, 0xc8, 0x0a, 0xee, 0x1d, 0xa8,
0x3a, 0xd4, 0xeb, 0x8a, 0x25, 0xaa, 0x88, 0x25, 0xd2, 0xcf, 0x6f, 0x8b, 0x7a, 0x62, 0x69, 0x2a,
0x8e, 0xfc, 0x81, 0x5e, 0x82, 0x66, 0x30, 0x64, 0x83, 0x21, 0xeb, 0x4a, 0xbb, 0xd3, 0xae, 0x0a,
0xf6, 0x1a, 0x12, 0x28, 0xcc, 0x12, 0x45, 0x1f, 0x40, 0x93, 0x0a, 0x51, 0x46, 0x8e, 0x79, 0x6d,
0x5a, 0x07, 0xb1, 0x21, 0xe9, 0x94, 0x67, 0xfe, 0x3a, 0xb4, 0x58, 0x88, 0x1f, 0x13, 0x2f, 0x91,
0xb3, 0x03, 0xb1, 0xdb, 0x16, 0x25, 0x7c, 0x94, 0xaf, 0xbb, 0x0e, 0x4b, 0xbd, 0x21, 0x0e, 0xb1,
0xcf, 0x08, 0x49, 0x60, 0xd7, 0x05, 0x36, 0x8a, 0x9b, 0x62, 0x02, 0xf3, 0x43, 0x98, 0xbf, 0xeb,
0x32, 0x21, 0x48, 0x6e, 0xb3, 0x0c, 0x71, 0x0d, 0x12, 0x96, 0xe9, 0x05, 0xa8, 0x86, 0xc1, 0xa1,
0xb4, 0xc1, 0x05, 0xa1, 0x82, 0x95, 0x30, 0x38, 0x14, 0x06, 0x56, 0x54, 0x3a, 0x04, 0xa1, 0xd2,
0xcd, 0x82, 0xa5, 0xbe, 0xcc, 0x3f, 0x37, 0x46, 0xca, 0xc3, 0xcd, 0x27, 0x7d, 0x3a, 0xfb, 0xf9,
0x3e, 0x54, 0x42, 0x49, 0x3f, 0x31, 0x47, 0x9b, 0x1c, 0x49, 0x9c, 0x01, 0x11, 0xd5, 0xf4, 0x09,
0xa0, 0x5f, 0x36, 0xa0, 0xf1, 0x81, 0x37, 0xa4, 0xcf, 0x42, 0xd9, 0x75, 0x69, 0x89, 0xa2, 0x3e,
0x25, 0xf2, 0x9b, 0x05, 0x68, 0x2a, 0x36, 0x66, 0x71, 0x82, 0x72, 0x59, 0xd9, 0x85, 0x3a, 0x1f,
0xb2, 0x4b, 0x49, 0x2f, 0x8a, 0xe9, 0xd4, 0x37, 0x37, 0xb5, 0xe6, 0x21, 0xc5, 0x86, 0x48, 0x83,
0xef, 0x0a, 0xa2, 0x9f, 0xf7, 0x59, 0x78, 0x64, 0x81, 0x1d, 0x03, 0x3a, 0x0f, 0x61, 0x31, 0xd3,
0xcc, 0x95, 0xe8, 0x11, 0x39, 0x8a, 0xec, 0xdf, 0x23, 0x72, 0x84, 0xde, 0x4a, 0x16, 0x2b, 0xe4,
0x9d, 0xe2, 0xf7, 0x02, 0xbf, 0x77, 0x33, 0x0c, 0xf1, 0x91, 0x2a, 0x66, 0x78, 0xb7, 0xf0, 0x65,
0xc3, 0xfc, 0xbb, 0x02, 0x34, 0xbe, 0x36, 0x24, 0xe1, 0xd1, 0x69, 0xda, 0xa1, 0xe8, 0x54, 0x98,
0x4f, 0x9c, 0x0a, 0x63, 0x5b, 0xbf, 0xa4, 0xd9, 0xfa, 0x1a, 0x03, 0x56, 0xd6, 0x1a, 0x30, 0xdd,
0xde, 0xae, 0x9c, 0x68, 0x6f, 0x57, 0x73, 0xf7, 0xf6, 0x9f, 0x19, 0xb1, 0x08, 0x67, 0xda, 0x8d,
0x29, 0x77, 0xac, 0x70, 0x62, 0x77, 0x6c, 0xea, 0xdd, 0xf8, 0x43, 0x03, 0x6a, 0x5f, 0x27, 0x36,
0x0b, 0x42, 0x6e, 0x7f, 0x34, 0x64, 0xc6, 0x14, 0xae, 0x71, 0x21, 0xeb, 0x1a, 0xdf, 0x80, 0xaa,
0xeb, 0x74, 0x31, 0xd7, 0x2f, 0x31, 0xee, 0x24, 0x97, 0xac, 0xe2, 0x3a, 0x42, 0x11, 0xa7, 0x4f,
0x02, 0xfc, 0x8e, 0x01, 0x0d, 0xc9, 0x33, 0x95, 0x94, 0xef, 0x25, 0x86, 0x33, 0x74, 0x4a, 0xaf,
0x3e, 0xe2, 0x89, 0xde, 0x9d, 0x1b, 0x0d, 0x7b, 0x13, 0x80, 0x0b, 0x59, 0x91, 0xcb, 0x3d, 0xb3,
0xae, 0xe5, 0x56, 0x92, 0x0b, 0x81, 0xdf, 0x9d, 0xb3, 0x6a, 0x9c, 0x4a, 0x74, 0x71, 0xab, 0x02,
0x25, 0x41, 0x6d, 0xfe, 0xaf, 0x01, 0x4b, 0xb7, 0xb1, 0x67, 0x6f, 0xb9, 0x94, 0x61, 0xdf, 0x9e,
0xc1, 0x09, 0x7b, 0x17, 0x2a, 0xc1, 0xa0, 0xeb, 0x91, 0x7d, 0xa6, 0x58, 0xba, 0x3c, 0x61, 0x46,
0x52, 0x0c, 0x56, 0x39, 0x18, 0xdc, 0x23, 0xfb, 0x0c, 0x7d, 0x05, 0xaa, 0xc1, 0xa0, 0x1b, 0xba,
0xbd, 0x03, 0xa6, 0xa4, 0x3f, 0x05, 0x71, 0x25, 0x18, 0x58, 0x9c, 0x22, 0x11, 0x5b, 0x99, 0x3f,
0x61, 0x6c, 0xc5, 0xfc, 0x97, 0xb1, 0xe9, 0xcf, 0xb0, 0x07, 0xde, 0x85, 0xaa, 0xeb, 0xb3, 0xae,
0xe3, 0xd2, 0x48, 0x04, 0x17, 0xf5, 0x3a, 0xe4, 0x33, 0x31, 0x03, 0xb1, 0xa6, 0x3e, 0xe3, 0x63,
0xa3, 0xaf, 0x02, 0xec, 0x7b, 0x01, 0x56, 0xd4, 0x52, 0x06, 0x97, 0xf4, 0xdb, 0x87, 0xa3, 0x45,
0xf4, 0x35, 0x41, 0xc4, 0x7b, 0x18, 0x2d, 0xe9, 0x3f, 0x19, 0xb0, 0xb2, 0x43, 0x42, 0x59, 0xca,
0xc2, 0x54, 0x18, 0x74, 0xdb, 0xdf, 0x0f, 0xd2, 0x91, 0x68, 0x23, 0x13, 0x89, 0xfe, 0xe9, 0x44,
0x5f, 0x53, 0x37, 0x27, 0x99, 0x0f, 0x89, 0x6e, 0x4e, 0x51, 0xd6, 0x47, 0xde, 0x3c, 0x17, 0x72,
0x96, 0x49, 0xf1, 0x9b, 0xbc, 0x80, 0x9b, 0xbf, 0x25, 0x2b, 0x30, 0xb4, 0x93, 0x7a, 0x7a, 0x85,
0x5d, 0x05, 0x65, 0xe9, 0x33, 0x76, 0xff, 0x55, 0xc8, 0xd8, 0x8e, 0x1c, 0x43, 0xf4, 0x03, 0x03,
0xd6, 0xf3, 0xb9, 0x9a, 0xe5, 0x88, 0xfe, 0x2a, 0x94, 0x5c, 0x7f, 0x3f, 0x88, 0xc2, 0x6e, 0x57,
0xf4, 0x2e, 0xba, 0x76, 0x5c, 0x49, 0x68, 0xfe, 0x55, 0x01, 0x5a, 0xc2, 0xa8, 0x9f, 0xc2, 0xf2,
0xf7, 0x49, 0xbf, 0x4b, 0xdd, 0x4f, 0x49, 0xb4, 0xfc, 0x7d, 0xd2, 0xdf, 0x75, 0x3f, 0x25, 0x29,
0xcd, 0x28, 0xa5, 0x35, 0x63, 0x72, 0x54, 0x39, 0x19, 0x56, 0xad, 0xa4, 0xc3, 0xaa, 0xab, 0x50,
0xf6, 0x03, 0x87, 0x6c, 0x6f, 0xa9, 0x6b, 0xa7, 0xfa, 0x1a, 0xa9, 0x5a, 0xed, 0x84, 0xaa, 0xf6,
0xb9, 0x01, 0x9d, 0x3b, 0x84, 0x65, 0x65, 0x77, 0x7a, 0x5a, 0xf6, 0x3d, 0x03, 0xce, 0x6b, 0x19,
0x9a, 0x45, 0xc1, 0xde, 0x4b, 0x2b, 0x98, 0xfe, 0x0e, 0x38, 0x36, 0xa4, 0xd2, 0xad, 0x37, 0xa1,
0xb1, 0x35, 0xec, 0xf7, 0x63, 0x97, 0xeb, 0x32, 0x34, 0x42, 0xf9, 0x53, 0x5e, 0x91, 0xe4, 0xf9,
0x5b, 0x57, 0x30, 0x7e, 0x11, 0x32, 0xaf, 0x42, 0x53, 0x91, 0x28, 0xae, 0x3b, 0x50, 0x0d, 0xd5,
0x6f, 0x85, 0x1f, 0x7f, 0x9b, 0x2b, 0xb0, 0x64, 0x91, 0x1e, 0x57, 0xed, 0xf0, 0x9e, 0xeb, 0x3f,
0x52, 0xc3, 0x98, 0xdf, 0x31, 0x60, 0x39, 0x0d, 0x57, 0x7d, 0xbd, 0x0d, 0x15, 0xec, 0x38, 0x21,
0xa1, 0x74, 0xe2, 0xb2, 0xdc, 0x94, 0x38, 0x56, 0x84, 0x9c, 0x90, 0x5c, 0x61, 0x6a, 0xc9, 0x99,
0x5d, 0x38, 0x77, 0x87, 0xb0, 0xfb, 0x84, 0x85, 0x33, 0x65, 0xf0, 0xdb, 0xfc, 0xf2, 0x22, 0x88,
0x95, 0x5a, 0x44, 0x9f, 0xe6, 0x67, 0x06, 0xa0, 0xe4, 0x08, 0xb3, 0x2c, 0x73, 0x52, 0xca, 0x85,
0xb4, 0x94, 0x65, 0x91, 0x53, 0x7f, 0x10, 0xf8, 0xc4, 0x67, 0x49, 0x77, 0xab, 0x19, 0x43, 0x85,
0xfa, 0xfd, 0xd8, 0x00, 0x74, 0x2f, 0xc0, 0xce, 0x2d, 0xec, 0xcd, 0xe6, 0x1e, 0x5c, 0x04, 0xa0,
0xa1, 0xdd, 0x55, 0xbb, 0xb5, 0xa0, 0xac, 0x4f, 0x68, 0x3f, 0x90, 0x1b, 0xf6, 0x12, 0xd4, 0x1d,
0xca, 0x54, 0x73, 0x94, 0x50, 0x06, 0x87, 0x32, 0xd9, 0x2e, 0x8a, 0x58, 0x29, 0xc1, 0x1e, 0x71,
0xba, 0x89, 0x7c, 0xdc, 0xbc, 0x40, 0x6b, 0xc9, 0x86, 0xdd, 0x51, 0x56, 0xee, 0x21, 0xac, 0xdd,
0xc7, 0xfe, 0x10, 0x7b, 0xb7, 0x83, 0xfe, 0x00, 0xa7, 0x0a, 0x1b, 0xb3, 0x66, 0xce, 0xd0, 0x98,
0xb9, 0x17, 0x65, 0xe5, 0x9b, 0x74, 0xad, 0x05, 0xaf, 0xf3, 0x56, 0x02, 0x62, 0x52, 0x68, 0x8f,
0x77, 0x3f, 0xcb, 0x42, 0x09, 0xa6, 0xa2, 0xae, 0x92, 0xb6, 0x77, 0x04, 0x33, 0xdf, 0x87, 0x17,
0x44, 0x15, 0x62, 0x04, 0x4a, 0x85, 0xf6, 0xb3, 0x1d, 0x18, 0x9a, 0x0e, 0x7e, 0xb5, 0x20, 0x4c,
0xdb, 0x58, 0x0f, 0xb3, 0x30, 0xfe, 0x6e, 0x3a, 0xa2, 0xfe, 0x72, 0x4e, 0xa5, 0x6d, 0x7a, 0x44,
0x15, 0x56, 0xdf, 0x80, 0x45, 0xf2, 0x84, 0xd8, 0x43, 0xe6, 0xfa, 0xbd, 0x1d, 0x0f, 0xfb, 0x0f,
0x02, 0x75, 0xa0, 0x64, 0xc1, 0xe8, 0x65, 0x68, 0x72, 0xe9, 0x07, 0x43, 0xa6, 0xf0, 0xe4, 0xc9,
0x92, 0x06, 0xf2, 0xfe, 0xf8, 0x7c, 0x3d, 0xc2, 0x88, 0xa3, 0xf0, 0xe4, 0x31, 0x93, 0x05, 0x8f,
0x89, 0x92, 0x83, 0xe9, 0x49, 0x44, 0xf9, 0x6f, 0x46, 0x46, 0x94, 0xaa, 0x87, 0xd3, 0x12, 0xe5,
0x5d, 0x80, 0x3e, 0x09, 0x7b, 0x64, 0x5b, 0x18, 0x75, 0x79, 0x73, 0xdf, 0xd0, 0x1a, 0xf5, 0x51,
0x07, 0xf7, 0x23, 0x02, 0x2b, 0x41, 0x6b, 0xde, 0x81, 0x25, 0x0d, 0x0a, 0xb7, 0x57, 0x34, 0x18,
0x86, 0x36, 0x89, 0x82, 0x3f, 0xd1, 0x27, 0x3f, 0xdf, 0x18, 0x0e, 0x7b, 0x84, 0x29, 0xa5, 0x55,
0x5f, 0xe6, 0xdb, 0x22, 0x09, 0x25, 0x02, 0x05, 0x29, 0x4d, 0x4d, 0x27, 0xd4, 0x8d, 0xb1, 0x84,
0xfa, 0xbe, 0xc8, 0xf8, 0x24, 0xe9, 0x66, 0x2c, 0x86, 0xd8, 0xe7, 0x5d, 0x11, 0x47, 0xbd, 0xb2,
0x88, 0x3e, 0xcd, 0xff, 0x31, 0xa0, 0xb9, 0xdd, 0x1f, 0x04, 0xa3, 0x64, 0xc7, 0xd4, 0x57, 0xc9,
0xf1, 0x60, 0x71, 0x41, 0x17, 0x2c, 0x7e, 0x09, 0x9a, 0xe9, 0x1a, 0x7d, 0x19, 0xd7, 0x69, 0xd8,
0xc9, 0xda, 0xfc, 0xf3, 0x50, 0x0b, 0x83, 0xc3, 0x2e, 0x37, 0x91, 0x8e, 0x2a, 0xbb, 0xa8, 0x86,
0xc1, 0x21, 0x37, 0x9c, 0x0e, 0x5a, 0x86, 0xd2, 0xbe, 0xeb, 0xc5, 0x15, 0x43, 0xf2, 0x03, 0xbd,
0xc7, 0x2f, 0x5a, 0x32, 0x2d, 0x5b, 0x9e, 0xf6, 0xbe, 0x13, 0x51, 0x98, 0x9f, 0xc0, 0x42, 0x34,
0xeb, 0x19, 0x9f, 0x97, 0x30, 0x4c, 0x1f, 0x45, 0x15, 0x11, 0xf2, 0xc3, 0xbc, 0x2a, 0xb3, 0x75,
0xa2, 0xff, 0xd4, 0xa2, 0x23, 0x98, 0xe7, 0x18, 0x6a, 0x2f, 0x89, 0xdf, 0x7c, 0x01, 0x56, 0xb3,
0xd8, 0xb3, 0xb0, 0xf4, 0x76, 0x7a, 0xff, 0xe8, 0x5f, 0x10, 0x24, 0x47, 0x53, 0x7b, 0x47, 0xad,
0x80, 0x1d, 0x0c, 0x7d, 0xa6, 0x0c, 0x10, 0x5f, 0x81, 0xdb, 0xfc, 0x1b, 0xad, 0x41, 0xc5, 0x75,
0xba, 0x1e, 0xbf, 0x93, 0xc9, 0xb3, 0xa6, 0xec, 0x3a, 0xf7, 0xf8, 0x7d, 0xed, 0x9d, 0xc8, 0x83,
0x9a, 0xba, 0x8c, 0x42, 0x79, 0x4f, 0xdf, 0x97, 0xe7, 0xbb, 0x25, 0x4b, 0xf1, 0x9f, 0x71, 0xb1,
0xcc, 0x06, 0xb4, 0x0e, 0x5d, 0x76, 0xd0, 0x15, 0x6f, 0x31, 0xc4, 0xe1, 0x2a, 0xf3, 0xc5, 0x55,
0x6b, 0x81, 0xc3, 0x77, 0x39, 0x98, 0x1f, 0xb0, 0xd4, 0xfc, 0x35, 0x03, 0x96, 0x52, 0x6c, 0xcd,
0xb2, 0x14, 0x5f, 0xe1, 0x7e, 0x87, 0xec, 0x48, 0x79, 0x98, 0xeb, 0x5a, 0x63, 0xa4, 0x46, 0x13,
0x46, 0x28, 0xa6, 0x30, 0xff, 0xdd, 0x80, 0x7a, 0xa2, 0x85, 0x5f, 0x5b, 0x54, 0xdb, 0xe8, 0xda,
0x12, 0x03, 0xa6, 0x12, 0xc3, 0x4b, 0x30, 0xda, 0x9a, 0x89, 0x7a, 0xee, 0x44, 0xbd, 0x9a, 0x43,
0xd1, 0x5d, 0x58, 0x90, 0x62, 0x8a, 0x59, 0xd7, 0x46, 0x13, 0xe2, 0x4a, 0x3c, 0x1c, 0x3a, 0x8a,
0x4b, 0xab, 0x49, 0x13, 0x5f, 0x32, 0x79, 0x18, 0x38, 0x44, 0x8c, 0x54, 0x92, 0xd6, 0x52, 0xf8,
0x35, 0x0e, 0xe5, 0xd7, 0x8b, 0x46, 0x92, 0x94, 0xbb, 0x68, 0x1e, 0xc1, 0x0e, 0x09, 0xe3, 0xb9,
0xc5, 0xdf, 0xdc, 0x27, 0x92, 0xbf, 0xbb, 0xdc, 0x65, 0x55, 0x46, 0x06, 0x24, 0x88, 0x7b, 0xb3,
0xe8, 0x55, 0x58, 0x74, 0xfa, 0xa9, 0x87, 0x40, 0x91, 0x13, 0xe7, 0xf4, 0x13, 0x2f, 0x80, 0x52,
0x0c, 0xcd, 0xa7, 0x19, 0xfa, 0x6f, 0x23, 0x7e, 0x1e, 0x19, 0x12, 0x87, 0xf8, 0xcc, 0xc5, 0xde,
0xd3, 0xeb, 0x64, 0x07, 0xaa, 0x43, 0x4a, 0xc2, 0x84, 0x4d, 0x8c, 0xbf, 0x79, 0xdb, 0x00, 0x53,
0x7a, 0x18, 0x84, 0x8e, 0xe2, 0x32, 0xfe, 0x9e, 0x50, 0xfc, 0x27, 0x9f, 0xde, 0xe9, 0x8b, 0xff,
0xde, 0x86, 0xb5, 0x7e, 0xe0, 0xb8, 0xfb, 0xae, 0xae, 0x66, 0x90, 0x93, 0xad, 0x44, 0xcd, 0x29,
0x3a, 0xf3, 0x07, 0x05, 0x58, 0xfb, 0x78, 0xe0, 0xfc, 0x0c, 0xe6, 0xbc, 0x0e, 0xf5, 0xc0, 0x73,
0x76, 0xd2, 0xd3, 0x4e, 0x82, 0x38, 0x86, 0x4f, 0x0e, 0x63, 0x0c, 0x19, 0x42, 0x4e, 0x82, 0x26,
0x16, 0x46, 0x3e, 0x95, 0x6c, 0xca, 0x93, 0x64, 0xd3, 0x83, 0x35, 0x99, 0x8d, 0x7d, 0xc6, 0xa2,
0x31, 0x7f, 0x09, 0x56, 0xb8, 0x21, 0xe5, 0xc3, 0x7c, 0x4c, 0x49, 0x38, 0xa3, 0xc5, 0xb9, 0x00,
0xb5, 0xa8, 0xe7, 0xa8, 0x66, 0x75, 0x04, 0x30, 0xef, 0xc2, 0x72, 0x66, 0xac, 0xa7, 0x9c, 0xd1,
0x95, 0xcb, 0x50, 0x8d, 0x6a, 0x70, 0x51, 0x05, 0x8a, 0x37, 0x3d, 0xaf, 0x35, 0x87, 0x1a, 0x50,
0xdd, 0x56, 0x85, 0xa6, 0x2d, 0xe3, 0xca, 0xcf, 0xc1, 0x62, 0x26, 0x57, 0x8b, 0xaa, 0x30, 0xff,
0x20, 0xf0, 0x49, 0x6b, 0x0e, 0xb5, 0xa0, 0x71, 0xcb, 0xf5, 0x71, 0x78, 0x24, 0x23, 0x99, 0x2d,
0x07, 0x2d, 0x42, 0x5d, 0x44, 0xf4, 0x14, 0x80, 0x6c, 0xfe, 0xe4, 0x65, 0x68, 0xde, 0x17, 0x8c,
0xec, 0x92, 0xf0, 0xb1, 0x6b, 0x13, 0xd4, 0x85, 0x56, 0xf6, 0x05, 0x33, 0xfa, 0x82, 0xde, 0xbb,
0xd3, 0x3f, 0x74, 0xee, 0x4c, 0x92, 0xa1, 0x39, 0x87, 0x3e, 0x81, 0x85, 0xf4, 0x3b, 0x60, 0xa4,
0x0f, 0x39, 0x69, 0x1f, 0x0b, 0x1f, 0xd7, 0x79, 0x17, 0x9a, 0xa9, 0x67, 0xbd, 0xe8, 0x75, 0x6d,
0xdf, 0xba, 0xa7, 0xbf, 0x1d, 0xbd, 0xed, 0x4d, 0x3e, 0xbd, 0x95, 0xdc, 0xa7, 0xdf, 0xde, 0xe5,
0x70, 0xaf, 0x7d, 0xa0, 0x77, 0x1c, 0xf7, 0x18, 0xce, 0x8d, 0xbd, 0x91, 0x43, 0x6f, 0xe4, 0x9c,
0x66, 0xfa, 0xb7, 0x74, 0xc7, 0x0d, 0x71, 0x08, 0x68, 0xfc, 0xf9, 0x2a, 0xba, 0xa6, 0x5f, 0x81,
0xbc, 0xc7, 0xbb, 0x9d, 0xeb, 0x53, 0xe3, 0xc7, 0x82, 0xfb, 0x15, 0x03, 0xd6, 0x72, 0x1e, 0xb6,
0xa1, 0x1b, 0xda, 0xee, 0x26, 0xbf, 0xce, 0xeb, 0xbc, 0x75, 0x32, 0xa2, 0x98, 0x11, 0x1f, 0x16,
0x33, 0x6f, 0xbd, 0xd0, 0xd5, 0xdc, 0xfa, 0xf7, 0xf1, 0x47, 0x6f, 0x9d, 0x2f, 0x4c, 0x87, 0x1c,
0x8f, 0xf7, 0x10, 0x16, 0x33, 0x0f, 0xa4, 0x72, 0xc6, 0xd3, 0x3f, 0xa3, 0x3a, 0x6e, 0x41, 0xbf,
0x09, 0xcd, 0xd4, 0x4b, 0xa6, 0x1c, 0x8d, 0xd7, 0xbd, 0x76, 0x3a, 0xae, 0xeb, 0x87, 0xd0, 0x48,
0x3e, 0x38, 0x42, 0x1b, 0x79, 0x7b, 0x69, 0xac, 0xe3, 0x93, 0x6c, 0xa5, 0xd1, 0x7b, 0x82, 0x09,
0x5b, 0x69, 0xec, 0x09, 0xc6, 0xf4, 0x5b, 0x29, 0xd1, 0xff, 0xc4, 0xad, 0x74, 0xe2, 0x21, 0xbe,
0x23, 0xef, 0x14, 0x9a, 0xf7, 0x2a, 0x68, 0x33, 0x4f, 0x37, 0xf3, 0x5f, 0xe6, 0x74, 0x6e, 0x9c,
0x88, 0x26, 0x96, 0xe2, 0x23, 0x58, 0x48, 0xbf, 0xca, 0xc8, 0x91, 0xa2, 0xf6, 0x21, 0x4b, 0xe7,
0xea, 0x54, 0xb8, 0xf1, 0x60, 0x1f, 0x43, 0x3d, 0xf1, 0xa7, 0x24, 0xe8, 0xb5, 0x09, 0x7a, 0x9c,
0xfc, 0x87, 0x8e, 0xe3, 0x24, 0xf9, 0x35, 0xa8, 0xc5, 0xff, 0x25, 0x82, 0x5e, 0xc9, 0xd5, 0xdf,
0x93, 0x74, 0xb9, 0x0b, 0x30, 0xfa, 0xa3, 0x10, 0xf4, 0xaa, 0xb6, 0xcf, 0xb1, 0x7f, 0x12, 0x39,
0xae, 0xd3, 0x78, 0xfa, 0xb2, 0xd8, 0x6d, 0xd2, 0xf4, 0x93, 0xd5, 0x99, 0xc7, 0x75, 0x7b, 0x00,
0xcd, 0x54, 0x4d, 0x75, 0xde, 0x16, 0xd6, 0x94, 0xba, 0x77, 0xae, 0x4c, 0x83, 0x1a, 0xaf, 0xdf,
0x01, 0x34, 0x53, 0x15, 0xae, 0x39, 0x23, 0xe9, 0x0a, 0x7a, 0x73, 0x46, 0xd2, 0x16, 0xcc, 0x9a,
0x73, 0xe8, 0xdb, 0x89, 0x62, 0xda, 0x54, 0xc1, 0x32, 0x7a, 0x73, 0x62, 0x3f, 0xba, 0x7a, 0xed,
0xce, 0xe6, 0x49, 0x48, 0x62, 0x16, 0x94, 0x56, 0x49, 0x91, 0xe6, 0x6b, 0xd5, 0x49, 0x56, 0x6a,
0x17, 0xca, 0xb2, 0x66, 0x15, 0x99, 0x39, 0xd5, 0xe9, 0x89, 0x82, 0xd6, 0xce, 0x4b, 0x5a, 0x9c,
0x74, 0x39, 0xa7, 0xec, 0x54, 0x7a, 0xc1, 0x39, 0x9d, 0xa6, 0x0a, 0x16, 0xa7, 0xed, 0xd4, 0x82,
0xb2, 0x2c, 0x46, 0xca, 0xe9, 0x34, 0x55, 0x50, 0xd7, 0x99, 0x8c, 0x23, 0xb2, 0xd4, 0xe6, 0x1c,
0xda, 0x81, 0x92, 0x08, 0x95, 0xa1, 0xcb, 0x93, 0xea, 0x74, 0x26, 0xf5, 0x98, 0x2a, 0xe5, 0x31,
0xe7, 0xd0, 0x2f, 0x40, 0x49, 0x24, 0x7e, 0x72, 0x7a, 0x4c, 0x16, 0xdb, 0x74, 0x26, 0xa2, 0x44,
0x2c, 0x3a, 0xd0, 0x48, 0x66, 0xd8, 0x73, 0x8e, 0x2c, 0x4d, 0x0d, 0x42, 0x67, 0x1a, 0xcc, 0x68,
0x14, 0xb9, 0x8d, 0x46, 0x61, 0xc3, 0xfc, 0x6d, 0x34, 0x16, 0x92, 0xcc, 0xdf, 0x46, 0xe3, 0x51,
0x48, 0x73, 0x0e, 0xfd, 0xba, 0x01, 0xed, 0xbc, 0xb4, 0x2f, 0xca, 0xf5, 0x80, 0x26, 0xe5, 0xae,
0x3b, 0x5f, 0x3a, 0x21, 0x55, 0xcc, 0xcb, 0xa7, 0x22, 0x68, 0x33, 0x96, 0xe8, 0xbd, 0x9e, 0xd7,
0x5f, 0x4e, 0x5a, 0xb3, 0xf3, 0xc5, 0xe9, 0x09, 0xe2, 0xb1, 0xf7, 0xa0, 0x9e, 0x08, 0x18, 0xe5,
0x58, 0xde, 0xf1, 0x48, 0x57, 0xce, 0xaa, 0x6a, 0x62, 0x4f, 0x52, 0xbd, 0x45, 0xde, 0x30, 0x47,
0x19, 0x93, 0x69, 0xc8, 0x1c, 0xf5, 0x4e, 0xa5, 0x1d, 0xcd, 0x39, 0x44, 0xa0, 0x91, 0x4c, 0x22,
0xe6, 0x68, 0xa3, 0x26, 0xff, 0xd8, 0x79, 0x7d, 0x0a, 0xcc, 0x78, 0x98, 0x2e, 0xc0, 0x28, 0x89,
0x97, 0x73, 0xd6, 0x8d, 0xe5, 0x11, 0x3b, 0xaf, 0x1d, 0x8b, 0x97, 0x3c, 0xf6, 0x13, 0x69, 0xb9,
0x1c, 0xe9, 0x8f, 0x27, 0xee, 0xa6, 0xb8, 0x8b, 0x8c, 0xa7, 0x88, 0x72, 0xee, 0x22, 0xb9, 0xd9,
0xa8, 0xce, 0xf5, 0xa9, 0xf1, 0xe3, 0xf9, 0x7c, 0x0b, 0x5a, 0xd9, 0x94, 0x5a, 0xce, 0x1d, 0x37,
0x27, 0xb1, 0xd7, 0x79, 0x63, 0x4a, 0xec, 0xe4, 0x79, 0x78, 0x7e, 0x9c, 0xa7, 0x6f, 0xb8, 0xec,
0x40, 0x64, 0x73, 0xa6, 0x99, 0x75, 0x32, 0x71, 0x34, 0xcd, 0xac, 0x53, 0x69, 0x22, 0x75, 0x78,
0x89, 0x88, 0x74, 0xde, 0xe1, 0x95, 0x4c, 0x50, 0xe4, 0x9c, 0x33, 0xe9, 0x70, 0xbe, 0x74, 0x3f,
0xd3, 0x71, 0x75, 0x94, 0xef, 0x27, 0x8c, 0x85, 0xea, 0x73, 0xdc, 0x4f, 0x7d, 0xa0, 0x5e, 0x28,
0x7a, 0x2b, 0x1b, 0x3e, 0x9c, 0x1c, 0x9b, 0xc8, 0x86, 0x95, 0x8e, 0x0f, 0x1f, 0xb4, 0xb2, 0xb1,
0xba, 0x9c, 0x01, 0x72, 0x42, 0x7a, 0x53, 0x0c, 0x90, 0x8d, 0x78, 0xe5, 0x0c, 0x90, 0x13, 0x18,
0x9b, 0xc2, 0x97, 0x4c, 0x45, 0x9f, 0x72, 0x8e, 0x26, 0x5d, 0x84, 0x2a, 0xe7, 0x68, 0xd2, 0x06,
0xce, 0xcc, 0xb9, 0xcd, 0x21, 0x34, 0x76, 0xc2, 0xe0, 0xc9, 0x51, 0x14, 0x38, 0xfa, 0xd9, 0x18,
0xbb, 0x5b, 0xdf, 0x80, 0x05, 0x37, 0xc6, 0xe9, 0x85, 0x03, 0xfb, 0x56, 0x5d, 0x06, 0xb0, 0x76,
0x38, 0xf1, 0x8e, 0xf1, 0x8b, 0x37, 0x7a, 0x2e, 0x3b, 0x18, 0xee, 0x71, 0xc9, 0x5c, 0x97, 0x68,
0x6f, 0xb8, 0x81, 0xfa, 0x75, 0xdd, 0xf5, 0x19, 0x09, 0x7d, 0xec, 0x5d, 0x17, 0x43, 0x29, 0xe8,
0x60, 0xef, 0x0f, 0x0c, 0x63, 0xaf, 0x2c, 0x40, 0x37, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc5,
0x6f, 0xa3, 0xd8, 0xb9, 0x51, 0x00, 0x00,
// 4255 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3c, 0x4d, 0x6f, 0x1c, 0xc9,
0x75, 0xec, 0x19, 0xce, 0xd7, 0x9b, 0x19, 0x72, 0x54, 0xfc, 0x9a, 0x1d, 0x49, 0x2b, 0xaa, 0xf7,
0x8b, 0x2b, 0x79, 0x25, 0x2f, 0xb5, 0xde, 0x75, 0x76, 0x9d, 0xac, 0x25, 0x31, 0x2b, 0x11, 0x2b,
0x29, 0x74, 0x73, 0xd7, 0x86, 0xb3, 0x10, 0x06, 0xc5, 0xee, 0xe2, 0xb0, 0xa3, 0x9e, 0xee, 0x71,
0x57, 0x8d, 0x28, 0xee, 0xc9, 0x80, 0x83, 0x7c, 0xc0, 0x9b, 0x35, 0x82, 0x18, 0x49, 0x7c, 0x48,
0x10, 0xe4, 0xe3, 0x90, 0x43, 0x82, 0xd8, 0x01, 0x12, 0x23, 0x97, 0xe4, 0x90, 0x43, 0x0e, 0x01,
0xf2, 0x71, 0x09, 0x02, 0x5f, 0xf2, 0x07, 0x72, 0x08, 0x90, 0x63, 0x0e, 0x41, 0x7d, 0x74, 0x4f,
0x77, 0x4f, 0xf5, 0x70, 0xa8, 0xb1, 0x4c, 0xea, 0x36, 0xfd, 0xea, 0xbd, 0xaa, 0x57, 0xaf, 0x5e,
0xbd, 0x7a, 0xf5, 0xde, 0xab, 0x81, 0x46, 0xdf, 0xf5, 0x1e, 0x0f, 0xe9, 0xb5, 0x41, 0x18, 0xb0,
0x00, 0x2d, 0x25, 0xbf, 0xae, 0xc9, 0x8f, 0x4e, 0xc3, 0x0e, 0xfa, 0xfd, 0xc0, 0x97, 0xc0, 0x4e,
0x83, 0xda, 0x07, 0xa4, 0x8f, 0xe5, 0x97, 0xf9, 0x87, 0x06, 0xa0, 0xdb, 0x21, 0xc1, 0x8c, 0xdc,
0xf4, 0x5c, 0x4c, 0x2d, 0xf2, 0xad, 0x21, 0xa1, 0x0c, 0x7d, 0x11, 0xe6, 0xf7, 0x30, 0x25, 0x6d,
0x63, 0xdd, 0xd8, 0xa8, 0x6f, 0x5e, 0xb8, 0x96, 0xea, 0x56, 0x75, 0x77, 0x9f, 0xf6, 0x6e, 0x61,
0x4a, 0x2c, 0x81, 0x89, 0xd6, 0xa0, 0xe2, 0xec, 0x75, 0x7d, 0xdc, 0x27, 0xed, 0xc2, 0xba, 0xb1,
0x51, 0xb3, 0xca, 0xce, 0xde, 0x03, 0xdc, 0x27, 0xe8, 0x35, 0x58, 0xb4, 0x03, 0xcf, 0x23, 0x36,
0x73, 0x03, 0x5f, 0x22, 0x14, 0x05, 0xc2, 0xc2, 0x08, 0x2c, 0x10, 0x97, 0xa1, 0x84, 0x39, 0x0f,
0xed, 0x79, 0xd1, 0x2c, 0x3f, 0x4c, 0x0a, 0xad, 0xad, 0x30, 0x18, 0x3c, 0x2b, 0xee, 0xe2, 0x41,
0x8b, 0xc9, 0x41, 0xff, 0xc0, 0x80, 0x73, 0x37, 0x3d, 0x46, 0xc2, 0x33, 0x2a, 0x94, 0xdf, 0x2f,
0xc0, 0x9a, 0x5c, 0xb5, 0xdb, 0x31, 0xfa, 0x69, 0x72, 0xb9, 0x0a, 0x65, 0xa9, 0x55, 0x82, 0xcd,
0x86, 0xa5, 0xbe, 0xd0, 0x45, 0x00, 0x7a, 0x80, 0x43, 0x87, 0x76, 0xfd, 0x61, 0xbf, 0x5d, 0x5a,
0x37, 0x36, 0x4a, 0x56, 0x4d, 0x42, 0x1e, 0x0c, 0xfb, 0xc8, 0x82, 0x73, 0x76, 0xe0, 0x53, 0x97,
0x32, 0xe2, 0xdb, 0x47, 0x5d, 0x8f, 0x3c, 0x26, 0x5e, 0xbb, 0xbc, 0x6e, 0x6c, 0x2c, 0x6c, 0xbe,
0xa2, 0xe5, 0xfb, 0xf6, 0x08, 0xfb, 0x1e, 0x47, 0xb6, 0x5a, 0x76, 0x06, 0x62, 0x7e, 0xd7, 0x80,
0x15, 0xae, 0x30, 0x67, 0x42, 0x30, 0xe6, 0x9f, 0x1b, 0xb0, 0x7c, 0x17, 0xd3, 0xb3, 0xb1, 0x4a,
0x17, 0x01, 0x98, 0xdb, 0x27, 0x5d, 0xca, 0x70, 0x7f, 0x20, 0x56, 0x6a, 0xde, 0xaa, 0x71, 0xc8,
0x2e, 0x07, 0x98, 0xdf, 0x84, 0xc6, 0xad, 0x20, 0xf0, 0x2c, 0x42, 0x07, 0x81, 0x4f, 0x09, 0xba,
0x01, 0x65, 0xca, 0x30, 0x1b, 0x52, 0xc5, 0xe4, 0x79, 0x2d, 0x93, 0xbb, 0x02, 0xc5, 0x52, 0xa8,
0x5c, 0x5f, 0x1f, 0x63, 0x6f, 0x28, 0x79, 0xac, 0x5a, 0xf2, 0xc3, 0xfc, 0x04, 0x16, 0x76, 0x59,
0xe8, 0xfa, 0xbd, 0x9f, 0x62, 0xe7, 0xb5, 0xa8, 0xf3, 0x7f, 0x37, 0xe0, 0x85, 0x2d, 0x42, 0xed,
0xd0, 0xdd, 0x3b, 0x23, 0xdb, 0xc1, 0x84, 0xc6, 0x08, 0xb2, 0xbd, 0x25, 0x44, 0x5d, 0xb4, 0x52,
0xb0, 0xcc, 0x62, 0x94, 0xb2, 0x8b, 0xf1, 0xed, 0x12, 0x74, 0x74, 0x93, 0x9a, 0x45, 0x7c, 0x3f,
0x1f, 0xef, 0xd2, 0x82, 0x20, 0xca, 0xec, 0x31, 0x75, 0x2e, 0x8c, 0x46, 0xdb, 0x15, 0x80, 0x78,
0x33, 0x67, 0x67, 0x55, 0xd4, 0xcc, 0x6a, 0x13, 0x56, 0x1e, 0xbb, 0x21, 0x1b, 0x62, 0xaf, 0x6b,
0x1f, 0x60, 0xdf, 0x27, 0x9e, 0x90, 0x13, 0x37, 0x5f, 0xc5, 0x8d, 0x9a, 0xb5, 0xa4, 0x1a, 0x6f,
0xcb, 0x36, 0x2e, 0x2c, 0x8a, 0xde, 0x82, 0xd5, 0xc1, 0xc1, 0x11, 0x75, 0xed, 0x31, 0xa2, 0x92,
0x20, 0x5a, 0x8e, 0x5a, 0x53, 0x54, 0x57, 0xe1, 0x9c, 0x2d, 0x2c, 0xa0, 0xd3, 0xe5, 0x52, 0x93,
0x62, 0x2c, 0x0b, 0x31, 0xb6, 0x54, 0xc3, 0x47, 0x11, 0x9c, 0xb3, 0x15, 0x21, 0x0f, 0x99, 0x9d,
0x20, 0xa8, 0x08, 0x82, 0x25, 0xd5, 0xf8, 0x31, 0xb3, 0x47, 0x34, 0x69, 0xdb, 0x55, 0xcd, 0xda,
0xae, 0x36, 0x54, 0x84, 0x2d, 0x26, 0xb4, 0x5d, 0x13, 0x6c, 0x46, 0x9f, 0x68, 0x1b, 0x16, 0x29,
0xc3, 0x21, 0xeb, 0x0e, 0x02, 0xea, 0x72, 0xb9, 0xd0, 0x36, 0xac, 0x17, 0x37, 0xea, 0x9b, 0xeb,
0xda, 0x45, 0xfa, 0x90, 0x1c, 0x6d, 0x61, 0x86, 0x77, 0xb0, 0x1b, 0x5a, 0x0b, 0x82, 0x70, 0x27,
0xa2, 0xd3, 0x1b, 0xc8, 0xfa, 0x4c, 0x06, 0x52, 0xa7, 0xc5, 0x0d, 0xad, 0xed, 0xfa, 0x91, 0x01,
0x2b, 0xf7, 0x02, 0xec, 0x9c, 0x8d, 0x3d, 0xf5, 0x0a, 0x2c, 0x84, 0x64, 0xe0, 0xb9, 0x36, 0xe6,
0xeb, 0xb1, 0x47, 0x42, 0xb1, 0xab, 0x4a, 0x56, 0x53, 0x41, 0x1f, 0x08, 0xa0, 0xf9, 0xb9, 0x01,
0x6d, 0x8b, 0x78, 0x04, 0xd3, 0xb3, 0x61, 0x0b, 0xcc, 0xef, 0x1b, 0xf0, 0xe2, 0x1d, 0xc2, 0x12,
0xbb, 0x8a, 0x61, 0xe6, 0x52, 0xe6, 0xda, 0xa7, 0xe9, 0x57, 0x98, 0xdf, 0x33, 0xe0, 0x52, 0x2e,
0x5b, 0xb3, 0x18, 0x99, 0x77, 0xa0, 0xc4, 0x7f, 0xd1, 0x76, 0x41, 0xe8, 0xfc, 0xe5, 0x3c, 0x9d,
0xff, 0x3a, 0xb7, 0xdd, 0x42, 0xe9, 0x25, 0xbe, 0xf9, 0x5f, 0x06, 0xac, 0xee, 0x1e, 0x04, 0x87,
0x23, 0x96, 0x9e, 0x85, 0x80, 0xd2, 0x66, 0xb7, 0x98, 0x31, 0xbb, 0xe8, 0x4d, 0x98, 0x67, 0x47,
0x03, 0x22, 0x74, 0x6b, 0x61, 0xf3, 0xe2, 0x35, 0x8d, 0x3b, 0x7d, 0x8d, 0x33, 0xf9, 0xd1, 0xd1,
0x80, 0x58, 0x02, 0x15, 0xbd, 0x0e, 0xad, 0x8c, 0xc8, 0x23, 0xc3, 0xb5, 0x98, 0x96, 0x39, 0x35,
0x7f, 0x5c, 0x80, 0xb5, 0xb1, 0x29, 0xce, 0x22, 0x6c, 0xdd, 0xd8, 0x05, 0xed, 0xd8, 0x7c, 0xff,
0x24, 0x50, 0x5d, 0x87, 0x7b, 0xbc, 0xc5, 0x8d, 0xa2, 0xd5, 0x4c, 0xd8, 0x6f, 0x87, 0xa2, 0x37,
0x00, 0x8d, 0x99, 0x55, 0x69, 0xbd, 0xe7, 0xad, 0x73, 0x59, 0xbb, 0x2a, 0x6c, 0xb7, 0xd6, 0xb0,
0x4a, 0x11, 0xcc, 0x5b, 0xcb, 0x1a, 0xcb, 0x4a, 0xd1, 0x9b, 0xb0, 0xec, 0xfa, 0xf7, 0x49, 0x3f,
0x08, 0x8f, 0xba, 0x03, 0x12, 0xda, 0xc4, 0x67, 0xb8, 0x47, 0x68, 0xbb, 0x2c, 0x38, 0x5a, 0x8a,
0xda, 0x76, 0x46, 0x4d, 0xe6, 0x5f, 0x1b, 0xb0, 0x2a, 0x3d, 0xde, 0x1d, 0x1c, 0x32, 0xf7, 0x0c,
0x58, 0xa3, 0x41, 0xc4, 0x87, 0xc4, 0x93, 0xfe, 0x79, 0x33, 0x86, 0x8a, 0x5d, 0xf6, 0x43, 0x03,
0x96, 0xb9, 0x33, 0xfa, 0x3c, 0xf1, 0xfc, 0x57, 0x06, 0x2c, 0xdd, 0xc5, 0xf4, 0x79, 0x62, 0xf9,
0x27, 0xea, 0xa4, 0x8a, 0x79, 0x3e, 0xd5, 0x2b, 0xdb, 0x6b, 0xb0, 0x98, 0x66, 0x3a, 0xf2, 0x7e,
0x16, 0x52, 0x5c, 0x53, 0xcd, 0x91, 0x56, 0xd2, 0x1d, 0x69, 0x7f, 0x3b, 0x3a, 0xd2, 0x9e, 0xaf,
0x09, 0x9a, 0x7f, 0x67, 0xc0, 0xc5, 0x3b, 0x84, 0xc5, 0x5c, 0x9f, 0x89, 0xa3, 0x6f, 0x5a, 0xa5,
0xfa, 0x5c, 0x1e, 0xdc, 0x5a, 0xe6, 0x4f, 0xe5, 0x80, 0xfc, 0x6e, 0x01, 0x56, 0xf8, 0xe9, 0x71,
0x36, 0x94, 0x60, 0x9a, 0x3b, 0x8e, 0x46, 0x51, 0x4a, 0xda, 0x9d, 0x10, 0x1d, 0xbb, 0xe5, 0xa9,
0x8f, 0x5d, 0xf3, 0x47, 0x05, 0xe9, 0x2e, 0x24, 0xa5, 0x31, 0xcb, 0xb2, 0x68, 0x78, 0x2d, 0x68,
0x79, 0x35, 0xa1, 0x11, 0x43, 0xb6, 0xb7, 0xa2, 0x63, 0x34, 0x05, 0x3b, 0xb3, 0xa7, 0xe8, 0x67,
0x06, 0xac, 0x46, 0xb7, 0xca, 0x5d, 0xd2, 0xeb, 0x13, 0x9f, 0x3d, 0xbd, 0x0e, 0x65, 0x35, 0xa0,
0xa0, 0xd1, 0x80, 0x0b, 0x50, 0xa3, 0x72, 0x9c, 0xf8, 0xc2, 0x38, 0x02, 0x98, 0x7f, 0x6f, 0xc0,
0xda, 0x18, 0x3b, 0xb3, 0x2c, 0x62, 0x1b, 0x2a, 0xae, 0xef, 0x90, 0x27, 0x31, 0x37, 0xd1, 0x27,
0x6f, 0xd9, 0x1b, 0xba, 0x9e, 0x13, 0xb3, 0x11, 0x7d, 0xa2, 0xcb, 0xd0, 0x20, 0x3e, 0xde, 0xf3,
0x48, 0x57, 0xe0, 0x0a, 0x45, 0xae, 0x5a, 0x75, 0x09, 0xdb, 0xe6, 0x20, 0x4e, 0xbc, 0xef, 0x12,
0x41, 0x5c, 0x92, 0xc4, 0xea, 0xd3, 0xfc, 0x2d, 0x03, 0x96, 0xb8, 0x16, 0x2a, 0xee, 0xe9, 0xb3,
0x95, 0xe6, 0x3a, 0xd4, 0x13, 0x6a, 0xa6, 0x26, 0x92, 0x04, 0x99, 0x8f, 0x60, 0x39, 0xcd, 0xce,
0x2c, 0xd2, 0x7c, 0x11, 0x20, 0x5e, 0x2b, 0xb9, 0x1b, 0x8a, 0x56, 0x02, 0x62, 0x7e, 0x56, 0x88,
0x62, 0xc7, 0x42, 0x4c, 0xa7, 0x1c, 0xda, 0x12, 0x4b, 0x92, 0xb4, 0xe7, 0x35, 0x01, 0x11, 0xcd,
0x5b, 0xd0, 0x20, 0x4f, 0x58, 0x88, 0xbb, 0x03, 0x1c, 0xe2, 0xbe, 0xdc, 0x56, 0x53, 0x99, 0xde,
0xba, 0x20, 0xdb, 0x11, 0x54, 0x7c, 0x10, 0xa1, 0x22, 0x72, 0x90, 0xb2, 0x1c, 0x44, 0x40, 0xc4,
0x81, 0xf1, 0x4f, 0xdc, 0xd9, 0x53, 0xda, 0x7c, 0xd6, 0x05, 0x92, 0x9e, 0x4a, 0x29, 0x3b, 0x95,
0x3f, 0x33, 0xa0, 0x25, 0xa6, 0x20, 0xe7, 0x33, 0xe0, 0xdd, 0x66, 0x68, 0x8c, 0x0c, 0xcd, 0x84,
0xbd, 0xf7, 0x73, 0x50, 0x56, 0x72, 0x2f, 0x4e, 0x2b, 0x77, 0x45, 0x70, 0xcc, 0x34, 0xcc, 0x3f,
0x36, 0x60, 0x25, 0x23, 0xf2, 0x59, 0x14, 0xfe, 0x23, 0x40, 0x72, 0x86, 0xce, 0x68, 0xda, 0xd1,
0x39, 0xfd, 0x8a, 0xf6, 0x50, 0xca, 0x0a, 0xc9, 0x3a, 0xe7, 0x66, 0x20, 0xd4, 0xfc, 0x57, 0x03,
0x2e, 0xdc, 0x21, 0x4c, 0xa0, 0xde, 0xe2, 0x46, 0x67, 0x27, 0x0c, 0x7a, 0x21, 0xa1, 0xf4, 0xf9,
0xd5, 0x8f, 0xdf, 0x95, 0x8e, 0x9d, 0x6e, 0x4a, 0xb3, 0xc8, 0xff, 0x32, 0x34, 0xc4, 0x18, 0xc4,
0xe9, 0x86, 0xc1, 0x21, 0x55, 0x7a, 0x54, 0x57, 0x30, 0x2b, 0x38, 0x14, 0x0a, 0xc1, 0x02, 0x86,
0x3d, 0x89, 0xa0, 0x4e, 0x14, 0x01, 0xe1, 0xcd, 0x62, 0x0f, 0x46, 0x8c, 0xf1, 0xce, 0xc9, 0xf3,
0x2b, 0xe3, 0x3f, 0x35, 0x60, 0x25, 0x33, 0x95, 0x59, 0x64, 0xfb, 0x25, 0xe9, 0x76, 0xca, 0xc9,
0x2c, 0x6c, 0x5e, 0xd2, 0xd2, 0x24, 0x06, 0x93, 0xd8, 0xe8, 0x12, 0xd4, 0xf7, 0xb1, 0xeb, 0x75,
0x43, 0x82, 0x69, 0xe0, 0xab, 0x89, 0x02, 0x07, 0x59, 0x02, 0x62, 0xfe, 0xa3, 0x21, 0x13, 0x74,
0xcf, 0xb9, 0xc5, 0xfb, 0x93, 0x02, 0x34, 0xb7, 0x7d, 0x4a, 0x42, 0x76, 0xf6, 0xaf, 0x26, 0xe8,
0x7d, 0xa8, 0x8b, 0x89, 0xd1, 0xae, 0x83, 0x19, 0x56, 0xa7, 0xd9, 0x8b, 0xda, 0x68, 0xfe, 0x07,
0x1c, 0x6f, 0x0b, 0x33, 0x6c, 0x49, 0xe9, 0x50, 0xfe, 0x1b, 0x9d, 0x87, 0xda, 0x01, 0xa6, 0x07,
0xdd, 0x47, 0xe4, 0x48, 0xfa, 0x8b, 0x4d, 0xab, 0xca, 0x01, 0x1f, 0x92, 0x23, 0x8a, 0x5e, 0x80,
0xaa, 0x3f, 0xec, 0xcb, 0x0d, 0x56, 0x59, 0x37, 0x36, 0x9a, 0x56, 0xc5, 0x1f, 0xf6, 0xc5, 0xf6,
0xfa, 0xe7, 0x02, 0x2c, 0xdc, 0x1f, 0xf2, 0x8b, 0x90, 0xc8, 0x45, 0x0c, 0x3d, 0xf6, 0x74, 0xca,
0x78, 0x05, 0x8a, 0xd2, 0xa5, 0xe0, 0x14, 0x6d, 0x2d, 0xe3, 0xdb, 0x5b, 0xd4, 0xe2, 0x48, 0x22,
0x0e, 0x3f, 0xb4, 0x6d, 0xe5, 0x9d, 0x15, 0x05, 0xb3, 0x35, 0x0e, 0x91, 0xbe, 0xd9, 0x79, 0xa8,
0x91, 0x30, 0x8c, 0x7d, 0x37, 0x31, 0x15, 0x12, 0x86, 0xb2, 0xd1, 0x84, 0x06, 0xb6, 0x1f, 0xf9,
0xc1, 0xa1, 0x47, 0x9c, 0x1e, 0x71, 0xc4, 0xb2, 0x57, 0xad, 0x14, 0x4c, 0x2a, 0x06, 0x5f, 0xf8,
0xae, 0xed, 0x33, 0x71, 0xaa, 0x17, 0xb9, 0x62, 0x70, 0xc8, 0x6d, 0x9f, 0xf1, 0x66, 0x87, 0x78,
0x84, 0x11, 0xd1, 0x5c, 0x91, 0xcd, 0x12, 0xa2, 0x9a, 0x87, 0x83, 0x98, 0xba, 0x2a, 0x9b, 0x25,
0x84, 0x37, 0x5f, 0x80, 0xda, 0x28, 0xd9, 0x50, 0x1b, 0x45, 0x1b, 0x05, 0xc0, 0xfc, 0x89, 0x01,
0xcd, 0x2d, 0xd1, 0xd5, 0x73, 0xa0, 0x74, 0x08, 0xe6, 0xc9, 0x93, 0x41, 0xa8, 0xb6, 0x8e, 0xf8,
0x3d, 0x51, 0x8f, 0xcc, 0xc7, 0xd0, 0xda, 0xf1, 0xb0, 0x4d, 0x0e, 0x02, 0xcf, 0x21, 0xa1, 0x38,
0xdb, 0x51, 0x0b, 0x8a, 0x0c, 0xf7, 0x94, 0xf3, 0xc0, 0x7f, 0xa2, 0x2f, 0xab, 0xab, 0x9f, 0x34,
0x4b, 0x2f, 0x6b, 0x4f, 0xd9, 0x44, 0x37, 0x89, 0xc0, 0xeb, 0x2a, 0x94, 0x45, 0x02, 0x50, 0xba,
0x15, 0x0d, 0x4b, 0x7d, 0x99, 0x0f, 0x53, 0xe3, 0xde, 0x09, 0x83, 0xe1, 0x00, 0x6d, 0x43, 0x63,
0x30, 0x82, 0x71, 0x5d, 0xcd, 0x3f, 0xd3, 0xb3, 0x4c, 0x5b, 0x29, 0x52, 0xf3, 0xbf, 0x8b, 0xd0,
0xdc, 0x25, 0x38, 0xb4, 0x0f, 0x9e, 0x8b, 0x20, 0x53, 0x0b, 0x8a, 0x0e, 0xf5, 0xd4, 0xaa, 0xf1,
0x9f, 0xe8, 0x2a, 0x9c, 0x4b, 0x4c, 0xa8, 0xdb, 0xe3, 0x02, 0x12, 0x7a, 0xdf, 0xb0, 0x5a, 0x83,
0xac, 0xe0, 0xde, 0x81, 0xaa, 0x43, 0xbd, 0xae, 0x58, 0xa2, 0x8a, 0x58, 0x22, 0xfd, 0xfc, 0xb6,
0xa8, 0x27, 0x96, 0xa6, 0xe2, 0xc8, 0x1f, 0xe8, 0x25, 0x68, 0x06, 0x43, 0x36, 0x18, 0xb2, 0xae,
0xb4, 0x3b, 0xed, 0xaa, 0x60, 0xaf, 0x21, 0x81, 0xc2, 0x2c, 0x51, 0xf4, 0x01, 0x34, 0xa9, 0x10,
0x65, 0xe4, 0x98, 0xd7, 0xa6, 0x75, 0x10, 0x1b, 0x92, 0x4e, 0x79, 0xe6, 0xaf, 0x43, 0x8b, 0x85,
0xf8, 0x31, 0xf1, 0x12, 0xa9, 0x3d, 0x10, 0xbb, 0x6d, 0x51, 0xc2, 0x47, 0x69, 0xbd, 0xeb, 0xb0,
0xd4, 0x1b, 0xe2, 0x10, 0xfb, 0x8c, 0x90, 0x04, 0x76, 0x5d, 0x60, 0xa3, 0xb8, 0x29, 0x26, 0x30,
0x3f, 0x84, 0xf9, 0xbb, 0x2e, 0x13, 0x82, 0xe4, 0x36, 0xcb, 0x10, 0xd7, 0x20, 0x61, 0x99, 0x5e,
0x80, 0x6a, 0x18, 0x1c, 0x4a, 0x1b, 0x5c, 0x10, 0x2a, 0x58, 0x09, 0x83, 0x43, 0x61, 0x60, 0x45,
0x41, 0x44, 0x10, 0x2a, 0xdd, 0x2c, 0x58, 0xea, 0xcb, 0xfc, 0x4b, 0x63, 0xa4, 0x3c, 0xdc, 0x7c,
0xd2, 0xa7, 0xb3, 0x9f, 0xef, 0x43, 0x25, 0x94, 0xf4, 0x13, 0x53, 0xb9, 0xc9, 0x91, 0xc4, 0x19,
0x10, 0x51, 0x4d, 0x9f, 0x27, 0xfa, 0x55, 0x03, 0x1a, 0x1f, 0x78, 0x43, 0xfa, 0x2c, 0x94, 0x5d,
0x97, 0xbd, 0x28, 0xea, 0x33, 0x27, 0xbf, 0x5d, 0x80, 0xa6, 0x62, 0x63, 0x16, 0x27, 0x28, 0x97,
0x95, 0x5d, 0xa8, 0xf3, 0x21, 0xbb, 0x94, 0xf4, 0xa2, 0x98, 0x4e, 0x7d, 0x73, 0x53, 0x6b, 0x1e,
0x52, 0x6c, 0x88, 0x6c, 0xf9, 0xae, 0x20, 0xfa, 0x45, 0x9f, 0x85, 0x47, 0x16, 0xd8, 0x31, 0xa0,
0xf3, 0x10, 0x16, 0x33, 0xcd, 0x5c, 0x89, 0x1e, 0x91, 0xa3, 0xc8, 0xfe, 0x3d, 0x22, 0x47, 0xe8,
0xad, 0x64, 0x4d, 0x43, 0xde, 0x29, 0x7e, 0x2f, 0xf0, 0x7b, 0x37, 0xc3, 0x10, 0x1f, 0xa9, 0x9a,
0x87, 0x77, 0x0b, 0x5f, 0x36, 0xcc, 0x7f, 0x28, 0x40, 0xe3, 0x6b, 0x43, 0x12, 0x1e, 0x9d, 0xa6,
0x1d, 0x8a, 0x4e, 0x85, 0xf9, 0xc4, 0xa9, 0x30, 0xb6, 0xf5, 0x4b, 0x9a, 0xad, 0xaf, 0x31, 0x60,
0x65, 0xad, 0x01, 0xd3, 0xed, 0xed, 0xca, 0x89, 0xf6, 0x76, 0x35, 0x77, 0x6f, 0xff, 0x85, 0x11,
0x8b, 0x70, 0xa6, 0xdd, 0x98, 0x72, 0xc7, 0x0a, 0x27, 0x76, 0xc7, 0xa6, 0xde, 0x8d, 0x3f, 0x34,
0xa0, 0xf6, 0x75, 0x62, 0xb3, 0x20, 0xe4, 0xf6, 0x47, 0x43, 0x66, 0x4c, 0xe1, 0x1a, 0x17, 0xb2,
0xae, 0xf1, 0x0d, 0xa8, 0xba, 0x4e, 0x17, 0x73, 0xfd, 0x12, 0xe3, 0x4e, 0x72, 0xc9, 0x2a, 0xae,
0x23, 0x14, 0x71, 0xfa, 0x24, 0xc0, 0xef, 0x19, 0xd0, 0x90, 0x3c, 0x53, 0x49, 0xf9, 0x5e, 0x62,
0x38, 0x43, 0xa7, 0xf4, 0xea, 0x23, 0x9e, 0xe8, 0xdd, 0xb9, 0xd1, 0xb0, 0x37, 0x01, 0xb8, 0x90,
0x15, 0xb9, 0xdc, 0x33, 0xeb, 0x5a, 0x6e, 0x25, 0xb9, 0x10, 0xf8, 0xdd, 0x39, 0xab, 0xc6, 0xa9,
0x44, 0x17, 0xb7, 0x2a, 0x50, 0x12, 0xd4, 0xe6, 0xff, 0x19, 0xb0, 0x74, 0x1b, 0x7b, 0xf6, 0x96,
0x4b, 0x19, 0xf6, 0xed, 0x19, 0x9c, 0xb0, 0x77, 0xa1, 0x12, 0x0c, 0xba, 0x1e, 0xd9, 0x67, 0x8a,
0xa5, 0xcb, 0x13, 0x66, 0x24, 0xc5, 0x60, 0x95, 0x83, 0xc1, 0x3d, 0xb2, 0xcf, 0xd0, 0x57, 0xa0,
0x1a, 0x0c, 0xba, 0xa1, 0xdb, 0x3b, 0x60, 0x4a, 0xfa, 0x53, 0x10, 0x57, 0x82, 0x81, 0xc5, 0x29,
0x12, 0xb1, 0x95, 0xf9, 0x13, 0xc6, 0x56, 0xcc, 0x7f, 0x1b, 0x9b, 0xfe, 0x0c, 0x7b, 0xe0, 0x5d,
0xa8, 0xba, 0x3e, 0xeb, 0x3a, 0x2e, 0x8d, 0x44, 0x70, 0x51, 0xaf, 0x43, 0x3e, 0x13, 0x33, 0x10,
0x6b, 0xea, 0x33, 0x3e, 0x36, 0xfa, 0x2a, 0xc0, 0xbe, 0x17, 0x60, 0x45, 0x2d, 0x65, 0x70, 0x49,
0xbf, 0x7d, 0x38, 0x5a, 0x44, 0x5f, 0x13, 0x44, 0xbc, 0x87, 0xd1, 0x92, 0xfe, 0x8b, 0x01, 0x2b,
0x3b, 0x24, 0x94, 0x15, 0x2f, 0x4c, 0x85, 0x41, 0xb7, 0xfd, 0xfd, 0x20, 0x1d, 0x89, 0x36, 0x32,
0x91, 0xe8, 0x9f, 0x4e, 0xf4, 0x35, 0x75, 0x73, 0x92, 0xf9, 0x90, 0xe8, 0xe6, 0x14, 0x65, 0x7d,
0xe4, 0xcd, 0x73, 0x21, 0x67, 0x99, 0x14, 0xbf, 0xc9, 0x0b, 0xb8, 0xf9, 0x3b, 0xb2, 0x50, 0x43,
0x3b, 0xa9, 0xa7, 0x57, 0xd8, 0x55, 0x50, 0x96, 0x3e, 0x63, 0xf7, 0x5f, 0x85, 0x8c, 0xed, 0xc8,
0x31, 0x44, 0x3f, 0x30, 0x60, 0x3d, 0x9f, 0xab, 0x59, 0x8e, 0xe8, 0xaf, 0x42, 0xc9, 0xf5, 0xf7,
0x83, 0x28, 0xec, 0x76, 0x45, 0xef, 0xa2, 0x6b, 0xc7, 0x95, 0x84, 0xe6, 0xdf, 0x14, 0xa0, 0x25,
0x8c, 0xfa, 0x29, 0x2c, 0x7f, 0x9f, 0xf4, 0xbb, 0xd4, 0xfd, 0x94, 0x44, 0xcb, 0xdf, 0x27, 0xfd,
0x5d, 0xf7, 0x53, 0x92, 0xd2, 0x8c, 0x52, 0x5a, 0x33, 0x26, 0x47, 0x95, 0x93, 0x61, 0xd5, 0x4a,
0x3a, 0xac, 0xba, 0x0a, 0x65, 0x3f, 0x70, 0xc8, 0xf6, 0x96, 0xba, 0x76, 0xaa, 0xaf, 0x91, 0xaa,
0xd5, 0x4e, 0xa8, 0x6a, 0x9f, 0x1b, 0xd0, 0xb9, 0x43, 0x58, 0x56, 0x76, 0xa7, 0xa7, 0x65, 0xdf,
0x33, 0xe0, 0xbc, 0x96, 0xa1, 0x59, 0x14, 0xec, 0xbd, 0xb4, 0x82, 0xe9, 0xef, 0x80, 0x63, 0x43,
0x2a, 0xdd, 0x7a, 0x13, 0x1a, 0x5b, 0xc3, 0x7e, 0x3f, 0x76, 0xb9, 0x2e, 0x43, 0x23, 0x94, 0x3f,
0xe5, 0x15, 0x49, 0x9e, 0xbf, 0x75, 0x05, 0xe3, 0x17, 0x21, 0xf3, 0x2a, 0x34, 0x15, 0x89, 0xe2,
0xba, 0x03, 0xd5, 0x50, 0xfd, 0x56, 0xf8, 0xf1, 0xb7, 0xb9, 0x02, 0x4b, 0x16, 0xe9, 0x71, 0xd5,
0x0e, 0xef, 0xb9, 0xfe, 0x23, 0x35, 0x8c, 0xf9, 0x1d, 0x03, 0x96, 0xd3, 0x70, 0xd5, 0xd7, 0xdb,
0x50, 0xc1, 0x8e, 0x13, 0x12, 0x4a, 0x27, 0x2e, 0xcb, 0x4d, 0x89, 0x63, 0x45, 0xc8, 0x09, 0xc9,
0x15, 0xa6, 0x96, 0x9c, 0xd9, 0x85, 0x73, 0x77, 0x08, 0xbb, 0x4f, 0x58, 0x38, 0x53, 0x06, 0xbf,
0xcd, 0x2f, 0x2f, 0x82, 0x58, 0xa9, 0x45, 0xf4, 0x69, 0x7e, 0x66, 0x00, 0x4a, 0x8e, 0x30, 0xcb,
0x32, 0x27, 0xa5, 0x5c, 0x48, 0x4b, 0x59, 0xd6, 0x42, 0xf5, 0x07, 0x81, 0x4f, 0x7c, 0x96, 0x74,
0xb7, 0x9a, 0x31, 0x34, 0x2a, 0x2b, 0x41, 0xf7, 0x02, 0xec, 0xdc, 0xc2, 0xde, 0x6c, 0xee, 0xc1,
0x45, 0x00, 0x1a, 0xda, 0x5d, 0xb5, 0x5b, 0x0b, 0xca, 0xfa, 0x84, 0xf6, 0x03, 0xb9, 0x61, 0x2f,
0x41, 0xdd, 0xa1, 0x4c, 0x35, 0x47, 0x09, 0x65, 0x70, 0x28, 0x93, 0xed, 0xa2, 0xd6, 0x95, 0x12,
0xec, 0x11, 0xa7, 0x9b, 0xc8, 0xc7, 0xcd, 0x0b, 0xb4, 0x96, 0x6c, 0xd8, 0x8d, 0xe1, 0x9a, 0xcd,
0x55, 0xd2, 0x6e, 0xae, 0x87, 0xb0, 0x76, 0x1f, 0xfb, 0x43, 0xec, 0xdd, 0x0e, 0xfa, 0x03, 0x9c,
0xaa, 0x93, 0xcc, 0x9a, 0x43, 0x43, 0x63, 0x0e, 0x5f, 0x94, 0x85, 0x74, 0xd2, 0x05, 0x17, 0x73,
0x9a, 0xb7, 0x12, 0x10, 0x93, 0x42, 0x7b, 0xbc, 0xfb, 0x59, 0x16, 0x54, 0x30, 0x15, 0x75, 0x95,
0xb4, 0xd1, 0x23, 0x98, 0xf9, 0x3e, 0xbc, 0x20, 0x8a, 0x1a, 0x23, 0x50, 0x2a, 0x05, 0x90, 0xed,
0xc0, 0xd0, 0x74, 0xf0, 0xeb, 0x05, 0x61, 0x02, 0xc7, 0x7a, 0x98, 0x85, 0xf1, 0x77, 0xd3, 0x91,
0xf7, 0x97, 0x73, 0x0a, 0x77, 0xd3, 0x23, 0xaa, 0xf0, 0xfb, 0x06, 0x2c, 0x92, 0x27, 0xc4, 0x1e,
0x32, 0xd7, 0xef, 0xed, 0x78, 0xd8, 0x7f, 0x10, 0xa8, 0x83, 0x27, 0x0b, 0x46, 0x2f, 0x43, 0x93,
0x4b, 0x3f, 0x18, 0x32, 0x85, 0x27, 0x4f, 0xa0, 0x34, 0x90, 0xf7, 0xc7, 0xe7, 0xeb, 0x11, 0x46,
0x1c, 0x85, 0x27, 0x8f, 0xa3, 0x2c, 0x78, 0x4c, 0x94, 0x1c, 0x4c, 0x4f, 0x22, 0xca, 0xff, 0x30,
0x32, 0xa2, 0x54, 0x3d, 0x9c, 0x96, 0x28, 0xef, 0x02, 0xf4, 0x49, 0xd8, 0x23, 0xdb, 0xc2, 0xf8,
0xcb, 0x1b, 0xfe, 0x86, 0xd6, 0xf8, 0x8f, 0x3a, 0xb8, 0x1f, 0x11, 0x58, 0x09, 0x5a, 0xf3, 0x0e,
0x2c, 0x69, 0x50, 0xb8, 0x5d, 0xa3, 0xc1, 0x30, 0xb4, 0x49, 0x14, 0x24, 0x8a, 0x3e, 0xf9, 0x39,
0xc8, 0x70, 0xd8, 0x23, 0x4c, 0x29, 0xad, 0xfa, 0x32, 0xdf, 0x16, 0xc9, 0x2a, 0x11, 0x50, 0x48,
0x69, 0x6a, 0x3a, 0xf1, 0x6e, 0x8c, 0x25, 0xde, 0xf7, 0x45, 0x66, 0x28, 0x49, 0x37, 0x63, 0xd1,
0xc4, 0x3e, 0xef, 0x8a, 0x38, 0xea, 0xd1, 0x46, 0xf4, 0x69, 0xfe, 0xaf, 0x01, 0xcd, 0xed, 0xfe,
0x20, 0x18, 0x25, 0x45, 0xa6, 0xbe, 0x72, 0x8e, 0x07, 0x95, 0x0b, 0xba, 0xa0, 0xf2, 0x4b, 0xd0,
0x4c, 0x97, 0xfc, 0xcb, 0xf8, 0x4f, 0xc3, 0x4e, 0x96, 0xfa, 0x9f, 0x87, 0x5a, 0x18, 0x1c, 0x76,
0xb9, 0x29, 0x75, 0x54, 0x79, 0x46, 0x35, 0x0c, 0x0e, 0xb9, 0x81, 0x75, 0xd0, 0x32, 0x94, 0xf6,
0x5d, 0x2f, 0xae, 0x2c, 0x92, 0x1f, 0xe8, 0x3d, 0x7e, 0x21, 0x93, 0xe9, 0xdb, 0xf2, 0xb4, 0xf7,
0xa2, 0x88, 0xc2, 0xfc, 0x04, 0x16, 0xa2, 0x59, 0xcf, 0xf8, 0x5a, 0x85, 0x61, 0xfa, 0x28, 0xaa,
0x9c, 0x90, 0x1f, 0xe6, 0x55, 0x99, 0xd5, 0x13, 0xfd, 0xa7, 0x16, 0x1d, 0xc1, 0x3c, 0xc7, 0x50,
0x7b, 0x49, 0xfc, 0xe6, 0x0b, 0xb0, 0x9a, 0xc5, 0x9e, 0x85, 0xa5, 0xb7, 0xd3, 0xfb, 0x47, 0xff,
0x20, 0x21, 0x39, 0x9a, 0xda, 0x3b, 0x6a, 0x05, 0xec, 0x60, 0xe8, 0x33, 0x65, 0x80, 0xf8, 0x0a,
0xdc, 0xe6, 0xdf, 0x68, 0x0d, 0x2a, 0xae, 0xd3, 0xf5, 0xf8, 0xdd, 0x4d, 0x9e, 0x49, 0x65, 0xd7,
0xb9, 0xc7, 0xef, 0x75, 0xef, 0x44, 0x9e, 0xd6, 0xd4, 0xe5, 0x16, 0xca, 0xcb, 0xfa, 0xbe, 0xf4,
0x03, 0x2c, 0x59, 0x06, 0xf9, 0x8c, 0x8b, 0x6a, 0x36, 0xa0, 0x75, 0xe8, 0xb2, 0x83, 0xae, 0x78,
0xda, 0x21, 0x0e, 0x61, 0x99, 0x57, 0xae, 0x5a, 0x0b, 0x1c, 0xbe, 0xcb, 0xc1, 0xfc, 0x20, 0xa6,
0xe6, 0x6f, 0x18, 0xb0, 0x94, 0x62, 0x6b, 0x96, 0xa5, 0xf8, 0x0a, 0xf7, 0x4f, 0x64, 0x47, 0xca,
0x13, 0x5d, 0xd7, 0x1a, 0x23, 0x35, 0x9a, 0x30, 0x42, 0x31, 0x85, 0xf9, 0x9f, 0x06, 0xd4, 0x13,
0x2d, 0xfc, 0x7a, 0xa3, 0xda, 0x46, 0xd7, 0x9b, 0x18, 0x30, 0x95, 0x18, 0x5e, 0x82, 0xd1, 0xd6,
0x4c, 0x94, 0x87, 0x27, 0xea, 0xda, 0x1c, 0x8a, 0xee, 0xc2, 0x82, 0x14, 0x53, 0xcc, 0xba, 0x36,
0xea, 0x10, 0x57, 0xec, 0xe1, 0xd0, 0x51, 0x5c, 0x5a, 0x4d, 0x9a, 0xf8, 0x92, 0x49, 0xc6, 0xc0,
0x21, 0x62, 0xa4, 0x92, 0xb4, 0x96, 0xc2, 0xff, 0x71, 0x28, 0xbf, 0x86, 0x34, 0x92, 0xa4, 0xdc,
0x95, 0xf3, 0x08, 0x76, 0x48, 0x18, 0xcf, 0x2d, 0xfe, 0xe6, 0xbe, 0x93, 0xfc, 0xdd, 0xe5, 0xae,
0xad, 0x32, 0x32, 0x20, 0x41, 0xdc, 0xeb, 0x45, 0xaf, 0xc2, 0xa2, 0xd3, 0x4f, 0xbd, 0x2b, 0x8a,
0x9c, 0x3d, 0xa7, 0x9f, 0x78, 0x50, 0x94, 0x62, 0x68, 0x3e, 0xcd, 0xd0, 0xff, 0x18, 0xf1, 0x6b,
0xcb, 0x90, 0x38, 0xc4, 0x67, 0x2e, 0xf6, 0x9e, 0x5e, 0x27, 0x3b, 0x50, 0x1d, 0x52, 0x12, 0x26,
0x6c, 0x62, 0xfc, 0xcd, 0xdb, 0x06, 0x98, 0xd2, 0xc3, 0x20, 0x74, 0x14, 0x97, 0xf1, 0xf7, 0x84,
0x22, 0x41, 0xf9, 0x92, 0x4f, 0x5f, 0x24, 0xf8, 0x36, 0xac, 0xf5, 0x03, 0xc7, 0xdd, 0x77, 0x75,
0xb5, 0x85, 0x9c, 0x6c, 0x25, 0x6a, 0x4e, 0xd1, 0x99, 0x3f, 0x28, 0xc0, 0xda, 0xc7, 0x03, 0xe7,
0x67, 0x30, 0xe7, 0x75, 0xa8, 0x07, 0x9e, 0xb3, 0x93, 0x9e, 0x76, 0x12, 0xc4, 0x31, 0x7c, 0x72,
0x18, 0x63, 0xc8, 0x50, 0x73, 0x12, 0x34, 0xb1, 0x80, 0xf2, 0xa9, 0x64, 0x53, 0x9e, 0x24, 0x9b,
0x1e, 0xac, 0xc9, 0xac, 0xed, 0x33, 0x16, 0x8d, 0xf9, 0x2b, 0xb0, 0xc2, 0x0d, 0x29, 0x1f, 0xe6,
0x63, 0x4a, 0xc2, 0x19, 0x2d, 0xce, 0x05, 0xa8, 0x45, 0x3d, 0x47, 0xb5, 0xad, 0x23, 0x80, 0x79,
0x17, 0x96, 0x33, 0x63, 0x3d, 0xe5, 0x8c, 0xae, 0x5c, 0x86, 0x6a, 0x54, 0xab, 0x8b, 0x2a, 0x50,
0xbc, 0xe9, 0x79, 0xad, 0x39, 0xd4, 0x80, 0xea, 0xb6, 0x2a, 0x48, 0x6d, 0x19, 0x57, 0x7e, 0x01,
0x16, 0x33, 0x39, 0x5d, 0x54, 0x85, 0xf9, 0x07, 0x81, 0x4f, 0x5a, 0x73, 0xa8, 0x05, 0x8d, 0x5b,
0xae, 0x8f, 0xc3, 0x23, 0x19, 0xf1, 0x6c, 0x39, 0x68, 0x11, 0xea, 0x22, 0xf2, 0xa7, 0x00, 0x64,
0xf3, 0xc7, 0x2f, 0x43, 0xf3, 0xbe, 0x60, 0x64, 0x97, 0x84, 0x8f, 0x5d, 0x9b, 0xa0, 0x2e, 0xb4,
0xb2, 0x0f, 0xa2, 0xd1, 0x17, 0xf4, 0xde, 0x9d, 0xfe, 0xdd, 0x74, 0x67, 0x92, 0x0c, 0xcd, 0x39,
0xf4, 0x09, 0x2c, 0xa4, 0x9f, 0x15, 0x23, 0x7d, 0x68, 0x4a, 0xfb, 0xf6, 0xf8, 0xb8, 0xce, 0xbb,
0xd0, 0x4c, 0xbd, 0x12, 0x46, 0xaf, 0x6b, 0xfb, 0xd6, 0xbd, 0x24, 0xee, 0xe8, 0x6d, 0x6f, 0xf2,
0x25, 0xaf, 0xe4, 0x3e, 0xfd, 0x94, 0x2f, 0x87, 0x7b, 0xed, 0x7b, 0xbf, 0xe3, 0xb8, 0xc7, 0x70,
0x6e, 0xec, 0xc9, 0x1d, 0x7a, 0x23, 0xe7, 0x34, 0xd3, 0x3f, 0xcd, 0x3b, 0x6e, 0x88, 0x43, 0x40,
0xe3, 0xaf, 0x61, 0xd1, 0x35, 0xfd, 0x0a, 0xe4, 0xbd, 0x05, 0xee, 0x5c, 0x9f, 0x1a, 0x3f, 0x16,
0xdc, 0xaf, 0x19, 0xb0, 0x96, 0xf3, 0x4e, 0x0e, 0xdd, 0xd0, 0x76, 0x37, 0xf9, 0xb1, 0x5f, 0xe7,
0xad, 0x93, 0x11, 0xc5, 0x8c, 0xf8, 0xb0, 0x98, 0x79, 0x3a, 0x86, 0xae, 0xe6, 0xd6, 0xc9, 0x8f,
0xbf, 0xa1, 0xeb, 0x7c, 0x61, 0x3a, 0xe4, 0x78, 0xbc, 0x87, 0xb0, 0x98, 0x79, 0x6f, 0x95, 0x33,
0x9e, 0xfe, 0x55, 0xd6, 0x71, 0x0b, 0xfa, 0x4d, 0x68, 0xa6, 0x1e, 0x46, 0xe5, 0x68, 0xbc, 0xee,
0xf1, 0xd4, 0x71, 0x5d, 0x3f, 0x84, 0x46, 0xf2, 0xfd, 0x12, 0xda, 0xc8, 0xdb, 0x4b, 0x63, 0x1d,
0x9f, 0x64, 0x2b, 0x8d, 0xde, 0x1d, 0x4c, 0xd8, 0x4a, 0x63, 0x4f, 0x35, 0xa6, 0xdf, 0x4a, 0x89,
0xfe, 0x27, 0x6e, 0xa5, 0x13, 0x0f, 0xf1, 0x1d, 0x79, 0xa7, 0xd0, 0xbc, 0x6b, 0x41, 0x9b, 0x79,
0xba, 0x99, 0xff, 0x82, 0xa7, 0x73, 0xe3, 0x44, 0x34, 0xb1, 0x14, 0x1f, 0xc1, 0x42, 0xfa, 0xf5,
0x46, 0x8e, 0x14, 0xb5, 0x0f, 0x5e, 0x3a, 0x57, 0xa7, 0xc2, 0x8d, 0x07, 0xfb, 0x18, 0xea, 0x89,
0xff, 0x38, 0x41, 0xaf, 0x4d, 0xd0, 0xe3, 0xe4, 0x1f, 0x7e, 0x1c, 0x27, 0xc9, 0xaf, 0x41, 0x2d,
0xfe, 0x6b, 0x12, 0xf4, 0x4a, 0xae, 0xfe, 0x9e, 0xa4, 0xcb, 0x5d, 0x80, 0xd1, 0xff, 0x8e, 0xa0,
0x57, 0xb5, 0x7d, 0x8e, 0xfd, 0x31, 0xc9, 0x71, 0x9d, 0xc6, 0xd3, 0x97, 0x45, 0x71, 0x93, 0xa6,
0x9f, 0xac, 0xe2, 0x3c, 0xae, 0xdb, 0x03, 0x68, 0xa6, 0x6a, 0xaf, 0xf3, 0xb6, 0xb0, 0xa6, 0x24,
0xbe, 0x73, 0x65, 0x1a, 0xd4, 0x78, 0xfd, 0x0e, 0xa0, 0x99, 0xaa, 0x84, 0xcd, 0x19, 0x49, 0x57,
0xf8, 0x9b, 0x33, 0x92, 0xb6, 0xb0, 0xd6, 0x9c, 0x43, 0xdf, 0x4e, 0x14, 0xdd, 0xa6, 0x0a, 0x9b,
0xd1, 0x9b, 0x13, 0xfb, 0xd1, 0xd5, 0x75, 0x77, 0x36, 0x4f, 0x42, 0x12, 0xb3, 0xa0, 0xb4, 0x4a,
0x8a, 0x34, 0x5f, 0xab, 0x4e, 0xb2, 0x52, 0xbb, 0x50, 0x96, 0xb5, 0xad, 0xc8, 0xcc, 0xa9, 0x62,
0x4f, 0x14, 0xbe, 0x76, 0x5e, 0xd2, 0xe2, 0xa4, 0xcb, 0x3e, 0x65, 0xa7, 0xd2, 0x0b, 0xce, 0xe9,
0x34, 0x55, 0xd8, 0x38, 0x6d, 0xa7, 0x16, 0x94, 0x65, 0xd1, 0x52, 0x4e, 0xa7, 0xa9, 0xc2, 0xbb,
0xce, 0x64, 0x1c, 0x91, 0xcd, 0x36, 0xe7, 0xd0, 0x0e, 0x94, 0x44, 0xa8, 0x0c, 0x5d, 0x9e, 0x54,
0xcf, 0x33, 0xa9, 0xc7, 0x54, 0xc9, 0x8f, 0x39, 0x87, 0x7e, 0x09, 0x4a, 0x22, 0x41, 0x94, 0xd3,
0x63, 0xb2, 0x28, 0xa7, 0x33, 0x11, 0x25, 0x62, 0xd1, 0x81, 0x46, 0x32, 0x13, 0x9f, 0x73, 0x64,
0x69, 0x6a, 0x15, 0x3a, 0xd3, 0x60, 0x46, 0xa3, 0xc8, 0x6d, 0x34, 0x0a, 0x1b, 0xe6, 0x6f, 0xa3,
0xb1, 0x90, 0x64, 0xfe, 0x36, 0x1a, 0x8f, 0x42, 0x9a, 0x73, 0xe8, 0x37, 0x0d, 0x68, 0xe7, 0xa5,
0x87, 0x51, 0xae, 0x07, 0x34, 0x29, 0xc7, 0xdd, 0xf9, 0xd2, 0x09, 0xa9, 0x62, 0x5e, 0x3e, 0x15,
0x41, 0x9b, 0xb1, 0x84, 0xf0, 0xf5, 0xbc, 0xfe, 0x72, 0xd2, 0x9f, 0x9d, 0x2f, 0x4e, 0x4f, 0x10,
0x8f, 0xbd, 0x07, 0xf5, 0x44, 0xc0, 0x28, 0xc7, 0xf2, 0x8e, 0x47, 0xba, 0x72, 0x56, 0x55, 0x13,
0x7b, 0x92, 0xea, 0x2d, 0xf2, 0x8b, 0x39, 0xca, 0x98, 0x4c, 0x57, 0xe6, 0xa8, 0x77, 0x2a, 0x3d,
0x69, 0xce, 0x21, 0x02, 0x8d, 0x64, 0xb2, 0x31, 0x47, 0x1b, 0x35, 0x79, 0xca, 0xce, 0xeb, 0x53,
0x60, 0xc6, 0xc3, 0x74, 0x01, 0x46, 0xc9, 0xbe, 0x9c, 0xb3, 0x6e, 0x2c, 0xdf, 0xd8, 0x79, 0xed,
0x58, 0xbc, 0xe4, 0xb1, 0x9f, 0x48, 0xdf, 0xe5, 0x48, 0x7f, 0x3c, 0xc1, 0x37, 0xc5, 0x5d, 0x64,
0x3c, 0x45, 0x94, 0x73, 0x17, 0xc9, 0xcd, 0x46, 0x75, 0xae, 0x4f, 0x8d, 0x1f, 0xcf, 0xe7, 0x5b,
0xd0, 0xca, 0xa6, 0xd4, 0x72, 0xee, 0xb8, 0x39, 0x89, 0xbd, 0xce, 0x1b, 0x53, 0x62, 0x27, 0xcf,
0xc3, 0xf3, 0xe3, 0x3c, 0x7d, 0xc3, 0x65, 0x07, 0x22, 0x9b, 0x33, 0xcd, 0xac, 0x93, 0x89, 0xa3,
0x69, 0x66, 0x9d, 0x4a, 0x13, 0xa9, 0xc3, 0x4b, 0x44, 0xa4, 0xf3, 0x0e, 0xaf, 0x64, 0x82, 0x22,
0xe7, 0x9c, 0x49, 0x87, 0xf3, 0xa5, 0xfb, 0x99, 0x8e, 0xab, 0xa3, 0x7c, 0x3f, 0x61, 0x2c, 0x54,
0x9f, 0xe3, 0x7e, 0xea, 0x03, 0xf5, 0x42, 0xd1, 0x5b, 0xd9, 0xf0, 0xe1, 0xe4, 0xd8, 0x44, 0x36,
0xac, 0x74, 0x7c, 0xf8, 0xa0, 0x95, 0x8d, 0xd5, 0xe5, 0x0c, 0x90, 0x13, 0xd2, 0x9b, 0x62, 0x80,
0x6c, 0xc4, 0x2b, 0x67, 0x80, 0x9c, 0xc0, 0xd8, 0x14, 0xbe, 0x64, 0x2a, 0xfa, 0x94, 0x73, 0x34,
0xe9, 0x22, 0x54, 0x39, 0x47, 0x93, 0x36, 0x70, 0x66, 0xce, 0x6d, 0x0e, 0xa1, 0xb1, 0x13, 0x06,
0x4f, 0x8e, 0xa2, 0xc0, 0xd1, 0xcf, 0xc6, 0xd8, 0xdd, 0xfa, 0x06, 0x2c, 0xb8, 0x31, 0x4e, 0x2f,
0x1c, 0xd8, 0xb7, 0xea, 0x32, 0x80, 0xb5, 0xc3, 0x89, 0x77, 0x8c, 0x5f, 0xbe, 0xd1, 0x73, 0xd9,
0xc1, 0x70, 0x8f, 0x4b, 0xe6, 0xba, 0x44, 0x7b, 0xc3, 0x0d, 0xd4, 0xaf, 0xeb, 0xae, 0xcf, 0x48,
0xe8, 0x63, 0xef, 0xba, 0x18, 0x4a, 0x41, 0x07, 0x7b, 0x7f, 0x64, 0x18, 0x7b, 0x65, 0x01, 0xba,
0xf1, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x77, 0xf8, 0xe0, 0x92, 0x08, 0x52, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -201,6 +201,7 @@ message WatchDmChannelsRequest {
schema.CollectionSchema schema = 6;
repeated data.SegmentInfo exclude_infos = 7;
LoadMetaInfo load_meta = 8;
int64 replicaID = 9;
}
message WatchDeltaChannelsRequest {
@ -224,6 +225,7 @@ message SegmentLoadInfo {
repeated int64 compactionFrom = 10; // segmentIDs compacted from
repeated FieldIndexInfo index_infos = 11;
int64 segment_size = 12;
string insert_channel = 13;
}
message FieldIndexInfo {
@ -245,6 +247,7 @@ message LoadSegmentsRequest {
int64 source_nodeID = 5;
int64 collectionID = 6;
LoadMetaInfo load_meta = 7;
int64 replicaID = 8;
}
message ReleaseSegmentsRequest {
@ -281,6 +284,7 @@ message LoadBalanceRequest {
TriggerCondition balance_reason = 3;
repeated int64 dst_nodeIDs = 4;
repeated int64 sealed_segmentIDs = 5;
int64 collectionID = 6;
}
//-------------------- internal meta proto------------------
@ -312,6 +316,7 @@ message DmChannelWatchInfo {
int64 collectionID = 1;
string dmChannel = 2;
int64 nodeID_loaded = 3;
int64 replicaID = 4;
}
message QueryChannelInfo {
@ -380,4 +385,3 @@ message SealedSegmentsChangeInfo {
common.MsgBase base = 1;
repeated SegmentChangeInfo infos = 2;
}

View File

@ -1332,6 +1332,7 @@ type WatchDmChannelsRequest struct {
Schema *schemapb.CollectionSchema `protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"`
ExcludeInfos []*datapb.SegmentInfo `protobuf:"bytes,7,rep,name=exclude_infos,json=excludeInfos,proto3" json:"exclude_infos,omitempty"`
LoadMeta *LoadMetaInfo `protobuf:"bytes,8,opt,name=load_meta,json=loadMeta,proto3" json:"load_meta,omitempty"`
ReplicaID int64 `protobuf:"varint,9,opt,name=replicaID,proto3" json:"replicaID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1418,6 +1419,13 @@ func (m *WatchDmChannelsRequest) GetLoadMeta() *LoadMetaInfo {
return nil
}
func (m *WatchDmChannelsRequest) GetReplicaID() int64 {
if m != nil {
return m.ReplicaID
}
return 0
}
type WatchDeltaChannelsRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
NodeID int64 `protobuf:"varint,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
@ -1502,6 +1510,7 @@ type SegmentLoadInfo struct {
CompactionFrom []int64 `protobuf:"varint,10,rep,packed,name=compactionFrom,proto3" json:"compactionFrom,omitempty"`
IndexInfos []*FieldIndexInfo `protobuf:"bytes,11,rep,name=index_infos,json=indexInfos,proto3" json:"index_infos,omitempty"`
SegmentSize int64 `protobuf:"varint,12,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
InsertChannel string `protobuf:"bytes,13,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1616,6 +1625,13 @@ func (m *SegmentLoadInfo) GetSegmentSize() int64 {
return 0
}
func (m *SegmentLoadInfo) GetInsertChannel() string {
if m != nil {
return m.InsertChannel
}
return ""
}
type FieldIndexInfo struct {
FieldID int64 `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
EnableIndex bool `protobuf:"varint,2,opt,name=enable_index,json=enableIndex,proto3" json:"enable_index,omitempty"`
@ -1719,6 +1735,7 @@ type LoadSegmentsRequest struct {
SourceNodeID int64 `protobuf:"varint,5,opt,name=source_nodeID,json=sourceNodeID,proto3" json:"source_nodeID,omitempty"`
CollectionID int64 `protobuf:"varint,6,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
LoadMeta *LoadMetaInfo `protobuf:"bytes,7,opt,name=load_meta,json=loadMeta,proto3" json:"load_meta,omitempty"`
ReplicaID int64 `protobuf:"varint,8,opt,name=replicaID,proto3" json:"replicaID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1798,6 +1815,13 @@ func (m *LoadSegmentsRequest) GetLoadMeta() *LoadMetaInfo {
return nil
}
func (m *LoadSegmentsRequest) GetReplicaID() int64 {
if m != nil {
return m.ReplicaID
}
return 0
}
type ReleaseSegmentsRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
NodeID int64 `protobuf:"varint,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
@ -2042,6 +2066,7 @@ type LoadBalanceRequest struct {
BalanceReason TriggerCondition `protobuf:"varint,3,opt,name=balance_reason,json=balanceReason,proto3,enum=milvus.proto.query.TriggerCondition" json:"balance_reason,omitempty"`
DstNodeIDs []int64 `protobuf:"varint,4,rep,packed,name=dst_nodeIDs,json=dstNodeIDs,proto3" json:"dst_nodeIDs,omitempty"`
SealedSegmentIDs []int64 `protobuf:"varint,5,rep,packed,name=sealed_segmentIDs,json=sealedSegmentIDs,proto3" json:"sealed_segmentIDs,omitempty"`
CollectionID int64 `protobuf:"varint,6,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -2107,10 +2132,18 @@ func (m *LoadBalanceRequest) GetSealedSegmentIDs() []int64 {
return nil
}
func (m *LoadBalanceRequest) GetCollectionID() int64 {
if m != nil {
return m.CollectionID
}
return 0
}
type DmChannelWatchInfo struct {
CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
DmChannel string `protobuf:"bytes,2,opt,name=dmChannel,proto3" json:"dmChannel,omitempty"`
NodeIDLoaded int64 `protobuf:"varint,3,opt,name=nodeID_loaded,json=nodeIDLoaded,proto3" json:"nodeID_loaded,omitempty"`
ReplicaID int64 `protobuf:"varint,4,opt,name=replicaID,proto3" json:"replicaID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -2162,6 +2195,13 @@ func (m *DmChannelWatchInfo) GetNodeIDLoaded() int64 {
return 0
}
func (m *DmChannelWatchInfo) GetReplicaID() int64 {
if m != nil {
return m.ReplicaID
}
return 0
}
type QueryChannelInfo struct {
CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
QueryChannel string `protobuf:"bytes,2,opt,name=query_channel,json=queryChannel,proto3" json:"query_channel,omitempty"`
@ -2795,177 +2835,180 @@ func init() {
func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }
var fileDescriptor_aab7cc9a69ed26e8 = []byte{
// 2707 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4b, 0x73, 0x1c, 0x49,
0x11, 0x56, 0xcf, 0x4b, 0x33, 0x39, 0x0f, 0x8d, 0x4b, 0xb6, 0x76, 0x34, 0xec, 0xae, 0xe5, 0xf6,
0x4b, 0x78, 0x59, 0xd9, 0xc8, 0x40, 0xec, 0x06, 0x70, 0xb0, 0x24, 0xac, 0x15, 0xb6, 0xb5, 0xda,
0x96, 0x6c, 0xc0, 0xe1, 0x88, 0xa1, 0x67, 0xba, 0x34, 0xea, 0x70, 0x3f, 0xc6, 0x5d, 0x3d, 0xb6,
0x65, 0xae, 0x1c, 0x76, 0x79, 0x04, 0xc1, 0x69, 0x2f, 0x04, 0x27, 0x08, 0xe0, 0xb0, 0xc1, 0x99,
0x1b, 0x3f, 0x81, 0x08, 0xee, 0x04, 0x37, 0xf8, 0x03, 0x1c, 0x09, 0x88, 0x7a, 0x74, 0x4f, 0x3f,
0xaa, 0x35, 0x2d, 0x69, 0xbd, 0x76, 0x10, 0xdc, 0xba, 0xb2, 0xaa, 0x32, 0xb3, 0x2a, 0xb3, 0x32,
0xbf, 0xaa, 0x6c, 0x38, 0xf3, 0x64, 0x8c, 0xbd, 0xc3, 0xde, 0xc0, 0x75, 0x3d, 0x63, 0x65, 0xe4,
0xb9, 0xbe, 0x8b, 0x90, 0x6d, 0x5a, 0x4f, 0xc7, 0x84, 0xb7, 0x56, 0x58, 0x7f, 0xb7, 0x31, 0x70,
0x6d, 0xdb, 0x75, 0x38, 0xad, 0xdb, 0x88, 0x8e, 0xe8, 0xb6, 0x4c, 0xc7, 0xc7, 0x9e, 0xa3, 0x5b,
0x41, 0x2f, 0x19, 0x1c, 0x60, 0x5b, 0x17, 0xad, 0xb6, 0xa1, 0xfb, 0x7a, 0x94, 0xbf, 0xfa, 0x63,
0x05, 0x16, 0x76, 0x0f, 0xdc, 0x67, 0xeb, 0xae, 0x65, 0xe1, 0x81, 0x6f, 0xba, 0x0e, 0xd1, 0xf0,
0x93, 0x31, 0x26, 0x3e, 0xba, 0x01, 0xa5, 0xbe, 0x4e, 0x70, 0x47, 0x59, 0x52, 0x96, 0xeb, 0xab,
0x6f, 0xae, 0xc4, 0x34, 0x11, 0x2a, 0xdc, 0x23, 0xc3, 0x35, 0x9d, 0x60, 0x8d, 0x8d, 0x44, 0x08,
0x4a, 0x46, 0x7f, 0x6b, 0xa3, 0x53, 0x58, 0x52, 0x96, 0x8b, 0x1a, 0xfb, 0x46, 0x97, 0xa0, 0x39,
0x08, 0x79, 0x6f, 0x6d, 0x90, 0x4e, 0x71, 0xa9, 0xb8, 0x5c, 0xd4, 0xe2, 0x44, 0xf5, 0x77, 0x0a,
0xbc, 0x91, 0x52, 0x83, 0x8c, 0x5c, 0x87, 0x60, 0x74, 0x13, 0x2a, 0xc4, 0xd7, 0xfd, 0x31, 0x11,
0x9a, 0x7c, 0x49, 0xaa, 0xc9, 0x2e, 0x1b, 0xa2, 0x89, 0xa1, 0x69, 0xb1, 0x05, 0x89, 0x58, 0xf4,
0x55, 0x38, 0x6b, 0x3a, 0xf7, 0xb0, 0xed, 0x7a, 0x87, 0xbd, 0x11, 0xf6, 0x06, 0xd8, 0xf1, 0xf5,
0x21, 0x0e, 0x74, 0x9c, 0x0f, 0xfa, 0x76, 0x26, 0x5d, 0xea, 0x6f, 0x15, 0x38, 0x47, 0x35, 0xdd,
0xd1, 0x3d, 0xdf, 0x7c, 0x09, 0xfb, 0xa5, 0x42, 0x23, 0xaa, 0x63, 0xa7, 0xc8, 0xfa, 0x62, 0x34,
0x3a, 0x66, 0x14, 0x88, 0xa7, 0x6b, 0x2b, 0x31, 0x75, 0x63, 0x34, 0xf5, 0x37, 0xc2, 0xb0, 0x51,
0x3d, 0x4f, 0xb3, 0xa1, 0x49, 0x99, 0x85, 0xb4, 0xcc, 0x93, 0x6c, 0xe7, 0x3f, 0x14, 0x38, 0x77,
0xd7, 0xd5, 0x8d, 0x89, 0xe1, 0xbf, 0xf8, 0xed, 0xfc, 0x36, 0x54, 0xf8, 0x29, 0xe9, 0x94, 0x98,
0xac, 0xcb, 0x71, 0x59, 0xe2, 0x04, 0x4d, 0x34, 0xdc, 0x65, 0x04, 0x4d, 0x4c, 0x42, 0x97, 0xa1,
0xe5, 0xe1, 0x91, 0x65, 0x0e, 0xf4, 0x9e, 0x33, 0xb6, 0xfb, 0xd8, 0xeb, 0x94, 0x97, 0x94, 0xe5,
0xb2, 0xd6, 0x14, 0xd4, 0x6d, 0x46, 0x54, 0x7f, 0xa5, 0x40, 0x47, 0xc3, 0x16, 0xd6, 0x09, 0x7e,
0x95, 0x8b, 0x5d, 0x80, 0x8a, 0xe3, 0x1a, 0x78, 0x6b, 0x83, 0x2d, 0xb6, 0xa8, 0x89, 0x96, 0xfa,
0xd3, 0x02, 0x37, 0xc4, 0x6b, 0xee, 0xd7, 0x11, 0x63, 0x95, 0x3f, 0x1f, 0x63, 0x55, 0x64, 0xc6,
0xfa, 0xf3, 0xc4, 0x58, 0xaf, 0xfb, 0x86, 0x4c, 0x0c, 0x5a, 0x8e, 0x19, 0xf4, 0x07, 0xb0, 0xb8,
0xee, 0x61, 0xdd, 0xc7, 0x1f, 0xd1, 0xa4, 0xb1, 0x7e, 0xa0, 0x3b, 0x0e, 0xb6, 0x82, 0x25, 0x24,
0x85, 0x2b, 0x12, 0xe1, 0x1d, 0x98, 0x1d, 0x79, 0xee, 0xf3, 0xc3, 0x50, 0xef, 0xa0, 0xa9, 0xfe,
0x5e, 0x81, 0xae, 0x8c, 0xf7, 0x69, 0xe2, 0xcb, 0x45, 0x68, 0x8a, 0xec, 0xc7, 0xb9, 0x31, 0x99,
0x35, 0xad, 0xf1, 0x24, 0x22, 0x01, 0xdd, 0x80, 0xb3, 0x7c, 0x90, 0x87, 0xc9, 0xd8, 0xf2, 0xc3,
0xb1, 0x45, 0x36, 0x16, 0xb1, 0x3e, 0x8d, 0x75, 0x89, 0x19, 0xea, 0x1f, 0x14, 0x58, 0xdc, 0xc4,
0x7e, 0x68, 0x44, 0x2a, 0x15, 0xbf, 0xa6, 0x21, 0xfb, 0x33, 0x05, 0xba, 0x32, 0x5d, 0x4f, 0xb3,
0xad, 0x0f, 0x61, 0x21, 0x94, 0xd1, 0x33, 0x30, 0x19, 0x78, 0xe6, 0x88, 0x39, 0x33, 0x0b, 0xe0,
0xf5, 0xd5, 0x8b, 0x2b, 0x69, 0x80, 0xb1, 0x92, 0xd4, 0xe0, 0x5c, 0xc8, 0x62, 0x23, 0xc2, 0x41,
0xfd, 0xb9, 0x02, 0xe7, 0x36, 0xb1, 0xbf, 0x8b, 0x87, 0x36, 0x76, 0xfc, 0x2d, 0x67, 0xdf, 0x3d,
0xf9, 0xbe, 0xbe, 0x0d, 0x40, 0x04, 0x9f, 0x30, 0xb9, 0x44, 0x28, 0x79, 0xf6, 0x98, 0x61, 0x99,
0xa4, 0x3e, 0xa7, 0xd9, 0xbb, 0xaf, 0x43, 0xd9, 0x74, 0xf6, 0xdd, 0x60, 0xab, 0xce, 0xcb, 0xb6,
0x2a, 0x2a, 0x8c, 0x8f, 0x56, 0x1d, 0xae, 0xc5, 0x81, 0xee, 0x19, 0x77, 0xb1, 0x6e, 0x60, 0xef,
0x14, 0xee, 0x96, 0x5c, 0x76, 0x41, 0xb2, 0xec, 0x9f, 0x29, 0xf0, 0x46, 0x4a, 0xe0, 0x69, 0xd6,
0xfd, 0x2d, 0xa8, 0x10, 0xca, 0x2c, 0x58, 0xf8, 0x25, 0xe9, 0xc2, 0x23, 0xe2, 0xee, 0x9a, 0xc4,
0xd7, 0xc4, 0x1c, 0xd5, 0x85, 0x76, 0xb2, 0x0f, 0x5d, 0x80, 0x86, 0x38, 0xaa, 0x3d, 0x47, 0xb7,
0xf9, 0x06, 0xd4, 0xb4, 0xba, 0xa0, 0x6d, 0xeb, 0x36, 0x46, 0x8b, 0x50, 0xa5, 0x81, 0xab, 0x67,
0x1a, 0x81, 0xf9, 0x67, 0x59, 0x20, 0x33, 0x08, 0x7a, 0x0b, 0x80, 0x75, 0xe9, 0x86, 0xe1, 0x71,
0x30, 0x51, 0xd3, 0x6a, 0x94, 0x72, 0x8b, 0x12, 0xd4, 0x7f, 0x17, 0x60, 0xe1, 0x96, 0x61, 0xc8,
0xc2, 0xdc, 0xf1, 0x37, 0x7c, 0x12, 0x4d, 0x0b, 0xd1, 0x68, 0x9a, 0xeb, 0x8c, 0xa7, 0x42, 0x58,
0xe9, 0x18, 0x21, 0xac, 0x9c, 0x15, 0xc2, 0xd0, 0x26, 0x34, 0x09, 0xc6, 0x8f, 0x7b, 0x23, 0x97,
0xb0, 0x33, 0xc8, 0x32, 0x56, 0x7d, 0x55, 0x8d, 0xaf, 0x26, 0xc4, 0xfd, 0xf7, 0xc8, 0x70, 0x47,
0x8c, 0xd4, 0x1a, 0x74, 0x62, 0xd0, 0x42, 0xf7, 0x61, 0x61, 0x68, 0xb9, 0x7d, 0xdd, 0xea, 0x11,
0xac, 0x5b, 0xd8, 0xe8, 0x89, 0xf3, 0x45, 0x3a, 0xb3, 0xf9, 0x1c, 0xfc, 0x2c, 0x9f, 0xbe, 0xcb,
0x66, 0x8b, 0x0e, 0xa2, 0xfe, 0x5d, 0x81, 0x45, 0x0d, 0xdb, 0xee, 0x53, 0xfc, 0xbf, 0x6a, 0x02,
0xf5, 0x97, 0x0a, 0x34, 0x28, 0x38, 0xba, 0x87, 0x7d, 0x9d, 0xee, 0x04, 0x7a, 0x1f, 0x6a, 0x96,
0xab, 0x1b, 0x3d, 0xff, 0x70, 0xc4, 0x97, 0xd6, 0x4a, 0x2e, 0x8d, 0xef, 0x1e, 0x9d, 0xb4, 0x77,
0x38, 0xc2, 0x5a, 0xd5, 0x12, 0x5f, 0x79, 0x8e, 0x74, 0x2a, 0x5b, 0x14, 0x65, 0x00, 0xbf, 0x08,
0x0b, 0xdf, 0xd3, 0xfd, 0xc1, 0xc1, 0x86, 0x2d, 0xd4, 0x24, 0xaf, 0x66, 0xcf, 0xf3, 0x80, 0x94,
0x30, 0x94, 0x96, 0x65, 0x9e, 0x46, 0x6f, 0xa5, 0x2b, 0x0f, 0x84, 0x19, 0x22, 0xa1, 0x34, 0x02,
0xf6, 0x2a, 0x27, 0x01, 0x7b, 0xeb, 0xd0, 0xc4, 0xcf, 0x07, 0xd6, 0x98, 0x86, 0x15, 0x26, 0x9d,
0xfb, 0xf9, 0xdb, 0x12, 0xe9, 0x51, 0x37, 0x6f, 0x88, 0x49, 0x5b, 0x42, 0x07, 0x6e, 0x6a, 0x1b,
0xfb, 0x7a, 0xa7, 0xca, 0xd4, 0x58, 0xca, 0x32, 0x75, 0xe0, 0x1f, 0xdc, 0xdc, 0xb4, 0xa5, 0xfe,
0x47, 0x81, 0x45, 0x6e, 0x26, 0x6c, 0xf9, 0xfa, 0xab, 0xb5, 0x54, 0x68, 0x85, 0xd2, 0x31, 0xad,
0x10, 0xd9, 0x81, 0xda, 0xb1, 0x77, 0xe0, 0xd3, 0x12, 0xcc, 0x89, 0xed, 0xa5, 0x23, 0xd8, 0xf9,
0x79, 0x13, 0x6a, 0x61, 0x72, 0x17, 0xe0, 0x73, 0x42, 0x40, 0x4b, 0x50, 0x8f, 0x78, 0x8f, 0x58,
0x68, 0x94, 0x94, 0x6b, 0xb5, 0x01, 0x54, 0x2b, 0x45, 0xa0, 0xda, 0x5b, 0x00, 0xfb, 0xd6, 0x98,
0x1c, 0xf4, 0x7c, 0xd3, 0xc6, 0x02, 0x30, 0xd7, 0x18, 0x65, 0xcf, 0xb4, 0x31, 0xba, 0x05, 0x8d,
0xbe, 0xe9, 0x58, 0xee, 0xb0, 0x37, 0xd2, 0xfd, 0x03, 0xd2, 0xa9, 0x64, 0xfa, 0xcb, 0x6d, 0x13,
0x5b, 0xc6, 0x1a, 0x1b, 0xab, 0xd5, 0xf9, 0x9c, 0x1d, 0x3a, 0x05, 0xbd, 0x0d, 0x75, 0x67, 0x6c,
0xf7, 0xdc, 0xfd, 0x9e, 0xe7, 0x3e, 0xa3, 0x1e, 0xc7, 0x44, 0x38, 0x63, 0xfb, 0xc3, 0x7d, 0xcd,
0x7d, 0x46, 0x93, 0x6b, 0x8d, 0xa6, 0x59, 0x62, 0xb9, 0x43, 0xd2, 0xa9, 0xe6, 0xe2, 0x3f, 0x99,
0x40, 0x67, 0x1b, 0xd4, 0x8f, 0xd8, 0xec, 0x5a, 0xbe, 0xd9, 0xe1, 0x04, 0x74, 0x05, 0x5a, 0x03,
0xd7, 0x1e, 0xe9, 0x6c, 0x87, 0x6e, 0x7b, 0xae, 0xdd, 0x01, 0x76, 0x56, 0x13, 0x54, 0xb4, 0x0e,
0x75, 0xd3, 0x31, 0xf0, 0x73, 0x71, 0x6a, 0xea, 0x4c, 0x8e, 0x2a, 0x33, 0x39, 0x13, 0xb4, 0x45,
0xc7, 0x32, 0xa3, 0x83, 0x19, 0x7c, 0x12, 0x9a, 0xf3, 0x85, 0x45, 0x7b, 0xc4, 0x7c, 0x81, 0x3b,
0x0d, 0x6e, 0x45, 0x41, 0xdb, 0x35, 0x5f, 0x60, 0xf5, 0x8f, 0x05, 0x68, 0xc5, 0x39, 0xd0, 0x4b,
0xc7, 0x3e, 0xa3, 0x04, 0x6e, 0x11, 0x34, 0x29, 0x3f, 0xec, 0xe8, 0x7d, 0x8b, 0x9e, 0x65, 0x03,
0x3f, 0x67, 0x5e, 0x51, 0xd5, 0xea, 0x9c, 0xc6, 0x18, 0x50, 0xeb, 0x72, 0xbd, 0x19, 0xc8, 0xe0,
0x97, 0x82, 0x1a, 0xa3, 0x30, 0x88, 0xd1, 0x81, 0x59, 0xae, 0x5f, 0xe0, 0x13, 0x41, 0x93, 0xf6,
0xf4, 0xc7, 0x26, 0x93, 0xca, 0x7d, 0x22, 0x68, 0xa2, 0x0d, 0x68, 0x70, 0x96, 0x23, 0xdd, 0xd3,
0xed, 0xc0, 0x23, 0x2e, 0x48, 0x0f, 0xea, 0x1d, 0x7c, 0xf8, 0x40, 0xb7, 0xc6, 0x78, 0x47, 0x37,
0x3d, 0x8d, 0xef, 0xe0, 0x0e, 0x9b, 0x85, 0x96, 0xa1, 0xcd, 0xb9, 0xec, 0x9b, 0x16, 0x16, 0xbe,
0x35, 0xcb, 0x70, 0x4c, 0x8b, 0xd1, 0x6f, 0x9b, 0x16, 0xe6, 0xee, 0x13, 0x2e, 0x81, 0xed, 0x59,
0x95, 0x7b, 0x0f, 0xa3, 0xb0, 0x1d, 0xfb, 0x67, 0x01, 0xe6, 0xe9, 0x21, 0x0a, 0x92, 0xef, 0xc9,
0xe3, 0xc8, 0x5b, 0x00, 0x06, 0xf1, 0x7b, 0xb1, 0x58, 0x52, 0x33, 0x88, 0xbf, 0xcd, 0xc3, 0xc9,
0xfb, 0x41, 0xa8, 0x28, 0x66, 0x5f, 0x13, 0x12, 0x87, 0x3a, 0x1d, 0xb4, 0x4f, 0xf4, 0x9c, 0x72,
0x11, 0x9a, 0xc4, 0x1d, 0x7b, 0x03, 0xdc, 0x8b, 0x5d, 0x6b, 0x1b, 0x9c, 0xb8, 0x2d, 0x8f, 0x76,
0x15, 0xe9, 0xb3, 0x4e, 0x24, 0x6c, 0xcd, 0x1e, 0x3b, 0x6c, 0xfd, 0x4d, 0x81, 0x05, 0xf1, 0x04,
0x70, 0xfa, 0xdd, 0xce, 0x8a, 0xda, 0x41, 0x8c, 0x2a, 0x1e, 0x71, 0x9d, 0x2c, 0xe5, 0xc8, 0xb9,
0x65, 0x49, 0xce, 0x8d, 0x5f, 0xa9, 0x2a, 0xc9, 0x2b, 0x95, 0xfa, 0xb1, 0x02, 0xcd, 0x5d, 0xac,
0x7b, 0x83, 0x83, 0x60, 0x5d, 0xdf, 0x80, 0xa2, 0x87, 0x9f, 0x88, 0x65, 0x5d, 0xca, 0xc0, 0x97,
0xb1, 0x29, 0x1a, 0x9d, 0x80, 0xce, 0x43, 0xdd, 0xb0, 0xad, 0xc4, 0xcd, 0x1d, 0x0c, 0xdb, 0x0a,
0x10, 0x57, 0x5c, 0x95, 0x62, 0x4a, 0x95, 0x4f, 0x14, 0x68, 0x7c, 0xc4, 0x61, 0x17, 0xd7, 0xe4,
0xbd, 0xa8, 0x26, 0x57, 0x32, 0x34, 0xd1, 0xb0, 0xef, 0x99, 0xf8, 0x29, 0xfe, 0x7c, 0x75, 0xf9,
0x85, 0x02, 0x0b, 0x1f, 0xe8, 0x8e, 0xe1, 0xee, 0xef, 0x9f, 0xde, 0xee, 0xeb, 0x61, 0x10, 0xdc,
0x3a, 0xce, 0x4d, 0x32, 0x36, 0x49, 0xfd, 0xb8, 0x00, 0x88, 0x3a, 0xe9, 0x9a, 0x6e, 0xe9, 0xce,
0x00, 0x9f, 0x5c, 0x9b, 0xcb, 0xd0, 0x8a, 0x1d, 0xad, 0xf0, 0x55, 0x3c, 0x7a, 0xb6, 0x08, 0xba,
0x03, 0xad, 0x3e, 0x17, 0xd5, 0xf3, 0xb0, 0x4e, 0x5c, 0x87, 0xb9, 0x67, 0x4b, 0x7e, 0x0f, 0xdc,
0xf3, 0xcc, 0xe1, 0x10, 0x7b, 0xeb, 0xae, 0x63, 0xf0, 0x3b, 0x47, 0xb3, 0x1f, 0xa8, 0x49, 0xa7,
0x32, 0x7b, 0x84, 0x71, 0x26, 0x00, 0x87, 0x10, 0x06, 0x1a, 0x82, 0xde, 0x81, 0x33, 0xf1, 0xeb,
0xc8, 0xc4, 0x9f, 0xdb, 0x24, 0x7a, 0xd3, 0xa0, 0xc6, 0xf9, 0x11, 0xa0, 0x10, 0xee, 0x32, 0x54,
0xc5, 0x92, 0x46, 0x9e, 0xd7, 0xac, 0x37, 0xa1, 0x66, 0x04, 0x33, 0x85, 0x57, 0x4c, 0x08, 0x34,
0xe8, 0x70, 0x0d, 0x7b, 0xf4, 0xfc, 0x63, 0x23, 0x00, 0x14, 0x9c, 0x78, 0x97, 0xd1, 0xd4, 0xcf,
0x0a, 0xd0, 0x8e, 0x5e, 0x71, 0x72, 0xcb, 0x7e, 0x39, 0x6f, 0x5b, 0x47, 0xdc, 0xe7, 0x4a, 0xa7,
0xb8, 0xcf, 0xa5, 0xef, 0x9b, 0xe5, 0x93, 0xdd, 0x37, 0xd5, 0x5f, 0x2b, 0x30, 0x97, 0x78, 0x4a,
0x4a, 0x42, 0x3b, 0x25, 0x0d, 0xed, 0xde, 0x83, 0x32, 0xc5, 0x3b, 0x98, 0x6d, 0x52, 0x4b, 0x0e,
0x3b, 0xe2, 0x5c, 0x35, 0x3e, 0x01, 0x5d, 0x87, 0x79, 0x49, 0xf9, 0x41, 0x98, 0x12, 0xa5, 0xab,
0x0f, 0xea, 0x9f, 0x4a, 0x50, 0x8f, 0xec, 0xc7, 0x14, 0x54, 0x9a, 0xe7, 0xe2, 0x96, 0x58, 0x5e,
0x31, 0xbd, 0xbc, 0x8c, 0xf7, 0x77, 0xb4, 0x08, 0x55, 0x1b, 0xdb, 0x3c, 0xed, 0x0b, 0x0c, 0x62,
0x63, 0x9b, 0x26, 0x7d, 0xf6, 0x34, 0x32, 0xb6, 0x39, 0x9e, 0xe4, 0x89, 0x6e, 0xd6, 0x19, 0xdb,
0x0c, 0x4d, 0xc6, 0x11, 0xcf, 0xec, 0x11, 0x88, 0xa7, 0x1a, 0x47, 0x3c, 0xb1, 0xe3, 0x50, 0x4b,
0x1e, 0x87, 0xbc, 0x40, 0xf1, 0x06, 0xcc, 0x0f, 0xd8, 0x3b, 0xb0, 0xb1, 0x76, 0xb8, 0x1e, 0x76,
0x75, 0xea, 0x0c, 0x9a, 0xc9, 0xba, 0xd0, 0x6d, 0xea, 0x5c, 0x02, 0x15, 0x32, 0x2b, 0x37, 0x98,
0x95, 0xe5, 0x80, 0x4a, 0xd8, 0x86, 0x1b, 0x39, 0x88, 0x89, 0xac, 0x95, 0x84, 0xa8, 0xcd, 0x13,
0x41, 0xd4, 0xf3, 0x50, 0x0f, 0x8a, 0x01, 0xa6, 0x41, 0x3a, 0x2d, 0x1e, 0x9b, 0x04, 0x69, 0xcb,
0x20, 0xb1, 0x47, 0xa9, 0xb9, 0xd8, 0xa3, 0x94, 0xfa, 0x97, 0x22, 0xb4, 0x26, 0x18, 0x26, 0x77,
0x28, 0xc8, 0x53, 0x46, 0xdb, 0x86, 0xf6, 0xe4, 0xcd, 0x96, 0xed, 0xd2, 0x91, 0x30, 0x2c, 0xf9,
0x5a, 0x3b, 0x37, 0x4a, 0x9c, 0xb9, 0xd8, 0x63, 0x45, 0xe9, 0x58, 0x8f, 0x15, 0xa7, 0xac, 0xb6,
0xdc, 0x84, 0x73, 0x1e, 0x87, 0x50, 0x46, 0x2f, 0xb6, 0x6c, 0x8e, 0x46, 0xce, 0x06, 0x9d, 0x3b,
0xd1, 0xe5, 0x67, 0x1c, 0xe3, 0xd9, 0xac, 0x63, 0x9c, 0x34, 0x63, 0x35, 0x65, 0xc6, 0x74, 0xd1,
0xa7, 0x26, 0x2b, 0xfa, 0xdc, 0x87, 0xf9, 0xfb, 0x0e, 0x19, 0xf7, 0xc9, 0xc0, 0x33, 0xfb, 0x38,
0xb8, 0xaa, 0xe7, 0x32, 0x6b, 0x17, 0xaa, 0x22, 0x5e, 0x73, 0x93, 0xd6, 0xb4, 0xb0, 0xad, 0xfe,
0x44, 0x81, 0x85, 0x34, 0x5f, 0xe6, 0x31, 0x93, 0x60, 0xa0, 0xc4, 0x82, 0xc1, 0xf7, 0x61, 0x7e,
0xc2, 0xbe, 0x17, 0xe3, 0x5c, 0x5f, 0xbd, 0x2a, 0xb3, 0x9d, 0x44, 0x71, 0x0d, 0x4d, 0x78, 0x04,
0x34, 0xf5, 0x5f, 0x0a, 0x9c, 0x11, 0xc7, 0x8a, 0xd2, 0x86, 0xec, 0x91, 0x83, 0x26, 0x28, 0xd7,
0xb1, 0x4c, 0x27, 0xc4, 0xdc, 0x62, 0x8d, 0x9c, 0x28, 0x30, 0xf7, 0x07, 0x30, 0x27, 0x06, 0x85,
0x79, 0x26, 0x27, 0x9c, 0x69, 0xf1, 0x79, 0x61, 0x86, 0xb9, 0x0c, 0x2d, 0x77, 0x7f, 0x3f, 0x2a,
0x8f, 0x07, 0xca, 0xa6, 0xa0, 0x0a, 0x81, 0xdf, 0x85, 0x76, 0x30, 0xec, 0xb8, 0x99, 0x6d, 0x4e,
0x4c, 0x0c, 0x1f, 0x29, 0x3f, 0x51, 0xa0, 0x13, 0xcf, 0x73, 0x91, 0xe5, 0x1f, 0x1f, 0x49, 0x7d,
0x33, 0x5e, 0x1a, 0xb8, 0x7c, 0x84, 0x3e, 0x13, 0x39, 0xe2, 0x82, 0x74, 0xed, 0x05, 0xb4, 0xe2,
0x67, 0x16, 0x35, 0xa0, 0xba, 0xed, 0xfa, 0xdf, 0x79, 0x6e, 0x12, 0xbf, 0x3d, 0x83, 0x5a, 0x00,
0xdb, 0xae, 0xbf, 0xe3, 0x61, 0x82, 0x1d, 0xbf, 0xad, 0x20, 0x80, 0xca, 0x87, 0xce, 0x86, 0x49,
0x1e, 0xb7, 0x0b, 0x68, 0x5e, 0xa4, 0x54, 0xdd, 0xda, 0x12, 0x07, 0xa1, 0x5d, 0xa4, 0xd3, 0xc3,
0x56, 0x09, 0xb5, 0xa1, 0x11, 0x0e, 0xd9, 0xdc, 0xb9, 0xdf, 0x2e, 0xa3, 0x1a, 0x94, 0xf9, 0x67,
0xe5, 0x9a, 0x01, 0xed, 0x24, 0x62, 0xa3, 0x3c, 0xef, 0x3b, 0x77, 0x1c, 0xf7, 0x59, 0x48, 0x6a,
0xcf, 0xa0, 0x3a, 0xcc, 0x0a, 0x14, 0xdc, 0x56, 0xd0, 0x1c, 0xd4, 0x23, 0x00, 0xb4, 0x5d, 0xa0,
0x84, 0x4d, 0x6f, 0x34, 0x10, 0x50, 0x94, 0xab, 0x40, 0xad, 0xb6, 0xe1, 0x3e, 0x73, 0xda, 0xa5,
0x6b, 0x6b, 0x50, 0x0d, 0x82, 0x09, 0x1d, 0xca, 0xb9, 0x3b, 0xb4, 0xd9, 0x9e, 0x41, 0x67, 0xa0,
0x19, 0x2b, 0x34, 0xb7, 0x15, 0x84, 0xa0, 0x15, 0xff, 0x09, 0xa0, 0x5d, 0x58, 0xfd, 0xb4, 0x09,
0xc0, 0xd1, 0x96, 0xeb, 0x7a, 0x06, 0x1a, 0x01, 0xda, 0xc4, 0x3e, 0xcd, 0x24, 0xae, 0x13, 0x64,
0x01, 0x82, 0x6e, 0x64, 0x80, 0x92, 0xf4, 0x50, 0xa1, 0x6a, 0x37, 0xeb, 0x32, 0x91, 0x18, 0xae,
0xce, 0x20, 0x9b, 0x49, 0xdc, 0x33, 0x6d, 0xbc, 0x67, 0x0e, 0x1e, 0x87, 0x30, 0x2d, 0x5b, 0x62,
0x62, 0x68, 0x20, 0x31, 0x11, 0xb4, 0x45, 0x63, 0xd7, 0xf7, 0x4c, 0x67, 0x18, 0x14, 0x6a, 0xd4,
0x19, 0xf4, 0x04, 0xce, 0x6e, 0x62, 0x26, 0xdd, 0x24, 0xbe, 0x39, 0x20, 0x81, 0xc0, 0xd5, 0x6c,
0x81, 0xa9, 0xc1, 0xc7, 0x14, 0x69, 0xc1, 0x5c, 0xe2, 0xa7, 0x1b, 0x74, 0x4d, 0x5e, 0xeb, 0x91,
0xfd, 0x20, 0xd4, 0x7d, 0x27, 0xd7, 0xd8, 0x50, 0x9a, 0x09, 0xad, 0xf8, 0x0f, 0x29, 0xe8, 0xcb,
0x59, 0x0c, 0x52, 0x35, 0xf7, 0xee, 0xb5, 0x3c, 0x43, 0x43, 0x51, 0x0f, 0xb9, 0x3f, 0x4d, 0x13,
0x25, 0xfd, 0xdf, 0xa1, 0x7b, 0x54, 0x8d, 0x4c, 0x9d, 0x41, 0x3f, 0x84, 0x33, 0xa9, 0x3f, 0x03,
0xd0, 0x57, 0x64, 0xec, 0xb3, 0x7e, 0x20, 0x98, 0x26, 0xe1, 0x61, 0xf2, 0x34, 0x64, 0x6b, 0x9f,
0xfa, 0x93, 0x24, 0xbf, 0xf6, 0x11, 0xf6, 0x47, 0x69, 0x7f, 0x6c, 0x09, 0x63, 0x40, 0xe9, 0x7f,
0x03, 0xd0, 0xbb, 0x32, 0x11, 0x99, 0xff, 0x27, 0x74, 0x57, 0xf2, 0x0e, 0x0f, 0x4d, 0x3e, 0x66,
0xa7, 0x35, 0x79, 0xdd, 0x90, 0x8a, 0xcd, 0xfc, 0x1f, 0x40, 0x2e, 0x36, 0xbb, 0x24, 0xcf, 0x9d,
0x3a, 0x5e, 0x72, 0x96, 0xdb, 0x4a, 0x5a, 0x26, 0x97, 0x3b, 0xb5, 0xbc, 0x82, 0xad, 0xce, 0xa0,
0xbd, 0x58, 0x10, 0x46, 0x57, 0xb2, 0x7c, 0x22, 0xfe, 0x4c, 0x30, 0xcd, 0x5c, 0x3d, 0x80, 0x4d,
0xec, 0xdf, 0xc3, 0xbe, 0x67, 0x0e, 0x48, 0x92, 0xa9, 0x68, 0x4c, 0x06, 0x04, 0x4c, 0xaf, 0x4e,
0x1d, 0x17, 0xaa, 0xdd, 0x87, 0xfa, 0x26, 0xf6, 0x35, 0x8e, 0xb4, 0x08, 0xca, 0x9c, 0x19, 0x8c,
0x08, 0x44, 0x2c, 0x4f, 0x1f, 0x18, 0x0d, 0x64, 0x89, 0x0a, 0x38, 0xca, 0xdc, 0xdb, 0x74, 0x5d,
0x5e, 0x1e, 0xc8, 0x32, 0x4a, 0xea, 0xea, 0xcc, 0xea, 0x5f, 0xeb, 0x50, 0x63, 0x5e, 0x48, 0x33,
0xde, 0xff, 0x13, 0xd3, 0x4b, 0x48, 0x4c, 0x8f, 0x60, 0x2e, 0x51, 0xd1, 0x97, 0xdb, 0x53, 0x5e,
0xf6, 0x9f, 0xe6, 0xf2, 0x7d, 0x40, 0xe9, 0x7a, 0xb5, 0x3c, 0x54, 0x64, 0xd6, 0xb5, 0xa7, 0xc9,
0x78, 0x04, 0x73, 0x89, 0xe2, 0xac, 0x7c, 0x05, 0xf2, 0x0a, 0x6e, 0x8e, 0x15, 0xa4, 0x6b, 0x8a,
0xf2, 0x15, 0x64, 0xd6, 0x1e, 0xa7, 0xc9, 0x78, 0xc0, 0x4b, 0xde, 0x21, 0x68, 0xbf, 0x9a, 0x15,
0x6f, 0x12, 0xaf, 0xa4, 0xaf, 0x3e, 0x03, 0xbd, 0xfc, 0x0c, 0xfd, 0x08, 0xe6, 0x12, 0xa5, 0x01,
0xb9, 0x75, 0xe5, 0xf5, 0x83, 0x69, 0xdc, 0xbf, 0xc0, 0x9c, 0xb2, 0x0b, 0x15, 0xfe, 0x9e, 0x8f,
0x2e, 0xc8, 0xaf, 0x30, 0x91, 0xb7, 0xfe, 0xee, 0xb4, 0x8a, 0x00, 0x19, 0x5b, 0x3e, 0x61, 0x4c,
0xcb, 0xec, 0xc4, 0x20, 0x69, 0xb9, 0x25, 0xfa, 0xce, 0xdf, 0x9d, 0xfe, 0xb4, 0x1f, 0x30, 0x7d,
0xd9, 0x79, 0x6a, 0xed, 0x6b, 0x0f, 0x57, 0x87, 0xa6, 0x7f, 0x30, 0xee, 0x53, 0x7b, 0x5c, 0xe7,
0x23, 0xdf, 0x35, 0x5d, 0xf1, 0x75, 0x3d, 0x50, 0xed, 0x3a, 0xe3, 0x74, 0x9d, 0xad, 0x65, 0xd4,
0xef, 0x57, 0x58, 0xf3, 0xe6, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe5, 0x45, 0x22, 0x53, 0xbb,
0x2f, 0x00, 0x00,
// 2755 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x5b, 0x6f, 0x1c, 0x49,
0x15, 0x76, 0xcf, 0xcd, 0x33, 0x67, 0x2e, 0x9e, 0x94, 0x13, 0xef, 0x78, 0xc8, 0xc5, 0xe9, 0xdc,
0x4c, 0x96, 0x75, 0x82, 0x03, 0x68, 0x57, 0xc0, 0x43, 0x6c, 0x13, 0xaf, 0x49, 0xe2, 0xf5, 0xb6,
0x9d, 0x00, 0x51, 0xa4, 0xa1, 0x67, 0xba, 0x3c, 0x6e, 0xa5, 0x2f, 0x93, 0xae, 0x9e, 0x24, 0xce,
0x33, 0x42, 0x2c, 0x17, 0x21, 0x9e, 0x10, 0x12, 0xe2, 0x09, 0x04, 0x48, 0xac, 0x78, 0xe6, 0x05,
0x21, 0x7e, 0x01, 0x12, 0xef, 0x88, 0x37, 0x7e, 0x01, 0x8f, 0x08, 0x54, 0x97, 0xee, 0xe9, 0x4b,
0xb5, 0x67, 0xec, 0xd9, 0x6c, 0x22, 0xb4, 0x6f, 0x5d, 0xa7, 0x4f, 0xd5, 0x39, 0xa7, 0xce, 0xa9,
0x53, 0x5f, 0xd5, 0x29, 0x38, 0xf5, 0x74, 0x88, 0xbd, 0xc3, 0x4e, 0xcf, 0x75, 0x3d, 0x63, 0x65,
0xe0, 0xb9, 0xbe, 0x8b, 0x90, 0x6d, 0x5a, 0xcf, 0x86, 0x84, 0xb7, 0x56, 0xd8, 0xff, 0x76, 0xad,
0xe7, 0xda, 0xb6, 0xeb, 0x70, 0x5a, 0xbb, 0x16, 0xe5, 0x68, 0x37, 0x4c, 0xc7, 0xc7, 0x9e, 0xa3,
0x5b, 0xc1, 0x5f, 0xd2, 0x3b, 0xc0, 0xb6, 0x2e, 0x5a, 0x4d, 0x43, 0xf7, 0xf5, 0xe8, 0xf8, 0xea,
0xf7, 0x14, 0x58, 0xd8, 0x3d, 0x70, 0x9f, 0xaf, 0xbb, 0x96, 0x85, 0x7b, 0xbe, 0xe9, 0x3a, 0x44,
0xc3, 0x4f, 0x87, 0x98, 0xf8, 0xe8, 0x26, 0x14, 0xba, 0x3a, 0xc1, 0x2d, 0x65, 0x49, 0x59, 0xae,
0xae, 0x9e, 0x5d, 0x89, 0x69, 0x22, 0x54, 0xb8, 0x4f, 0xfa, 0x6b, 0x3a, 0xc1, 0x1a, 0xe3, 0x44,
0x08, 0x0a, 0x46, 0x77, 0x6b, 0xa3, 0x95, 0x5b, 0x52, 0x96, 0xf3, 0x1a, 0xfb, 0x46, 0x97, 0xa1,
0xde, 0x0b, 0xc7, 0xde, 0xda, 0x20, 0xad, 0xfc, 0x52, 0x7e, 0x39, 0xaf, 0xc5, 0x89, 0xea, 0x6f,
0x15, 0x78, 0x2b, 0xa5, 0x06, 0x19, 0xb8, 0x0e, 0xc1, 0xe8, 0x16, 0x94, 0x88, 0xaf, 0xfb, 0x43,
0x22, 0x34, 0xf9, 0x9c, 0x54, 0x93, 0x5d, 0xc6, 0xa2, 0x09, 0xd6, 0xb4, 0xd8, 0x9c, 0x44, 0x2c,
0xfa, 0x22, 0x9c, 0x36, 0x9d, 0xfb, 0xd8, 0x76, 0xbd, 0xc3, 0xce, 0x00, 0x7b, 0x3d, 0xec, 0xf8,
0x7a, 0x1f, 0x07, 0x3a, 0xce, 0x07, 0xff, 0x76, 0x46, 0xbf, 0xd4, 0xdf, 0x28, 0x70, 0x86, 0x6a,
0xba, 0xa3, 0x7b, 0xbe, 0xf9, 0x0a, 0xe6, 0x4b, 0x85, 0x5a, 0x54, 0xc7, 0x56, 0x9e, 0xfd, 0x8b,
0xd1, 0x28, 0xcf, 0x20, 0x10, 0x4f, 0x6d, 0x2b, 0x30, 0x75, 0x63, 0x34, 0xf5, 0xd7, 0xc2, 0xb1,
0x51, 0x3d, 0xa7, 0x99, 0xd0, 0xa4, 0xcc, 0x5c, 0x5a, 0xe6, 0x49, 0xa6, 0xf3, 0x5f, 0x0a, 0x9c,
0xb9, 0xe7, 0xea, 0xc6, 0xc8, 0xf1, 0x9f, 0xfe, 0x74, 0x7e, 0x1d, 0x4a, 0x7c, 0x95, 0xb4, 0x0a,
0x4c, 0xd6, 0x95, 0xb8, 0x2c, 0xb1, 0x82, 0x46, 0x1a, 0xee, 0x32, 0x82, 0x26, 0x3a, 0xa1, 0x2b,
0xd0, 0xf0, 0xf0, 0xc0, 0x32, 0x7b, 0x7a, 0xc7, 0x19, 0xda, 0x5d, 0xec, 0xb5, 0x8a, 0x4b, 0xca,
0x72, 0x51, 0xab, 0x0b, 0xea, 0x36, 0x23, 0xaa, 0xbf, 0x54, 0xa0, 0xa5, 0x61, 0x0b, 0xeb, 0x04,
0xbf, 0x4e, 0x63, 0x17, 0xa0, 0xe4, 0xb8, 0x06, 0xde, 0xda, 0x60, 0xc6, 0xe6, 0x35, 0xd1, 0x52,
0x7f, 0x94, 0xe3, 0x8e, 0x78, 0xc3, 0xe3, 0x3a, 0xe2, 0xac, 0xe2, 0x27, 0xe3, 0xac, 0x92, 0xcc,
0x59, 0x7f, 0x19, 0x39, 0xeb, 0x4d, 0x9f, 0x90, 0x91, 0x43, 0x8b, 0x31, 0x87, 0x7e, 0x07, 0x16,
0xd7, 0x3d, 0xac, 0xfb, 0xf8, 0x43, 0xba, 0x69, 0xac, 0x1f, 0xe8, 0x8e, 0x83, 0xad, 0xc0, 0x84,
0xa4, 0x70, 0x45, 0x22, 0xbc, 0x05, 0xb3, 0x03, 0xcf, 0x7d, 0x71, 0x18, 0xea, 0x1d, 0x34, 0xd5,
0xdf, 0x29, 0xd0, 0x96, 0x8d, 0x3d, 0x4d, 0x7e, 0xb9, 0x04, 0x75, 0xb1, 0xfb, 0xf1, 0xd1, 0x98,
0xcc, 0x8a, 0x56, 0x7b, 0x1a, 0x91, 0x80, 0x6e, 0xc2, 0x69, 0xce, 0xe4, 0x61, 0x32, 0xb4, 0xfc,
0x90, 0x37, 0xcf, 0x78, 0x11, 0xfb, 0xa7, 0xb1, 0x5f, 0xa2, 0x87, 0xfa, 0x7b, 0x05, 0x16, 0x37,
0xb1, 0x1f, 0x3a, 0x91, 0x4a, 0xc5, 0x6f, 0x68, 0xca, 0xfe, 0x58, 0x81, 0xb6, 0x4c, 0xd7, 0x69,
0xa6, 0xf5, 0x11, 0x2c, 0x84, 0x32, 0x3a, 0x06, 0x26, 0x3d, 0xcf, 0x1c, 0xb0, 0x60, 0x66, 0x09,
0xbc, 0xba, 0x7a, 0x69, 0x25, 0x0d, 0x30, 0x56, 0x92, 0x1a, 0x9c, 0x09, 0x87, 0xd8, 0x88, 0x8c,
0xa0, 0xfe, 0x44, 0x81, 0x33, 0x9b, 0xd8, 0xdf, 0xc5, 0x7d, 0x1b, 0x3b, 0xfe, 0x96, 0xb3, 0xef,
0x9e, 0x7c, 0x5e, 0xcf, 0x03, 0x10, 0x31, 0x4e, 0xb8, 0xb9, 0x44, 0x28, 0x93, 0xcc, 0x31, 0xc3,
0x32, 0x49, 0x7d, 0xa6, 0x99, 0xbb, 0x2f, 0x43, 0xd1, 0x74, 0xf6, 0xdd, 0x60, 0xaa, 0x2e, 0xc8,
0xa6, 0x2a, 0x2a, 0x8c, 0x73, 0xab, 0x0e, 0xd7, 0xe2, 0x40, 0xf7, 0x8c, 0x7b, 0x58, 0x37, 0xb0,
0x37, 0x45, 0xb8, 0x25, 0xcd, 0xce, 0x49, 0xcc, 0xfe, 0xb1, 0x02, 0x6f, 0xa5, 0x04, 0x4e, 0x63,
0xf7, 0xd7, 0xa0, 0x44, 0xe8, 0x60, 0x81, 0xe1, 0x97, 0xa5, 0x86, 0x47, 0xc4, 0xdd, 0x33, 0x89,
0xaf, 0x89, 0x3e, 0xaa, 0x0b, 0xcd, 0xe4, 0x3f, 0x74, 0x11, 0x6a, 0x62, 0xa9, 0x76, 0x1c, 0xdd,
0xe6, 0x13, 0x50, 0xd1, 0xaa, 0x82, 0xb6, 0xad, 0xdb, 0x18, 0x2d, 0x42, 0x99, 0x26, 0xae, 0x8e,
0x69, 0x04, 0xee, 0x9f, 0x65, 0x89, 0xcc, 0x20, 0xe8, 0x1c, 0x00, 0xfb, 0xa5, 0x1b, 0x86, 0xc7,
0xc1, 0x44, 0x45, 0xab, 0x50, 0xca, 0x6d, 0x4a, 0x50, 0xff, 0x93, 0x83, 0x85, 0xdb, 0x86, 0x21,
0x4b, 0x73, 0xc7, 0x9f, 0xf0, 0x51, 0x36, 0xcd, 0x45, 0xb3, 0xe9, 0x44, 0x6b, 0x3c, 0x95, 0xc2,
0x0a, 0xc7, 0x48, 0x61, 0xc5, 0xac, 0x14, 0x86, 0x36, 0xa1, 0x4e, 0x30, 0x7e, 0xd2, 0x19, 0xb8,
0x84, 0xad, 0x41, 0xb6, 0x63, 0x55, 0x57, 0xd5, 0xb8, 0x35, 0x21, 0xee, 0xbf, 0x4f, 0xfa, 0x3b,
0x82, 0x53, 0xab, 0xd1, 0x8e, 0x41, 0x0b, 0x3d, 0x80, 0x85, 0xbe, 0xe5, 0x76, 0x75, 0xab, 0x43,
0xb0, 0x6e, 0x61, 0xa3, 0x23, 0xd6, 0x17, 0x69, 0xcd, 0x4e, 0x16, 0xe0, 0xa7, 0x79, 0xf7, 0x5d,
0xd6, 0x5b, 0xfc, 0x20, 0xea, 0x3f, 0x15, 0x58, 0xd4, 0xb0, 0xed, 0x3e, 0xc3, 0xff, 0xaf, 0x2e,
0x50, 0x7f, 0xa6, 0x40, 0x8d, 0x82, 0xa3, 0xfb, 0xd8, 0xd7, 0xe9, 0x4c, 0xa0, 0xf7, 0xa0, 0x62,
0xb9, 0xba, 0xd1, 0xf1, 0x0f, 0x07, 0xdc, 0xb4, 0x46, 0xd2, 0x34, 0x3e, 0x7b, 0xb4, 0xd3, 0xde,
0xe1, 0x00, 0x6b, 0x65, 0x4b, 0x7c, 0x4d, 0xb2, 0xa4, 0x53, 0xbb, 0x45, 0x5e, 0xb2, 0x5b, 0xfc,
0x35, 0x0f, 0x0b, 0xdf, 0xd2, 0xfd, 0xde, 0xc1, 0x86, 0x2d, 0xd4, 0x24, 0xaf, 0x67, 0xce, 0x27,
0x01, 0x29, 0x61, 0x2a, 0x2d, 0xca, 0x22, 0x8d, 0x9e, 0x4a, 0x57, 0x1e, 0x0a, 0x37, 0x44, 0x52,
0x69, 0x04, 0xec, 0x95, 0x4e, 0x02, 0xf6, 0xd6, 0xa1, 0x8e, 0x5f, 0xf4, 0xac, 0x21, 0x4d, 0x2b,
0x4c, 0x3a, 0x8f, 0xf3, 0xf3, 0x12, 0xe9, 0xd1, 0x30, 0xaf, 0x89, 0x4e, 0x5b, 0x42, 0x07, 0xee,
0x6a, 0x1b, 0xfb, 0x7a, 0xab, 0xcc, 0xd4, 0x58, 0xca, 0x72, 0x75, 0x10, 0x1f, 0xdc, 0xdd, 0xb4,
0x85, 0xce, 0x42, 0x45, 0x40, 0xcb, 0xad, 0x8d, 0x56, 0x85, 0x4d, 0xdf, 0x88, 0xa0, 0xfe, 0x57,
0x81, 0x45, 0xee, 0x44, 0x6c, 0xf9, 0xfa, 0xeb, 0xf5, 0x63, 0xe8, 0xa3, 0xc2, 0x31, 0x7d, 0x14,
0x99, 0x9f, 0xca, 0x71, 0xe7, 0x47, 0xfd, 0x73, 0x01, 0xe6, 0xc4, 0xe4, 0x53, 0x0e, 0xb6, 0xba,
0xce, 0x42, 0x25, 0xdc, 0xfa, 0x05, 0x34, 0x1d, 0x11, 0xd0, 0x12, 0x54, 0x23, 0xb1, 0x25, 0x0c,
0x8d, 0x92, 0x26, 0xb2, 0x36, 0x00, 0x72, 0x85, 0x08, 0x90, 0x3b, 0x07, 0xb0, 0x6f, 0x0d, 0xc9,
0x41, 0xc7, 0x37, 0x6d, 0x2c, 0xe0, 0x74, 0x85, 0x51, 0xf6, 0x4c, 0x1b, 0xa3, 0xdb, 0x50, 0xeb,
0x9a, 0x8e, 0xe5, 0xf6, 0x3b, 0x03, 0xdd, 0x3f, 0x20, 0xad, 0x52, 0x66, 0x34, 0xdd, 0x31, 0xb1,
0x65, 0xac, 0x31, 0x5e, 0xad, 0xca, 0xfb, 0xec, 0xd0, 0x2e, 0xe8, 0x3c, 0x54, 0x9d, 0xa1, 0xdd,
0x71, 0xf7, 0x3b, 0x9e, 0xfb, 0x9c, 0xc6, 0x23, 0x13, 0xe1, 0x0c, 0xed, 0x0f, 0xf6, 0x35, 0xf7,
0x39, 0xdd, 0x7a, 0x2b, 0x74, 0x13, 0x26, 0x96, 0xdb, 0x27, 0xad, 0xf2, 0x44, 0xe3, 0x8f, 0x3a,
0xd0, 0xde, 0x06, 0x8d, 0x23, 0xd6, 0xbb, 0x32, 0x59, 0xef, 0xb0, 0x03, 0xba, 0x0a, 0x8d, 0x9e,
0x6b, 0x0f, 0x74, 0x36, 0x43, 0x77, 0x3c, 0xd7, 0x6e, 0x01, 0x5b, 0xc9, 0x09, 0x2a, 0x5a, 0x87,
0xaa, 0xe9, 0x18, 0xf8, 0x85, 0x58, 0x53, 0x55, 0x26, 0x47, 0x95, 0xb9, 0x9c, 0x09, 0xda, 0xa2,
0xbc, 0xcc, 0xe9, 0x60, 0x06, 0x9f, 0x84, 0x22, 0x02, 0xe1, 0xd1, 0x0e, 0x31, 0x5f, 0xe2, 0x56,
0x8d, 0x7b, 0x51, 0xd0, 0x76, 0xcd, 0x97, 0x98, 0x1e, 0xd5, 0x4c, 0x87, 0x60, 0x6f, 0x94, 0xa0,
0xeb, 0x2c, 0x41, 0xd7, 0x39, 0x35, 0xc8, 0xcd, 0x7f, 0xcc, 0x41, 0x23, 0x2e, 0x88, 0x9e, 0x5c,
0xf6, 0x19, 0x25, 0x88, 0x9e, 0xa0, 0x49, 0xc5, 0x62, 0x47, 0xef, 0x5a, 0x34, 0x21, 0x18, 0xf8,
0x05, 0x0b, 0x9e, 0xb2, 0x56, 0xe5, 0x34, 0x36, 0x00, 0x0d, 0x02, 0x6e, 0x1e, 0x43, 0x2a, 0xfc,
0x64, 0x51, 0x61, 0x14, 0x86, 0x53, 0x5a, 0x30, 0xcb, 0xcd, 0x08, 0x42, 0x27, 0x68, 0xd2, 0x3f,
0xdd, 0xa1, 0xc9, 0xa4, 0xf2, 0xd0, 0x09, 0x9a, 0x68, 0x03, 0x6a, 0x7c, 0xc8, 0x81, 0xee, 0xe9,
0x76, 0x10, 0x38, 0x17, 0xa5, 0xeb, 0xf9, 0x2e, 0x3e, 0x7c, 0xa8, 0x5b, 0x43, 0xbc, 0xa3, 0x9b,
0x9e, 0xc6, 0x27, 0x7a, 0x87, 0xf5, 0x42, 0xcb, 0xd0, 0xe4, 0xa3, 0xec, 0x9b, 0x16, 0x16, 0x21,
0x38, 0xcb, 0xc0, 0x50, 0x83, 0xd1, 0xef, 0x98, 0x16, 0xe6, 0x51, 0x16, 0x9a, 0xc0, 0xa6, 0xb6,
0xcc, 0x83, 0x8c, 0x51, 0xe8, 0xc4, 0xaa, 0xdf, 0xcf, 0xc3, 0x3c, 0x5d, 0x6b, 0xc1, 0x0e, 0x7e,
0xf2, 0x74, 0x73, 0x0e, 0xc0, 0x20, 0x7e, 0x27, 0x96, 0x72, 0x2a, 0x06, 0xf1, 0xb7, 0x79, 0xd6,
0x79, 0x2f, 0xc8, 0x28, 0xf9, 0xec, 0xb3, 0x46, 0x62, 0xed, 0xa7, 0x33, 0xff, 0x89, 0xee, 0x64,
0x2e, 0x41, 0x9d, 0xb8, 0x43, 0xaf, 0x87, 0x3b, 0xb1, 0xb3, 0x71, 0x8d, 0x13, 0xb7, 0xe5, 0x49,
0xb1, 0x24, 0xbd, 0x1b, 0x8a, 0x64, 0xb7, 0xd9, 0xe9, 0xb2, 0x7f, 0x39, 0x99, 0xfd, 0xff, 0xa1,
0xc0, 0x82, 0xb8, 0x65, 0x98, 0xde, 0x17, 0x59, 0xa9, 0x3f, 0x48, 0x74, 0xf9, 0x23, 0x4e, 0xac,
0x85, 0x09, 0xb6, 0xf5, 0xa2, 0x64, 0x5b, 0x8f, 0x9f, 0xda, 0x4a, 0xc9, 0x53, 0x9b, 0xfa, 0x03,
0x05, 0xea, 0xbb, 0x58, 0xf7, 0x7a, 0x07, 0x81, 0x5d, 0x5f, 0x81, 0xbc, 0x87, 0x9f, 0x0a, 0xb3,
0x2e, 0x67, 0x40, 0xd8, 0x58, 0x17, 0x8d, 0x76, 0x40, 0x17, 0xa0, 0x6a, 0xd8, 0x56, 0xe2, 0x72,
0x00, 0x0c, 0xdb, 0x0a, 0x40, 0x5d, 0x5c, 0x95, 0x7c, 0x4a, 0x95, 0x8f, 0x14, 0xa8, 0x7d, 0xc8,
0x91, 0x1d, 0xd7, 0xe4, 0xdd, 0xa8, 0x26, 0x57, 0x33, 0x34, 0xd1, 0xb0, 0xef, 0x99, 0xf8, 0x19,
0xfe, 0x64, 0x75, 0xf9, 0xa9, 0x02, 0x0b, 0xef, 0xeb, 0x8e, 0xe1, 0xee, 0xef, 0x4f, 0xef, 0xf7,
0xf5, 0x30, 0x93, 0x6e, 0x1d, 0xe7, 0xb0, 0x1a, 0xeb, 0xa4, 0xfe, 0x21, 0x07, 0x88, 0x86, 0xf0,
0x9a, 0x6e, 0xe9, 0x4e, 0x0f, 0x9f, 0x5c, 0x9b, 0x2b, 0xd0, 0x88, 0x2d, 0xbc, 0xf0, 0xe2, 0x3d,
0xba, 0xf2, 0x08, 0xba, 0x0b, 0x8d, 0x2e, 0x17, 0xd5, 0xf1, 0xb0, 0x4e, 0x5c, 0x87, 0x85, 0x67,
0x43, 0x7e, 0xd4, 0xdc, 0xf3, 0xcc, 0x7e, 0x1f, 0x7b, 0xeb, 0xae, 0x63, 0xf0, 0x63, 0x4d, 0xbd,
0x1b, 0xa8, 0x49, 0xbb, 0x32, 0x7f, 0x84, 0x59, 0x28, 0xc0, 0x9f, 0x10, 0xa6, 0x21, 0x82, 0xde,
0x86, 0x53, 0xf1, 0x13, 0xcf, 0x28, 0x9e, 0x9b, 0x24, 0x7a, 0x98, 0x91, 0xdd, 0x34, 0x48, 0xb2,
0x82, 0xfa, 0x0b, 0x05, 0x50, 0x08, 0xbb, 0x19, 0x7e, 0x63, 0xfb, 0xce, 0x24, 0xb7, 0x6a, 0x67,
0xa1, 0x62, 0x04, 0x3d, 0x45, 0xe8, 0x8c, 0x08, 0x34, 0x6f, 0x71, 0x33, 0x3a, 0x34, 0x85, 0x60,
0x23, 0x80, 0x2e, 0x9c, 0x78, 0x8f, 0xd1, 0xe2, 0x49, 0xa5, 0x90, 0x4c, 0x2a, 0x1f, 0xe7, 0xa0,
0x19, 0x3d, 0x88, 0x4d, 0xac, 0xd9, 0xab, 0xb9, 0x81, 0x3b, 0xe2, 0xd4, 0x59, 0x98, 0xe2, 0xd4,
0x99, 0x3e, 0x15, 0x17, 0x4f, 0x76, 0x2a, 0x56, 0x7f, 0xa5, 0xc0, 0x5c, 0xe2, 0xc2, 0x2b, 0x09,
0x31, 0x95, 0x34, 0xc4, 0x7c, 0x17, 0x8a, 0x14, 0x77, 0x61, 0x36, 0x49, 0x0d, 0x39, 0xfc, 0x89,
0x8f, 0xaa, 0xf1, 0x0e, 0xe8, 0x06, 0xcc, 0x4b, 0x8a, 0x24, 0xc2, 0xd1, 0x28, 0x5d, 0x23, 0x51,
0xff, 0x54, 0x80, 0x6a, 0x64, 0x3e, 0xc6, 0xa0, 0xe3, 0x49, 0x8e, 0x97, 0x09, 0xf3, 0xf2, 0x69,
0xf3, 0x32, 0xaa, 0x04, 0x68, 0x11, 0xca, 0x36, 0xb6, 0x39, 0xae, 0x10, 0x20, 0xc7, 0xc6, 0x36,
0x83, 0x6b, 0x8b, 0x50, 0xa6, 0xd0, 0x96, 0xe1, 0x5a, 0xbe, 0x66, 0x66, 0x9d, 0xa1, 0xcd, 0x50,
0x6d, 0x1c, 0x52, 0xcd, 0x1e, 0x01, 0xa9, 0xca, 0x71, 0x48, 0x15, 0x5b, 0x2c, 0x95, 0xe4, 0x62,
0x99, 0x14, 0xb0, 0xde, 0x84, 0xf9, 0x1e, 0xbb, 0xad, 0x36, 0xd6, 0x0e, 0xd7, 0xc3, 0x5f, 0xad,
0x2a, 0xc3, 0x7e, 0xb2, 0x5f, 0xe8, 0x0e, 0x0d, 0x2e, 0x81, 0x4e, 0x99, 0x97, 0x6b, 0xcc, 0xcb,
0x72, 0xc4, 0x26, 0x7c, 0xc3, 0x9d, 0x1c, 0xa4, 0x55, 0xd6, 0x4a, 0x42, 0xe5, 0xfa, 0x89, 0xa0,
0xf2, 0x05, 0xa8, 0x06, 0x25, 0x0b, 0xd3, 0x20, 0xad, 0x06, 0x4f, 0x6f, 0xc1, 0x82, 0x37, 0x48,
0xec, 0xea, 0x6c, 0x2e, 0x76, 0x75, 0xa6, 0xfe, 0x2d, 0x0f, 0x8d, 0x11, 0x48, 0x9a, 0x38, 0x15,
0x4c, 0x52, 0xec, 0xdb, 0x86, 0xe6, 0xe8, 0x66, 0x99, 0xcd, 0xd2, 0x91, 0x38, 0x2f, 0x79, 0xa7,
0x3c, 0x37, 0x48, 0xac, 0xb9, 0xd8, 0x95, 0x4a, 0xe1, 0x58, 0x57, 0x2a, 0x53, 0xd6, 0x84, 0x6e,
0xc1, 0x19, 0x8f, 0xa3, 0x30, 0xa3, 0x13, 0x33, 0x9b, 0x03, 0x9a, 0xd3, 0xc1, 0xcf, 0x9d, 0xa8,
0xf9, 0x19, 0xcb, 0x78, 0x36, 0x6b, 0x19, 0x27, 0xdd, 0x58, 0x4e, 0xb9, 0x31, 0x5d, 0x9a, 0xaa,
0xc8, 0x4a, 0x53, 0x0f, 0x60, 0xfe, 0x81, 0x43, 0x86, 0x5d, 0xd2, 0xf3, 0xcc, 0x2e, 0x0e, 0xae,
0x0c, 0x26, 0x72, 0x6b, 0x1b, 0xca, 0x22, 0x5f, 0x73, 0x97, 0x56, 0xb4, 0xb0, 0xad, 0xfe, 0x50,
0x81, 0x85, 0xf4, 0xb8, 0x2c, 0x62, 0x46, 0xc9, 0x40, 0x89, 0x25, 0x83, 0x6f, 0xc3, 0xfc, 0x68,
0xf8, 0x4e, 0x6c, 0xe4, 0xea, 0xea, 0x35, 0x99, 0xef, 0x24, 0x8a, 0x6b, 0x68, 0x34, 0x46, 0x40,
0x53, 0xff, 0xad, 0xc0, 0x29, 0xb1, 0xac, 0x28, 0xad, 0xcf, 0xae, 0x62, 0xe8, 0x06, 0xe5, 0x3a,
0x96, 0xe9, 0x84, 0xa0, 0x5e, 0xd8, 0xc8, 0x89, 0x02, 0xd4, 0xbf, 0x0f, 0x73, 0x82, 0x29, 0xdc,
0x67, 0x26, 0x44, 0x44, 0x0d, 0xde, 0x2f, 0xdc, 0x61, 0xae, 0x40, 0xc3, 0xdd, 0xdf, 0x8f, 0xca,
0xe3, 0x89, 0xb2, 0x2e, 0xa8, 0x42, 0xe0, 0x37, 0xa1, 0x19, 0xb0, 0x1d, 0x77, 0x67, 0x9b, 0x13,
0x1d, 0xc3, 0xab, 0xd4, 0x8f, 0x14, 0x68, 0xc5, 0xf7, 0xb9, 0x88, 0xf9, 0xc7, 0x07, 0x63, 0x5f,
0x8d, 0x17, 0x30, 0xae, 0x1c, 0xa1, 0xcf, 0x48, 0x8e, 0x38, 0x81, 0x5d, 0x7f, 0x09, 0x8d, 0xf8,
0x9a, 0x45, 0x35, 0x28, 0x6f, 0xbb, 0xfe, 0x37, 0x5e, 0x98, 0xc4, 0x6f, 0xce, 0xa0, 0x06, 0xc0,
0xb6, 0xeb, 0xef, 0x78, 0x98, 0x60, 0xc7, 0x6f, 0x2a, 0x08, 0xa0, 0xf4, 0x81, 0xb3, 0x61, 0x92,
0x27, 0xcd, 0x1c, 0x9a, 0x17, 0x5b, 0xaa, 0x6e, 0x6d, 0x89, 0x85, 0xd0, 0xcc, 0xd3, 0xee, 0x61,
0xab, 0x80, 0x9a, 0x50, 0x0b, 0x59, 0x36, 0x77, 0x1e, 0x34, 0x8b, 0xa8, 0x02, 0x45, 0xfe, 0x59,
0xba, 0x6e, 0x40, 0x33, 0x09, 0xfa, 0xe8, 0x98, 0x0f, 0x9c, 0xbb, 0x8e, 0xfb, 0x3c, 0x24, 0x35,
0x67, 0x50, 0x15, 0x66, 0x05, 0x90, 0x6e, 0x2a, 0x68, 0x0e, 0xaa, 0x11, 0x0c, 0xdb, 0xcc, 0x51,
0xc2, 0xa6, 0x37, 0xe8, 0x09, 0x34, 0xcb, 0x55, 0xa0, 0x5e, 0xdb, 0x70, 0x9f, 0x3b, 0xcd, 0xc2,
0xf5, 0x35, 0x28, 0x07, 0xc9, 0x84, 0xb2, 0xf2, 0xd1, 0x1d, 0xda, 0x6c, 0xce, 0xa0, 0x53, 0x50,
0x8f, 0x95, 0xc3, 0x9b, 0x0a, 0x42, 0xd0, 0x88, 0x3f, 0x55, 0x68, 0xe6, 0x56, 0x7f, 0x5e, 0x07,
0xe0, 0x68, 0xcb, 0x75, 0x3d, 0x03, 0x0d, 0x00, 0x6d, 0x62, 0x9f, 0xee, 0x24, 0xae, 0x13, 0xec,
0x02, 0x04, 0xdd, 0xcc, 0x00, 0x25, 0x69, 0x56, 0xa1, 0x6a, 0x3b, 0xeb, 0x3c, 0x92, 0x60, 0x57,
0x67, 0x90, 0xcd, 0x24, 0xee, 0x99, 0x36, 0xde, 0x33, 0x7b, 0x4f, 0x42, 0x98, 0x96, 0x2d, 0x31,
0xc1, 0x1a, 0x48, 0x4c, 0x24, 0x6d, 0xd1, 0xd8, 0xf5, 0x3d, 0xd3, 0xe9, 0x07, 0xe5, 0x24, 0x75,
0x06, 0x3d, 0x85, 0xd3, 0x9b, 0x98, 0x49, 0x37, 0x89, 0x6f, 0xf6, 0x48, 0x20, 0x70, 0x35, 0x5b,
0x60, 0x8a, 0xf9, 0x98, 0x22, 0x2d, 0x98, 0x4b, 0x3c, 0x0d, 0x42, 0xd7, 0xe5, 0x15, 0x29, 0xd9,
0x33, 0xa6, 0xf6, 0xdb, 0x13, 0xf1, 0x86, 0xd2, 0x4c, 0x68, 0xc4, 0x9f, 0xcd, 0xa0, 0xcf, 0x67,
0x0d, 0x90, 0x7a, 0x19, 0xd0, 0xbe, 0x3e, 0x09, 0x6b, 0x28, 0xea, 0x11, 0x8f, 0xa7, 0x71, 0xa2,
0xa4, 0xaf, 0x32, 0xda, 0x47, 0x55, 0xf2, 0xd4, 0x19, 0xf4, 0x5d, 0x38, 0x95, 0x7a, 0xbf, 0x80,
0xbe, 0x20, 0x1b, 0x3e, 0xeb, 0x99, 0xc3, 0x38, 0x09, 0x8f, 0x92, 0xab, 0x21, 0x5b, 0xfb, 0xd4,
0x7b, 0x97, 0xc9, 0xb5, 0x8f, 0x0c, 0x7f, 0x94, 0xf6, 0xc7, 0x96, 0x30, 0x04, 0x94, 0x7e, 0xc1,
0x80, 0xde, 0x91, 0x89, 0xc8, 0x7c, 0x45, 0xd1, 0x5e, 0x99, 0x94, 0x3d, 0x74, 0xf9, 0x90, 0xad,
0xd6, 0xe4, 0x71, 0x43, 0x2a, 0x36, 0xf3, 0xd5, 0x82, 0x5c, 0x6c, 0xf6, 0xc3, 0x01, 0x1e, 0xd4,
0xf1, 0xc2, 0xb8, 0xdc, 0x57, 0xd2, 0x62, 0xbe, 0x3c, 0xa8, 0xe5, 0x75, 0x76, 0x75, 0x06, 0xed,
0xc5, 0x92, 0x30, 0xba, 0x9a, 0x15, 0x13, 0xf1, 0x9b, 0x86, 0x71, 0xee, 0xea, 0x00, 0x6c, 0x62,
0xff, 0x3e, 0xf6, 0x3d, 0xb3, 0x47, 0x92, 0x83, 0x8a, 0xc6, 0x88, 0x21, 0x18, 0xf4, 0xda, 0x58,
0xbe, 0x50, 0xed, 0x2e, 0x54, 0x37, 0xb1, 0xaf, 0x71, 0xa4, 0x45, 0x50, 0x66, 0xcf, 0x80, 0x23,
0x10, 0xb1, 0x3c, 0x9e, 0x31, 0x9a, 0xc8, 0x12, 0x75, 0x7a, 0x94, 0x39, 0xb7, 0xe9, 0xd7, 0x03,
0xf2, 0x44, 0x96, 0x51, 0xf8, 0x57, 0x67, 0x56, 0xff, 0x5e, 0x85, 0x0a, 0x8b, 0x42, 0xba, 0xe3,
0x7d, 0xb6, 0x31, 0xbd, 0x82, 0x8d, 0xe9, 0x31, 0xcc, 0x25, 0xde, 0x1d, 0xc8, 0xfd, 0x29, 0x7f,
0x9c, 0x30, 0x2e, 0xe4, 0xbb, 0x80, 0xd2, 0x55, 0x75, 0x79, 0xaa, 0xc8, 0xac, 0xbe, 0x8f, 0x93,
0xf1, 0x18, 0xe6, 0x12, 0x25, 0x64, 0xb9, 0x05, 0xf2, 0x3a, 0xf3, 0x04, 0x16, 0xa4, 0x6b, 0x9b,
0x72, 0x0b, 0x32, 0x6b, 0xa0, 0xe3, 0x64, 0x3c, 0xe4, 0x85, 0xf9, 0x10, 0xb4, 0x5f, 0xcb, 0xca,
0x37, 0x89, 0x8b, 0xd6, 0xd7, 0xbf, 0x03, 0xbd, 0xfa, 0x1d, 0xfa, 0x31, 0xcc, 0x25, 0xaa, 0x0b,
0x72, 0xef, 0xca, 0x4b, 0x10, 0xe3, 0x46, 0xff, 0x14, 0xf7, 0x94, 0x5d, 0x28, 0xf1, 0x92, 0x00,
0xba, 0x28, 0x3f, 0xc2, 0x44, 0xca, 0x05, 0xed, 0x71, 0x45, 0x05, 0x32, 0xb4, 0x7c, 0xc2, 0x06,
0x2d, 0xb2, 0x15, 0x83, 0xa4, 0xf5, 0x9c, 0x68, 0xa9, 0xa0, 0x3d, 0xbe, 0x3a, 0x10, 0x0c, 0xfa,
0xaa, 0xf7, 0xa9, 0xb5, 0x2f, 0x3d, 0x5a, 0xed, 0x9b, 0xfe, 0xc1, 0xb0, 0x4b, 0xfd, 0x71, 0x83,
0x73, 0xbe, 0x63, 0xba, 0xe2, 0xeb, 0x46, 0xa0, 0xda, 0x0d, 0x36, 0xd2, 0x0d, 0x66, 0xcb, 0xa0,
0xdb, 0x2d, 0xb1, 0xe6, 0xad, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xf8, 0xae, 0x2f, 0x61,
0x30, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
@ -144,8 +145,8 @@ func (node *Proxy) ReleaseDQLMessageStream(ctx context.Context, request *proxypb
}, nil
}
// TODO(dragondriver): add more detailed ut for ConsistencyLevel, should we support multiple consistency level in Proxy?
// CreateCollection create a collection by the schema.
// TODO(dragondriver): add more detailed ut for ConsistencyLevel, should we support multiple consistency level in Proxy?
func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
return unhealthyStatus(), nil
@ -2399,11 +2400,10 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
},
ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
},
resultBuf: make(chan []*internalpb.SearchResults, 1),
query: request,
chMgr: node.chMgr,
qc: node.queryCoord,
tr: timerecord.NewTimeRecorder("search"),
request: request,
qc: node.queryCoord,
tr: timerecord.NewTimeRecorder("search"),
getQueryNodePolicy: defaultGetQueryNodePolicy,
}
travelTs := request.TravelTimestamp
@ -2516,11 +2516,11 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
metrics.ProxySearchCount.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
metrics.SearchLabel, metrics.SuccessLabel).Inc()
metrics.ProxySearchVectors.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
metrics.SearchLabel).Set(float64(qt.result.Results.NumQueries))
metrics.SearchLabel).Set(float64(qt.result.GetResults().GetNumQueries()))
searchDur := tr.ElapseSpan().Milliseconds()
metrics.ProxySearchLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
metrics.SearchLabel).Observe(float64(searchDur))
metrics.ProxySearchLatencyPerNQ.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(searchDur) / float64(qt.result.Results.NumQueries))
metrics.ProxySearchLatencyPerNQ.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(searchDur) / float64(qt.result.GetResults().GetNumQueries()))
return qt.result, nil
}
@ -2641,10 +2641,10 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*
},
ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
},
resultBuf: make(chan []*internalpb.RetrieveResults),
query: request,
chMgr: node.chMgr,
qc: node.queryCoord,
request: request,
qc: node.queryCoord,
getQueryNodePolicy: defaultGetQueryNodePolicy,
queryShardPolicy: roundRobinPolicy,
}
method := "Query"
@ -3058,11 +3058,12 @@ func (node *Proxy) CalcDistance(ctx context.Context, request *milvuspb.CalcDista
},
ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
},
resultBuf: make(chan []*internalpb.RetrieveResults),
query: queryRequest,
chMgr: node.chMgr,
qc: node.queryCoord,
ids: ids.IdArray,
request: queryRequest,
qc: node.queryCoord,
ids: ids.IdArray,
getQueryNodePolicy: defaultGetQueryNodePolicy,
queryShardPolicy: roundRobinPolicy,
}
err := node.sched.dqQueue.Enqueue(qt)
@ -3715,6 +3716,7 @@ func (node *Proxy) RegisterLink(ctx context.Context, req *milvuspb.RegisterLinkR
}, nil
}
// GetMetrics gets the metrics of proxy
// TODO(dragondriver): cache the Metrics and set a retention to the cache
func (node *Proxy) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
log.Debug("Proxy.GetMetrics",
@ -3817,6 +3819,13 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
}
collectionID, err := globalMetaCache.GetCollectionID(ctx, req.GetCollectionName())
if err != nil {
log.Error("failed to get collection id", zap.String("collection name", req.GetCollectionName()), zap.Error(err))
status.Reason = err.Error()
return status, nil
}
infoResp, err := node.queryCoord.LoadBalance(ctx, &querypb.LoadBalanceRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadBalanceSegments,
@ -3828,6 +3837,7 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
DstNodeIDs: req.DstNodeIDs,
BalanceReason: querypb.TriggerCondition_GrpcRequest,
SealedSegmentIDs: req.SealedSegmentIDs,
CollectionID: collectionID,
})
if err != nil {
log.Error("Failed to LoadBalance from Query Coordinator",
@ -3873,6 +3883,7 @@ func (node *Proxy) ManualCompaction(ctx context.Context, req *milvuspb.ManualCom
return resp, err
}
// GetCompactionStateWithPlans returns the compactions states with the given plan ID
func (node *Proxy) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.GetCompactionPlansRequest) (*milvuspb.GetCompactionPlansResponse, error) {
log.Info("received GetCompactionStateWithPlans request", zap.Int64("compactionID", req.GetCompactionID()))
resp := &milvuspb.GetCompactionPlansResponse{}
@ -3979,7 +3990,7 @@ func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReq
return resp, err
}
// Check import task state from datanode
// GetImportState checks import task state from datanode
func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportStateRequest) (*milvuspb.GetImportStateResponse, error) {
log.Info("received get import state request", zap.Int64("taskID", req.GetTask()))
resp := &milvuspb.GetImportStateResponse{}
@ -4206,3 +4217,19 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
Usernames: usernames,
}, nil
}
// SendSearchResult needs to be removed TODO
func (node *Proxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
}, nil
}
// SendRetrieveResult needs to be removed TODO
func (node *Proxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
}, nil
}

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/types"
@ -52,6 +53,7 @@ type Cache interface {
GetPartitionInfo(ctx context.Context, collectionName string, partitionName string) (*partitionInfo, error)
// GetCollectionSchema get collection's schema.
GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error)
GetShards(ctx context.Context, withCache bool, collectionName string, qc types.QueryCoord) ([]*querypb.ShardLeadersList, error)
RemoveCollection(ctx context.Context, collectionName string)
RemovePartition(ctx context.Context, collectionName string, partitionName string)
@ -67,6 +69,7 @@ type collectionInfo struct {
collID typeutil.UniqueID
schema *schemapb.CollectionSchema
partInfo map[string]*partitionInfo
shardLeaders []*querypb.ShardLeadersList
createdTimestamp uint64
createdUtcTimestamp uint64
}
@ -160,6 +163,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
collInfo = m.collInfo[collectionName]
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
}
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionInfo", metrics.CacheHitLabel).Inc()
return &collectionInfo{
collID: collInfo.collID,
@ -167,6 +171,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
partInfo: collInfo.partInfo,
createdTimestamp: collInfo.createdTimestamp,
createdUtcTimestamp: collInfo.createdUtcTimestamp,
shardLeaders: collInfo.shardLeaders,
}, nil
}
@ -520,3 +525,41 @@ func (m *MetaCache) GetCredUsernames(ctx context.Context) ([]string, error) {
return usernames, nil
}
// GetShards update cache if withCache == false
func (m *MetaCache) GetShards(ctx context.Context, withCache bool, collectionName string, qc types.QueryCoord) ([]*querypb.ShardLeadersList, error) {
info, err := m.GetCollectionInfo(ctx, collectionName)
if err != nil {
return nil, err
}
if withCache {
if len(info.shardLeaders) > 0 {
return info.shardLeaders, nil
}
log.Info("no shard cache for collection, try to get shard leaders from QueryCoord",
zap.String("collectionName", collectionName))
}
m.mu.Lock()
defer m.mu.Unlock()
req := &querypb.GetShardLeadersRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_GetShardLeaders,
SourceID: Params.ProxyCfg.ProxyID,
},
CollectionID: info.collID,
}
resp, err := qc.GetShardLeaders(ctx, req)
if err != nil {
return nil, err
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
return nil, fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.Status.Reason)
}
shards := resp.GetShards()
m.collInfo[collectionName].shardLeaders = shards
return shards, nil
}

View File

@ -22,17 +22,17 @@ import (
"fmt"
"testing"
"github.com/milvus-io/milvus/internal/util/crypto"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/crypto"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type MockRootCoordClientInterface struct {
@ -310,3 +310,51 @@ func TestMetaCache_GetPartitionError(t *testing.T) {
log.Debug(err.Error())
assert.Equal(t, id, typeutil.UniqueID(0))
}
func TestMetaCache_GetShards(t *testing.T) {
client := &MockRootCoordClientInterface{}
err := InitMetaCache(client)
require.Nil(t, err)
var (
ctx = context.TODO()
collectionName = "collection1"
qc = NewQueryCoordMock()
)
qc.Init()
qc.Start()
defer qc.Stop()
t.Run("No collection in meta cache", func(t *testing.T) {
shards, err := globalMetaCache.GetShards(ctx, true, "non-exists", qc)
assert.Error(t, err)
assert.Empty(t, shards)
})
t.Run("without shardLeaders in collection info invalid shardLeaders", func(t *testing.T) {
qc.validShardLeaders = false
shards, err := globalMetaCache.GetShards(ctx, false, collectionName, qc)
assert.Error(t, err)
assert.Empty(t, shards)
})
t.Run("without shardLeaders in collection info", func(t *testing.T) {
qc.validShardLeaders = true
shards, err := globalMetaCache.GetShards(ctx, true, collectionName, qc)
assert.NoError(t, err)
assert.NotEmpty(t, shards)
assert.Equal(t, 1, len(shards))
assert.Equal(t, 3, len(shards[0].GetNodeAddrs()))
assert.Equal(t, 3, len(shards[0].GetNodeIds()))
// get from cache
qc.validShardLeaders = false
shards, err = globalMetaCache.GetShards(ctx, true, collectionName, qc)
assert.NoError(t, err)
assert.NotEmpty(t, shards)
assert.Equal(t, 1, len(shards))
assert.Equal(t, 3, len(shards[0].GetNodeAddrs()))
assert.Equal(t, 3, len(shards[0].GetNodeIds()))
})
}

View File

@ -93,8 +93,7 @@ type Proxy struct {
factory dependency.Factory
searchResultCh chan *internalpb.SearchResults
retrieveResultCh chan *internalpb.RetrieveResults
searchResultCh chan *internalpb.SearchResults
// Add callback functions at different stages
startCallbacks []func()
@ -107,11 +106,10 @@ func NewProxy(ctx context.Context, factory dependency.Factory) (*Proxy, error) {
ctx1, cancel := context.WithCancel(ctx)
n := 1024 // better to be configurable
node := &Proxy{
ctx: ctx1,
cancel: cancel,
factory: factory,
searchResultCh: make(chan *internalpb.SearchResults, n),
retrieveResultCh: make(chan *internalpb.RetrieveResults, n),
ctx: ctx1,
cancel: cancel,
factory: factory,
searchResultCh: make(chan *internalpb.SearchResults, n),
}
node.UpdateStateCode(internalpb.StateCode_Abnormal)
logutil.Logger(ctx).Debug("create a new Proxy instance", zap.Any("state", node.stateCode.Load()))
@ -228,9 +226,7 @@ func (node *Proxy) Init() error {
log.Debug("create channels manager done", zap.String("role", typeutil.ProxyRole))
log.Debug("create task scheduler", zap.String("role", typeutil.ProxyRole))
node.sched, err = newTaskScheduler(node.ctx, node.idAllocator, node.tsoAllocator, node.factory,
schedOptWithSearchResultCh(node.searchResultCh),
schedOptWithRetrieveResultCh(node.retrieveResultCh))
node.sched, err = newTaskScheduler(node.ctx, node.idAllocator, node.tsoAllocator, node.factory)
if err != nil {
log.Warn("failed to create task scheduler", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err

View File

@ -17,12 +17,7 @@
package proxy
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"math/rand"
"net"
"os"
"strconv"
@ -30,81 +25,56 @@ import (
"testing"
"time"
"github.com/golang/protobuf/proto"
ot "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/util/crypto"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/prometheus/client_golang/prometheus"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/distance"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
grpcindexcoordclient "github.com/milvus-io/milvus/internal/distributed/indexcoord/client"
grpcquerycoordclient "github.com/milvus-io/milvus/internal/distributed/querycoord/client"
grpcdatacoordclient2 "github.com/milvus-io/milvus/internal/distributed/datacoord/client"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/typeutil"
rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
grpcindexnode "github.com/milvus-io/milvus/internal/distributed/indexnode"
grpcindexcoord "github.com/milvus-io/milvus/internal/distributed/indexcoord"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
grpcdatacoordclient "github.com/milvus-io/milvus/internal/distributed/datacoord"
grpcdatacoordclient2 "github.com/milvus-io/milvus/internal/distributed/datacoord/client"
grpcdatanode "github.com/milvus-io/milvus/internal/distributed/datanode"
grpcquerynode "github.com/milvus-io/milvus/internal/distributed/querynode"
grpcindexcoord "github.com/milvus-io/milvus/internal/distributed/indexcoord"
grpcindexcoordclient "github.com/milvus-io/milvus/internal/distributed/indexcoord/client"
grpcindexnode "github.com/milvus-io/milvus/internal/distributed/indexnode"
grpcquerycoord "github.com/milvus-io/milvus/internal/distributed/querycoord"
grpcquerycoordclient "github.com/milvus-io/milvus/internal/distributed/querycoord/client"
grpcquerynode "github.com/milvus-io/milvus/internal/distributed/querynode"
grpcrootcoord "github.com/milvus-io/milvus/internal/distributed/rootcoord"
rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
"github.com/milvus-io/milvus/internal/datacoord"
"github.com/milvus-io/milvus/internal/datanode"
"github.com/milvus-io/milvus/internal/indexcoord"
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/querynode"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/querycoord"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/querynode"
)
const (
@ -620,12 +590,12 @@ func TestProxy(t *testing.T) {
rowNum := 3000
indexName := "_default"
nlist := 10
nprobe := 10
topk := 10
// nprobe := 10
// topk := 10
// add a test parameter
roundDecimal := 6
// roundDecimal := 6
nq := 10
expr := fmt.Sprintf("%s > 0", int64Field)
// expr := fmt.Sprintf("%s > 0", int64Field)
var segmentIDs []int64
// an int64 field (pk) & a float vector field
@ -721,76 +691,6 @@ func TestProxy(t *testing.T) {
}
}
constructPlaceholderGroup := func() *milvuspb.PlaceholderGroup {
values := make([][]byte, 0, nq)
for i := 0; i < nq; i++ {
bs := make([]byte, 0, dim*4)
for j := 0; j < dim; j++ {
var buffer bytes.Buffer
f := rand.Float32()
err := binary.Write(&buffer, common.Endian, f)
assert.NoError(t, err)
bs = append(bs, buffer.Bytes()...)
}
values = append(values, bs)
}
return &milvuspb.PlaceholderGroup{
Placeholders: []*milvuspb.PlaceholderValue{
{
Tag: "$0",
Type: milvuspb.PlaceholderType_FloatVector,
Values: values,
},
},
}
}
constructSearchRequest := func() *milvuspb.SearchRequest {
params := make(map[string]string)
params["nprobe"] = strconv.Itoa(nprobe)
b, err := json.Marshal(params)
assert.NoError(t, err)
plg := constructPlaceholderGroup()
plgBs, err := proto.Marshal(plg)
assert.NoError(t, err)
return &milvuspb.SearchRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
PartitionNames: nil,
Dsl: expr,
PlaceholderGroup: plgBs,
DslType: commonpb.DslType_BoolExprV1,
OutputFields: nil,
SearchParams: []*commonpb.KeyValuePair{
{
Key: MetricTypeKey,
Value: distance.L2,
},
{
Key: SearchParamsKey,
Value: string(b),
},
{
Key: AnnsFieldKey,
Value: floatVecField,
},
{
Key: TopKKey,
Value: strconv.Itoa(topk),
},
{
Key: RoundDecimalKey,
Value: strconv.Itoa(roundDecimal),
},
},
TravelTimestamp: 0,
GuaranteeTimestamp: 0,
}
}
wg.Add(1)
t.Run("create collection", func(t *testing.T) {
defer wg.Done()
@ -1368,103 +1268,178 @@ func TestProxy(t *testing.T) {
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
if loaded {
wg.Add(1)
t.Run("search", func(t *testing.T) {
defer wg.Done()
req := constructSearchRequest()
// nprobe := 10
// topk := 10
// roundDecimal := 6
// expr := fmt.Sprintf("%s > 0", int64Field)
// constructPlaceholderGroup := func() *milvuspb.PlaceholderGroup {
// values := make([][]byte, 0, nq)
// for i := 0; i < nq; i++ {
// bs := make([]byte, 0, dim*4)
// for j := 0; j < dim; j++ {
// var buffer bytes.Buffer
// f := rand.Float32()
// err := binary.Write(&buffer, common.Endian, f)
// assert.NoError(t, err)
// bs = append(bs, buffer.Bytes()...)
// }
// values = append(values, bs)
// }
//
// return &milvuspb.PlaceholderGroup{
// Placeholders: []*milvuspb.PlaceholderValue{
// {
// Tag: "$0",
// Type: milvuspb.PlaceholderType_FloatVector,
// Values: values,
// },
// },
// }
// }
//
// constructSearchRequest := func() *milvuspb.SearchRequest {
// params := make(map[string]string)
// params["nprobe"] = strconv.Itoa(nprobe)
// b, err := json.Marshal(params)
// assert.NoError(t, err)
// plg := constructPlaceholderGroup()
// plgBs, err := proto.Marshal(plg)
// assert.NoError(t, err)
//
// return &milvuspb.SearchRequest{
// Base: nil,
// DbName: dbName,
// CollectionName: collectionName,
// PartitionNames: nil,
// Dsl: expr,
// PlaceholderGroup: plgBs,
// DslType: commonpb.DslType_BoolExprV1,
// OutputFields: nil,
// SearchParams: []*commonpb.KeyValuePair{
// {
// Key: MetricTypeKey,
// Value: distance.L2,
// },
// {
// Key: SearchParamsKey,
// Value: string(b),
// },
// {
// Key: AnnsFieldKey,
// Value: floatVecField,
// },
// {
// Key: TopKKey,
// Value: strconv.Itoa(topk),
// },
// {
// Key: RoundDecimalKey,
// Value: strconv.Itoa(roundDecimal),
// },
// },
// TravelTimestamp: 0,
// GuaranteeTimestamp: 0,
// }
// }
resp, err := proxy.Search(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
wg.Add(1)
t.Run("search_travel", func(t *testing.T) {
defer wg.Done()
past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration-100) * time.Second)
travelTs := tsoutil.ComposeTSByTime(past, 0)
req := constructSearchRequest()
req.TravelTimestamp = travelTs
//resp, err := proxy.Search(ctx, req)
res, err := proxy.Search(ctx, req)
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
})
wg.Add(1)
t.Run("search_travel_succ", func(t *testing.T) {
defer wg.Done()
past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration+100) * time.Second)
travelTs := tsoutil.ComposeTSByTime(past, 0)
req := constructSearchRequest()
req.TravelTimestamp = travelTs
//resp, err := proxy.Search(ctx, req)
res, err := proxy.Search(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
})
wg.Add(1)
t.Run("query", func(t *testing.T) {
defer wg.Done()
//resp, err := proxy.Query(ctx, &milvuspb.QueryRequest{
_, err := proxy.Query(ctx, &milvuspb.QueryRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
Expr: expr,
OutputFields: nil,
PartitionNames: nil,
TravelTimestamp: 0,
GuaranteeTimestamp: 0,
})
assert.NoError(t, err)
// FIXME(dragondriver)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// TODO(dragondriver): compare query result
})
wg.Add(1)
t.Run("query_travel", func(t *testing.T) {
defer wg.Done()
past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration-100) * time.Second)
travelTs := tsoutil.ComposeTSByTime(past, 0)
queryReq := &milvuspb.QueryRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
Expr: expr,
OutputFields: nil,
PartitionNames: nil,
TravelTimestamp: travelTs,
GuaranteeTimestamp: 0,
}
res, err := proxy.Query(ctx, queryReq)
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
})
wg.Add(1)
t.Run("query_travel_succ", func(t *testing.T) {
defer wg.Done()
past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration+100) * time.Second)
travelTs := tsoutil.ComposeTSByTime(past, 0)
queryReq := &milvuspb.QueryRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
Expr: expr,
OutputFields: nil,
PartitionNames: nil,
TravelTimestamp: travelTs,
GuaranteeTimestamp: 0,
}
res, err := proxy.Query(ctx, queryReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_EmptyCollection, res.Status.ErrorCode)
})
}
// TODO(Goose): reopen after joint-tests
// if loaded {
// wg.Add(1)
// t.Run("search", func(t *testing.T) {
// defer wg.Done()
// req := constructSearchRequest()
//
// resp, err := proxy.Search(ctx, req)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// })
//
// wg.Add(1)
// t.Run("search_travel", func(t *testing.T) {
// defer wg.Done()
// past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration-100) * time.Second)
// travelTs := tsoutil.ComposeTSByTime(past, 0)
// req := constructSearchRequest()
// req.TravelTimestamp = travelTs
// //resp, err := proxy.Search(ctx, req)
// res, err := proxy.Search(ctx, req)
// assert.NoError(t, err)
// assert.NotEqual(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
// })
//
// wg.Add(1)
// t.Run("search_travel_succ", func(t *testing.T) {
// defer wg.Done()
// past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration+100) * time.Second)
// travelTs := tsoutil.ComposeTSByTime(past, 0)
// req := constructSearchRequest()
// req.TravelTimestamp = travelTs
// //resp, err := proxy.Search(ctx, req)
// res, err := proxy.Search(ctx, req)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
// })
//
// wg.Add(1)
// t.Run("query", func(t *testing.T) {
// defer wg.Done()
// //resp, err := proxy.Query(ctx, &milvuspb.QueryRequest{
// _, err := proxy.Query(ctx, &milvuspb.QueryRequest{
// Base: nil,
// DbName: dbName,
// CollectionName: collectionName,
// Expr: expr,
// OutputFields: nil,
// PartitionNames: nil,
// TravelTimestamp: 0,
// GuaranteeTimestamp: 0,
// })
// assert.NoError(t, err)
// // FIXME(dragondriver)
// // assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// // TODO(dragondriver): compare query result
// })
//
// wg.Add(1)
// t.Run("query_travel", func(t *testing.T) {
// defer wg.Done()
// past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration-100) * time.Second)
// travelTs := tsoutil.ComposeTSByTime(past, 0)
// queryReq := &milvuspb.QueryRequest{
// Base: nil,
// DbName: dbName,
// CollectionName: collectionName,
// Expr: expr,
// OutputFields: nil,
// PartitionNames: nil,
// TravelTimestamp: travelTs,
// GuaranteeTimestamp: 0,
// }
// res, err := proxy.Query(ctx, queryReq)
// assert.NoError(t, err)
// assert.NotEqual(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
// })
//
// wg.Add(1)
// t.Run("query_travel_succ", func(t *testing.T) {
// defer wg.Done()
// past := time.Now().Add(time.Duration(-1*Params.CommonCfg.RetentionDuration+100) * time.Second)
// travelTs := tsoutil.ComposeTSByTime(past, 0)
// queryReq := &milvuspb.QueryRequest{
// Base: nil,
// DbName: dbName,
// CollectionName: collectionName,
// Expr: expr,
// OutputFields: nil,
// PartitionNames: nil,
// TravelTimestamp: travelTs,
// GuaranteeTimestamp: 0,
// }
// res, err := proxy.Query(ctx, queryReq)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_EmptyCollection, res.Status.ErrorCode)
// })
// }
wg.Add(1)
t.Run("calculate distance", func(t *testing.T) {
@ -1683,6 +1658,7 @@ func TestProxy(t *testing.T) {
DbName: dbName,
CollectionName: collectionName,
PartitionNames: []string{partitionName},
ReplicaNumber: 1,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
@ -1693,6 +1669,7 @@ func TestProxy(t *testing.T) {
DbName: dbName,
CollectionName: collectionName,
PartitionNames: []string{otherPartitionName},
ReplicaNumber: 1,
})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
@ -1703,6 +1680,7 @@ func TestProxy(t *testing.T) {
DbName: dbName,
CollectionName: otherCollectionName,
PartitionNames: []string{partitionName},
ReplicaNumber: 1,
})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)

View File

@ -36,12 +36,20 @@ type QueryCoordMockOption func(mock *QueryCoordMock)
type queryCoordShowCollectionsFuncType func(ctx context.Context, request *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error)
type queryCoordShowPartitionsFuncType func(ctx context.Context, request *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error)
func SetQueryCoordShowCollectionsFunc(f queryCoordShowCollectionsFuncType) QueryCoordMockOption {
return func(mock *QueryCoordMock) {
mock.showCollectionsFunc = f
}
}
func withValidShardLeaders() QueryCoordMockOption {
return func(mock *QueryCoordMock) {
mock.validShardLeaders = true
}
}
type QueryCoordMock struct {
nodeID typeutil.UniqueID
address string
@ -54,9 +62,12 @@ type QueryCoordMock struct {
showCollectionsFunc queryCoordShowCollectionsFuncType
getMetricsFunc getMetricsFuncType
showPartitionsFunc queryCoordShowPartitionsFuncType
statisticsChannel string
timeTickChannel string
validShardLeaders bool
}
func (coord *QueryCoordMock) updateState(state internalpb.StateCode) {
@ -223,6 +234,14 @@ func (coord *QueryCoordMock) ReleaseCollection(ctx context.Context, req *querypb
}, nil
}
func (coord *QueryCoordMock) SetShowPartitionsFunc(f queryCoordShowPartitionsFuncType) {
coord.showPartitionsFunc = f
}
func (coord *QueryCoordMock) ResetShowPartitionsFunc() {
coord.showPartitionsFunc = nil
}
func (coord *QueryCoordMock) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error) {
if !coord.healthy() {
return &querypb.ShowPartitionsResponse{
@ -233,6 +252,10 @@ func (coord *QueryCoordMock) ShowPartitions(ctx context.Context, req *querypb.Sh
}, nil
}
if coord.showPartitionsFunc != nil {
return coord.showPartitionsFunc(ctx, req)
}
panic("implement me")
}
@ -360,6 +383,21 @@ func (coord *QueryCoordMock) GetShardLeaders(ctx context.Context, req *querypb.G
}, nil
}
if coord.validShardLeaders {
return &querypb.GetShardLeadersResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Shards: []*querypb.ShardLeadersList{
{
ChannelName: "channel-1",
NodeIds: []int64{1, 2, 3},
NodeAddrs: []string{"localhost:9000", "localhost:9001", "localhost:9002"},
},
},
}, nil
}
return &querypb.GetShardLeadersResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,

View File

@ -0,0 +1,114 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"sync/atomic"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
var _ types.QueryNode = &QueryNodeMock{}
type QueryNodeMock struct {
nodeID typeutil.UniqueID
address string
state atomic.Value // internal.StateCode
withSearchResult *internalpb.SearchResults
withQueryResult *internalpb.RetrieveResults
}
func (m *QueryNodeMock) Search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {
return m.withSearchResult, nil
}
func (m *QueryNodeMock) Query(ctx context.Context, req *querypb.QueryRequest) (*internalpb.RetrieveResults, error) {
return m.withQueryResult, nil
}
// TODO
func (m *QueryNodeMock) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) WatchDeltaChannels(ctx context.Context, req *querypb.WatchDeltaChannelsRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) LoadSegments(ctx context.Context, req *querypb.LoadSegmentsRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) ReleaseSegments(ctx context.Context, req *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
return nil, nil
}
func (m *QueryNodeMock) Init() error { return nil }
func (m *QueryNodeMock) Start() error { return nil }
func (m *QueryNodeMock) Stop() error { return nil }
func (m *QueryNodeMock) Register() error { return nil }
func (m *QueryNodeMock) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
return nil, nil
}
func (m *QueryNodeMock) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return nil, nil
}
func (m *QueryNodeMock) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return nil, nil
}

View File

@ -1,24 +0,0 @@
package proxy
import (
"context"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
)
func (node *Proxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) {
node.searchResultCh <- req
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
}
func (node *Proxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) {
node.retrieveResultCh <- req
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
}

View File

@ -2675,9 +2675,10 @@ func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
Timestamp: lct.Base.Timestamp,
SourceID: lct.Base.SourceID,
},
DbID: 0,
CollectionID: collID,
Schema: collSchema,
DbID: 0,
CollectionID: collID,
Schema: collSchema,
ReplicaNumber: lct.ReplicaNumber,
}
log.Debug("send LoadCollectionRequest to query coordinator", zap.String("role", typeutil.ProxyRole),
zap.Int64("msgID", request.Base.MsgID), zap.Int64("collectionID", request.CollectionID),
@ -2869,10 +2870,11 @@ func (lpt *loadPartitionsTask) Execute(ctx context.Context) error {
Timestamp: lpt.Base.Timestamp,
SourceID: lpt.Base.SourceID,
},
DbID: 0,
CollectionID: collID,
PartitionIDs: partitionIDs,
Schema: collSchema,
DbID: 0,
CollectionID: collID,
PartitionIDs: partitionIDs,
Schema: collSchema,
ReplicaNumber: lpt.ReplicaNumber,
}
lpt.result, err = lpt.queryCoord.LoadPartitions(ctx, request)
return err

View File

@ -0,0 +1,81 @@
package proxy
import (
"context"
"errors"
"fmt"
qnClient "github.com/milvus-io/milvus/internal/distributed/querynode/client"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"go.uber.org/zap"
)
type getQueryNodePolicy func(context.Context, string) (types.QueryNode, error)
type pickShardPolicy func(ctx context.Context, policy getQueryNodePolicy, query func(UniqueID, types.QueryNode) error, leaders *querypb.ShardLeadersList) error
// TODO add another policy to enbale the use of cache
// defaultGetQueryNodePolicy creates QueryNode client for every address everytime
func defaultGetQueryNodePolicy(ctx context.Context, address string) (types.QueryNode, error) {
qn, err := qnClient.NewClient(ctx, address)
if err != nil {
return nil, err
}
if err := qn.Init(); err != nil {
return nil, err
}
if err := qn.Start(); err != nil {
return nil, err
}
return qn, nil
}
var (
errBegin = errors.New("begin error")
errInvalidShardLeaders = errors.New("Invalid shard leader")
)
func roundRobinPolicy(ctx context.Context, getQueryNodePolicy getQueryNodePolicy, query func(UniqueID, types.QueryNode) error, leaders *querypb.ShardLeadersList) error {
var (
err = errBegin
current = 0
qn types.QueryNode
)
replicaNum := len(leaders.GetNodeIds())
for err != nil && current < replicaNum {
currentID := leaders.GetNodeIds()[current]
if err != errBegin {
log.Warn("retry with another QueryNode", zap.String("leader", leaders.GetChannelName()), zap.Int64("nodeID", currentID))
}
qn, err = getQueryNodePolicy(ctx, leaders.GetNodeAddrs()[current])
if err != nil {
log.Warn("fail to get valid QueryNode", zap.Int64("nodeID", currentID),
zap.Error(err))
current++
continue
}
defer qn.Stop()
err = query(currentID, qn)
if err != nil {
log.Warn("fail to Query with shard leader",
zap.String("leader", leaders.GetChannelName()),
zap.Int64("nodeID", currentID),
zap.Error(err))
}
current++
}
if current == replicaNum && err != nil {
return fmt.Errorf("no shard leaders available for channel: %s, leaders: %v, err: %s", leaders.GetChannelName(), leaders.GetNodeIds(), err.Error())
}
return nil
}

View File

@ -6,219 +6,121 @@ import (
"fmt"
"regexp"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
const (
WithCache = true
WithoutCache = false
)
type queryTask struct {
Condition
*internalpb.RetrieveRequest
ctx context.Context
resultBuf chan []*internalpb.RetrieveResults
result *milvuspb.QueryResults
query *milvuspb.QueryRequest
chMgr channelsMgr
request *milvuspb.QueryRequest
qc types.QueryCoord
ids *schemapb.IDs
collectionName string
collectionID UniqueID
resultBuf chan *internalpb.RetrieveResults
toReduceResults []*internalpb.RetrieveResults
runningGroup *errgroup.Group
runningGroupCtx context.Context
getQueryNodePolicy getQueryNodePolicy
queryShardPolicy pickShardPolicy
}
func (qt *queryTask) PreExecute(ctx context.Context) error {
qt.Base.MsgType = commonpb.MsgType_Retrieve
qt.Base.SourceID = Params.ProxyCfg.ProxyID
func (t *queryTask) PreExecute(ctx context.Context) error {
if t.getQueryNodePolicy == nil {
t.getQueryNodePolicy = defaultGetQueryNodePolicy
}
collectionName := qt.query.CollectionName
if t.queryShardPolicy == nil {
t.queryShardPolicy = roundRobinPolicy
}
if err := validateCollectionName(qt.query.CollectionName); err != nil {
t.Base.MsgType = commonpb.MsgType_Retrieve
t.Base.SourceID = Params.ProxyCfg.ProxyID
collectionName := t.request.CollectionName
t.collectionName = collectionName
if err := validateCollectionName(collectionName); err != nil {
log.Warn("Invalid collection name.", zap.String("collectionName", collectionName),
zap.Int64("requestID", qt.Base.MsgID), zap.String("requestType", "query"))
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "query"))
return err
}
log.Info("Validate collection name.", zap.Any("collectionName", collectionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
info, err := globalMetaCache.GetCollectionInfo(ctx, collectionName)
log.Info("Validate collection name.", zap.Any("collectionName", collectionName),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
collID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
if err != nil {
log.Debug("Failed to get collection id.", zap.Any("collectionName", collectionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return err
}
qt.collectionName = info.schema.Name
log.Info("Get collection id by name.", zap.Any("collectionName", collectionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
for _, tag := range qt.query.PartitionNames {
t.CollectionID = collID
log.Info("Get collection ID by name",
zap.Int64("collectionID", t.CollectionID), zap.String("collection name", collectionName),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
for _, tag := range t.request.PartitionNames {
if err := validatePartitionTag(tag, false); err != nil {
log.Debug("Invalid partition name.", zap.Any("partitionName", tag),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
log.Warn("invalid partition name", zap.String("partition name", tag),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return err
}
}
log.Info("Validate partition names.",
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
// check if collection was already loaded into query node
showResp, err := qt.qc.ShowCollections(qt.ctx, &querypb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: qt.Base.MsgID,
Timestamp: qt.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
DbID: 0, // TODO(dragondriver)
})
if err != nil {
return err
}
if showResp.Status.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(showResp.Status.Reason)
}
log.Debug("QueryCoord show collections",
zap.Any("collections", showResp.CollectionIDs),
zap.Any("collID", info.collID))
collectionLoaded := false
for _, collID := range showResp.CollectionIDs {
if info.collID == collID {
collectionLoaded = true
break
}
}
if !collectionLoaded {
return fmt.Errorf("collection %v was not loaded into memory", collectionName)
}
schema, _ := globalMetaCache.GetCollectionSchema(ctx, qt.query.CollectionName)
if qt.ids != nil {
pkField := ""
for _, field := range schema.Fields {
if field.IsPrimaryKey {
pkField = field.Name
}
}
qt.query.Expr = IDs2Expr(pkField, qt.ids.GetIntId().Data)
}
if qt.query.Expr == "" {
errMsg := "Query expression is empty"
return fmt.Errorf(errMsg)
}
plan, err := createExprPlan(schema, qt.query.Expr)
if err != nil {
return err
}
qt.query.OutputFields, err = translateOutputFields(qt.query.OutputFields, schema, true)
if err != nil {
return err
}
log.Debug("translate output fields", zap.Any("OutputFields", qt.query.OutputFields))
if len(qt.query.OutputFields) == 0 {
for _, field := range schema.Fields {
if field.FieldID >= 100 && field.DataType != schemapb.DataType_FloatVector && field.DataType != schemapb.DataType_BinaryVector {
qt.OutputFieldsId = append(qt.OutputFieldsId, field.FieldID)
}
}
} else {
addPrimaryKey := false
for _, reqField := range qt.query.OutputFields {
findField := false
for _, field := range schema.Fields {
if reqField == field.Name {
if field.IsPrimaryKey {
addPrimaryKey = true
}
findField = true
qt.OutputFieldsId = append(qt.OutputFieldsId, field.FieldID)
plan.OutputFieldIds = append(plan.OutputFieldIds, field.FieldID)
} else {
if field.IsPrimaryKey && !addPrimaryKey {
qt.OutputFieldsId = append(qt.OutputFieldsId, field.FieldID)
plan.OutputFieldIds = append(plan.OutputFieldIds, field.FieldID)
addPrimaryKey = true
}
}
}
if !findField {
errMsg := "Field " + reqField + " not exist"
return errors.New(errMsg)
}
}
}
log.Debug("translate output fields to field ids", zap.Any("OutputFieldsID", qt.OutputFieldsId))
qt.RetrieveRequest.SerializedExprPlan, err = proto.Marshal(plan)
if err != nil {
return err
}
travelTimestamp := qt.query.TravelTimestamp
if travelTimestamp == 0 {
travelTimestamp = qt.BeginTs()
} else {
durationSeconds := tsoutil.CalculateDuration(qt.BeginTs(), travelTimestamp) / 1000
if durationSeconds > Params.CommonCfg.RetentionDuration {
duration := time.Second * time.Duration(durationSeconds)
return fmt.Errorf("only support to travel back to %s so far", duration.String())
}
}
guaranteeTimestamp := qt.query.GuaranteeTimestamp
if guaranteeTimestamp == 0 {
guaranteeTimestamp = qt.BeginTs()
}
qt.TravelTimestamp = travelTimestamp
qt.GuaranteeTimestamp = guaranteeTimestamp
deadline, ok := qt.TraceCtx().Deadline()
if ok {
qt.RetrieveRequest.TimeoutTimestamp = tsoutil.ComposeTSByTime(deadline, 0)
}
qt.ResultChannelID = Params.ProxyCfg.RetrieveResultChannelNames[0]
qt.DbID = 0 // todo(yukun)
qt.CollectionID = info.collID
qt.PartitionIDs = make([]UniqueID, 0)
log.Debug("Validate partition names.",
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
t.PartitionIDs = make([]UniqueID, 0)
partitionsMap, err := globalMetaCache.GetPartitions(ctx, collectionName)
if err != nil {
log.Debug("Failed to get partitions in collection.", zap.Any("collectionName", collectionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
log.Warn("failed to get partitions in collection.", zap.String("collection name", collectionName),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return err
}
log.Info("Get partitions in collection.", zap.Any("collectionName", collectionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
log.Debug("Get partitions in collection.", zap.Any("collectionName", collectionName),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
// Check if partitions are valid partitions in collection
partitionsRecord := make(map[UniqueID]bool)
for _, partitionName := range qt.query.PartitionNames {
for _, partitionName := range t.request.PartitionNames {
pattern := fmt.Sprintf("^%s$", partitionName)
re, err := regexp.Compile(pattern)
if err != nil {
log.Debug("Failed to compile partition name regex expression.", zap.Any("partitionName", partitionName),
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
log.Debug("failed to compile partition name regex expression.", zap.Any("partition name", partitionName),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return errors.New("invalid partition names")
}
found := false
for name, pID := range partitionsMap {
if re.MatchString(name) {
if _, exist := partitionsRecord[pID]; !exist {
qt.PartitionIDs = append(qt.PartitionIDs, pID)
t.PartitionIDs = append(t.PartitionIDs, pID)
partitionsRecord[pID] = true
}
found = true
@ -226,176 +128,355 @@ func (qt *queryTask) PreExecute(ctx context.Context) error {
}
if !found {
// FIXME(wxyu): undefined behavior
errMsg := fmt.Sprintf("PartitonName: %s not found", partitionName)
errMsg := fmt.Sprintf("partition name: %s not found", partitionName)
return errors.New(errMsg)
}
}
if !t.checkIfLoaded(collID, t.PartitionIDs) {
return fmt.Errorf("collection:%v or partition:%v not loaded into memory", collectionName, t.request.GetPartitionNames())
}
schema, _ := globalMetaCache.GetCollectionSchema(ctx, collectionName)
if t.ids != nil {
pkField := ""
for _, field := range schema.Fields {
if field.IsPrimaryKey {
pkField = field.Name
}
}
t.request.Expr = IDs2Expr(pkField, t.ids.GetIntId().Data)
}
if t.request.Expr == "" {
return fmt.Errorf("query expression is empty")
}
plan, err := createExprPlan(schema, t.request.Expr)
if err != nil {
return err
}
t.request.OutputFields, err = translateOutputFields(t.request.OutputFields, schema, true)
if err != nil {
return err
}
log.Debug("translate output fields", zap.Any("OutputFields", t.request.OutputFields),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
if len(t.request.OutputFields) == 0 {
for _, field := range schema.Fields {
if field.FieldID >= 100 && field.DataType != schemapb.DataType_FloatVector && field.DataType != schemapb.DataType_BinaryVector {
t.OutputFieldsId = append(t.OutputFieldsId, field.FieldID)
}
}
} else {
addPrimaryKey := false
for _, reqField := range t.request.OutputFields {
findField := false
for _, field := range schema.Fields {
if reqField == field.Name {
if field.IsPrimaryKey {
addPrimaryKey = true
}
findField = true
t.OutputFieldsId = append(t.OutputFieldsId, field.FieldID)
plan.OutputFieldIds = append(plan.OutputFieldIds, field.FieldID)
} else {
if field.IsPrimaryKey && !addPrimaryKey {
t.OutputFieldsId = append(t.OutputFieldsId, field.FieldID)
plan.OutputFieldIds = append(plan.OutputFieldIds, field.FieldID)
addPrimaryKey = true
}
}
}
if !findField {
return fmt.Errorf("field %s not exist", reqField)
}
}
}
log.Debug("translate output fields to field ids", zap.Any("OutputFieldsID", t.OutputFieldsId),
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
t.RetrieveRequest.SerializedExprPlan, err = proto.Marshal(plan)
if err != nil {
return err
}
if t.request.TravelTimestamp == 0 {
t.TravelTimestamp = t.BeginTs()
} else {
durationSeconds := tsoutil.CalculateDuration(t.BeginTs(), t.request.TravelTimestamp) / 1000
if durationSeconds > Params.CommonCfg.RetentionDuration {
duration := time.Second * time.Duration(durationSeconds)
return fmt.Errorf("only support to travel back to %s so far", duration.String())
}
t.TravelTimestamp = t.request.TravelTimestamp
}
if t.request.GuaranteeTimestamp == 0 {
t.GuaranteeTimestamp = t.BeginTs()
} else {
t.GuaranteeTimestamp = t.request.GuaranteeTimestamp
}
deadline, ok := t.TraceCtx().Deadline()
if ok {
t.TimeoutTimestamp = tsoutil.ComposeTSByTime(deadline, 0)
}
t.DbID = 0 // TODO
log.Info("Query PreExecute done.",
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return nil
}
func (qt *queryTask) Execute(ctx context.Context) error {
tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy execute query %d", qt.ID()))
func (t *queryTask) Execute(ctx context.Context) error {
tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy execute query %d", t.ID()))
defer tr.Elapse("done")
var tsMsg msgstream.TsMsg = &msgstream.RetrieveMsg{
RetrieveRequest: *qt.RetrieveRequest,
BaseMsg: msgstream.BaseMsg{
Ctx: ctx,
HashValues: []uint32{uint32(Params.ProxyCfg.ProxyID)},
BeginTimestamp: qt.Base.Timestamp,
EndTimestamp: qt.Base.Timestamp,
},
}
msgPack := msgstream.MsgPack{
BeginTs: qt.Base.Timestamp,
EndTs: qt.Base.Timestamp,
Msgs: make([]msgstream.TsMsg, 1),
}
msgPack.Msgs[0] = tsMsg
stream, err := qt.chMgr.getDQLStream(qt.CollectionID)
if err != nil {
err = qt.chMgr.createDQLStream(qt.CollectionID)
executeQuery := func(withCache bool) error {
shards, err := globalMetaCache.GetShards(ctx, withCache, t.collectionName, t.qc)
if err != nil {
qt.result.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
qt.result.Status.Reason = err.Error()
return err
}
stream, err = qt.chMgr.getDQLStream(qt.CollectionID)
if err != nil {
qt.result.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
qt.result.Status.Reason = err.Error()
return err
}
}
tr.Record("get used message stream")
err = stream.Produce(&msgPack)
if err != nil {
log.Debug("Failed to send retrieve request.",
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
t.resultBuf = make(chan *internalpb.RetrieveResults, len(shards))
t.toReduceResults = make([]*internalpb.RetrieveResults, 0, len(shards))
t.runningGroup, t.runningGroupCtx = errgroup.WithContext(ctx)
for _, shard := range shards {
s := shard
t.runningGroup.Go(func() error {
log.Debug("proxy starting to query one shard",
zap.Int64("collectionID", t.CollectionID),
zap.String("collection name", t.collectionName),
zap.String("shard channel", s.GetChannelName()),
zap.Uint64("timeoutTs", t.TimeoutTimestamp))
err := t.queryShard(t.runningGroupCtx, s)
if err != nil {
return err
}
return nil
})
}
err = t.runningGroup.Wait()
return err
}
err := executeQuery(WithCache)
if err == errInvalidShardLeaders {
log.Warn("invalid shard leaders cache, updating shardleader caches and retry search")
return executeQuery(WithoutCache)
}
if err != nil {
return err
}
log.Debug("proxy sent one retrieveMsg",
zap.Int64("collectionID", qt.CollectionID),
zap.Int64("msgID", tsMsg.ID()),
zap.Int("length of search msg", len(msgPack.Msgs)),
zap.Uint64("timeoutTs", qt.RetrieveRequest.TimeoutTimestamp))
tr.Record("send retrieve request to message stream")
log.Info("Query Execute done.",
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
return err
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
return nil
}
func (qt *queryTask) PostExecute(ctx context.Context) error {
func (t *queryTask) PostExecute(ctx context.Context) error {
tr := timerecord.NewTimeRecorder("queryTask PostExecute")
defer func() {
tr.Elapse("done")
}()
select {
case <-qt.TraceCtx().Done():
log.Debug("proxy", zap.Int64("Query: wait to finish failed, timeout!, taskID:", qt.ID()))
return fmt.Errorf("queryTask:wait to finish failed, timeout : %d", qt.ID())
case retrieveResults := <-qt.resultBuf:
filterRetrieveResults := make([]*internalpb.RetrieveResults, 0)
var reason string
for _, partialRetrieveResult := range retrieveResults {
if partialRetrieveResult.Status.ErrorCode == commonpb.ErrorCode_Success {
filterRetrieveResults = append(filterRetrieveResults, partialRetrieveResult)
} else {
reason += partialRetrieveResult.Status.Reason + "\n"
}
}
if len(filterRetrieveResults) == 0 {
qt.result = &milvuspb.QueryResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: reason,
},
CollectionName: qt.collectionName,
}
log.Debug("Query failed on all querynodes.",
zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
return errors.New(reason)
}
var err error
qt.result, err = mergeRetrieveResults(filterRetrieveResults)
if err != nil {
return err
}
qt.result.CollectionName = qt.collectionName
if len(qt.result.FieldsData) > 0 {
qt.result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
} else {
log.Info("Query result is nil", zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
qt.result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_EmptyCollection,
Reason: reason,
}
return nil
}
schema, err := globalMetaCache.GetCollectionSchema(ctx, qt.query.CollectionName)
if err != nil {
return err
}
for i := 0; i < len(qt.result.FieldsData); i++ {
for _, field := range schema.Fields {
if field.FieldID == qt.OutputFieldsId[i] {
qt.result.FieldsData[i].FieldName = field.Name
qt.result.FieldsData[i].FieldId = field.FieldID
qt.result.FieldsData[i].Type = field.DataType
var err error
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
for {
select {
case <-t.TraceCtx().Done():
log.Warn("proxy", zap.Int64("Query: wait to finish failed, timeout!, taskID:", t.ID()))
return
case <-t.runningGroupCtx.Done():
log.Debug("all queries are finished or canceled", zap.Any("taskID", t.ID()))
close(t.resultBuf)
for res := range t.resultBuf {
t.toReduceResults = append(t.toReduceResults, res)
log.Debug("proxy receives one query result", zap.Int64("sourceID", res.GetBase().GetSourceID()), zap.Any("taskID", t.ID()))
}
wg.Done()
return
}
}
}()
wg.Wait()
t.result, err = mergeRetrieveResults(t.toReduceResults)
if err != nil {
return err
}
t.result.CollectionName = t.collectionName
if len(t.result.FieldsData) > 0 {
t.result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
} else {
log.Info("Query result is nil", zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "query"))
t.result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_EmptyCollection,
Reason: "emptly collection", // TODO
}
return nil
}
schema, err := globalMetaCache.GetCollectionSchema(ctx, t.request.CollectionName)
if err != nil {
return err
}
for i := 0; i < len(t.result.FieldsData); i++ {
for _, field := range schema.Fields {
if field.FieldID == t.OutputFieldsId[i] {
t.result.FieldsData[i].FieldName = field.Name
t.result.FieldsData[i].FieldId = field.FieldID
t.result.FieldsData[i].Type = field.DataType
}
}
}
log.Info("Query PostExecute done", zap.Any("requestID", qt.Base.MsgID), zap.Any("requestType", "query"))
log.Info("Query PostExecute done", zap.Any("requestID", t.Base.MsgID), zap.String("requestType", "query"))
return nil
}
func (qt *queryTask) getChannels() ([]pChan, error) {
collID, err := globalMetaCache.GetCollectionID(qt.ctx, qt.query.CollectionName)
if err != nil {
return nil, err
}
var channels []pChan
channels, err = qt.chMgr.getChannels(collID)
if err != nil {
err := qt.chMgr.createDMLMsgStream(collID)
if err != nil {
return nil, err
func (t *queryTask) queryShard(ctx context.Context, leaders *querypb.ShardLeadersList) error {
query := func(nodeID UniqueID, qn types.QueryNode) error {
req := &querypb.QueryRequest{
Req: t.RetrieveRequest,
DmlChannel: leaders.GetChannelName(),
}
return qt.chMgr.getChannels(collID)
result, err := qn.Query(ctx, req)
if err != nil {
log.Warn("QueryNode query returns error", zap.Int64("nodeID", nodeID),
zap.Error(err))
return errInvalidShardLeaders
}
if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("QueryNode query result error", zap.Int64("nodeID", nodeID),
zap.String("reason", result.GetStatus().GetReason()))
return fmt.Errorf("fail to Query, QueryNode ID = %d, reason=%s", nodeID, result.GetStatus().GetReason())
}
log.Debug("get query result", zap.Int64("nodeID", nodeID), zap.String("channelID", leaders.GetChannelName()))
t.resultBuf <- result
return nil
}
return channels, nil
err := t.queryShardPolicy(t.TraceCtx(), t.getQueryNodePolicy, query, leaders)
if err != nil {
log.Warn("fail to Query to all shard leaders", zap.Int64("taskID", t.ID()), zap.Any("shard leaders", leaders.GetNodeIds()))
return err
}
return nil
}
func (qt *queryTask) getVChannels() ([]vChan, error) {
collID, err := globalMetaCache.GetCollectionID(qt.ctx, qt.query.CollectionName)
if err != nil {
return nil, err
}
var channels []vChan
channels, err = qt.chMgr.getVChannels(collID)
if err != nil {
err := qt.chMgr.createDMLMsgStream(collID)
func (t *queryTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []UniqueID) bool {
// If request to search partitions
if len(searchPartitionIDs) > 0 {
resp, err := t.qc.ShowPartitions(t.ctx, &querypb.ShowPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
CollectionID: collectionID,
PartitionIDs: searchPartitionIDs,
})
if err != nil {
return nil, err
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("requestID", t.Base.MsgID),
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", searchPartitionIDs),
zap.String("requestType", "search"),
zap.Error(err))
return false
}
return qt.chMgr.getVChannels(collID)
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", searchPartitionIDs),
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
// Current logic: show partitions won't return error if the given partitions are all loaded
return true
}
return channels, nil
// If request to search collection
resp, err := t.qc.ShowCollections(t.ctx, &querypb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
})
if err != nil {
log.Warn("fail to show collections by QueryCoord",
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.Error(err))
return false
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show collections by QueryCoord",
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
loaded := false
for index, collID := range resp.CollectionIDs {
if collID == collectionID && resp.GetInMemoryPercentages()[index] >= int64(100) {
loaded = true
break
}
}
if !loaded {
resp, err := t.qc.ShowPartitions(t.ctx, &querypb.ShowPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
CollectionID: collectionID,
})
if err != nil {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("requestID", t.Base.MsgID),
zap.Int64("collectionID", collectionID),
zap.String("requestType", "search"),
zap.Error(err))
return false
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("collectionID", collectionID),
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
if len(resp.GetPartitionIDs()) > 0 {
log.Warn("collection not fully loaded, search on these partitions", zap.Int64s("partitionIDs", resp.GetPartitionIDs()))
return true
}
}
return loaded
}
// IDs2Expr converts ids slices to bool expresion with specified field name
@ -447,39 +528,39 @@ func mergeRetrieveResults(retrieveResults []*internalpb.RetrieveResults) (*milvu
return ret, nil
}
func (qt *queryTask) TraceCtx() context.Context {
return qt.ctx
func (t *queryTask) TraceCtx() context.Context {
return t.ctx
}
func (qt *queryTask) ID() UniqueID {
return qt.Base.MsgID
func (t *queryTask) ID() UniqueID {
return t.Base.MsgID
}
func (qt *queryTask) SetID(uid UniqueID) {
qt.Base.MsgID = uid
func (t *queryTask) SetID(uid UniqueID) {
t.Base.MsgID = uid
}
func (qt *queryTask) Name() string {
func (t *queryTask) Name() string {
return RetrieveTaskName
}
func (qt *queryTask) Type() commonpb.MsgType {
return qt.Base.MsgType
func (t *queryTask) Type() commonpb.MsgType {
return t.Base.MsgType
}
func (qt *queryTask) BeginTs() Timestamp {
return qt.Base.Timestamp
func (t *queryTask) BeginTs() Timestamp {
return t.Base.Timestamp
}
func (qt *queryTask) EndTs() Timestamp {
return qt.Base.Timestamp
func (t *queryTask) EndTs() Timestamp {
return t.Base.Timestamp
}
func (qt *queryTask) SetTs(ts Timestamp) {
qt.Base.Timestamp = ts
func (t *queryTask) SetTs(ts Timestamp) {
t.Base.Timestamp = ts
}
func (qt *queryTask) OnEnqueue() error {
qt.Base.MsgType = commonpb.MsgType_Retrieve
func (t *queryTask) OnEnqueue() error {
t.Base.MsgType = commonpb.MsgType_Retrieve
return nil
}

View File

@ -3,16 +3,15 @@ package proxy
import (
"context"
"fmt"
"strconv"
"sync"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
@ -25,25 +24,35 @@ import (
)
func TestQueryTask_all(t *testing.T) {
var err error
Params.Init()
Params.ProxyCfg.RetrieveResultChannelNames = []string{funcutil.GenRandomStr()}
rc := NewRootCoordMock()
var (
err error
ctx = context.TODO()
rc = NewRootCoordMock()
qc = NewQueryCoordMock(withValidShardLeaders())
qn = &QueryNodeMock{}
shardsNum = int32(2)
collectionName = t.Name() + funcutil.GenRandomStr()
expr = fmt.Sprintf("%s > 0", testInt64Field)
hitNum = 10
)
mockGetQueryNodePolicy := func(ctx context.Context, address string) (types.QueryNode, error) {
return qn, nil
}
rc.Start()
defer rc.Stop()
ctx := context.Background()
qc.Start()
defer qc.Stop()
err = InitMetaCache(rc)
assert.NoError(t, err)
shardsNum := int32(2)
prefix := "TestQueryTask_all"
dbName := ""
collectionName := prefix + funcutil.GenRandomStr()
fieldName2Types := map[string]schemapb.DataType{
testBoolField: schemapb.DataType_Bool,
testInt32Field: schemapb.DataType_Int32,
@ -56,9 +65,6 @@ func TestQueryTask_all(t *testing.T) {
fieldName2Types[testBinaryVecField] = schemapb.DataType_BinaryVector
}
expr := fmt.Sprintf("%s > 0", testInt64Field)
hitNum := 10
schema := constructCollectionSchemaByDataType(collectionName, fieldName2Types, testInt64Field, false)
marshaledSchema, err := proto.Marshal(schema)
assert.NoError(t, err)
@ -66,165 +72,66 @@ func TestQueryTask_all(t *testing.T) {
createColT := &createCollectionTask{
Condition: NewTaskCondition(ctx),
CreateCollectionRequest: &milvuspb.CreateCollectionRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
Schema: marshaledSchema,
ShardsNum: shardsNum,
},
ctx: ctx,
rootCoord: rc,
result: nil,
schema: nil,
}
assert.NoError(t, createColT.OnEnqueue())
assert.NoError(t, createColT.PreExecute(ctx))
assert.NoError(t, createColT.Execute(ctx))
assert.NoError(t, createColT.PostExecute(ctx))
dmlChannelsFunc := getDmlChannelsFunc(ctx, rc)
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, query.GetChannels, nil, factory)
defer chMgr.removeAllDMLStream()
defer chMgr.removeAllDQLStream()
require.NoError(t, createColT.OnEnqueue())
require.NoError(t, createColT.PreExecute(ctx))
require.NoError(t, createColT.Execute(ctx))
require.NoError(t, createColT.PostExecute(ctx))
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
assert.NoError(t, err)
qc := NewQueryCoordMock()
qc.Start()
defer qc.Stop()
status, err := qc.LoadCollection(ctx, &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.ProxyID,
MsgType: commonpb.MsgType_LoadCollection,
SourceID: Params.ProxyCfg.ProxyID,
},
DbID: 0,
CollectionID: collectionID,
Schema: nil,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
require.NoError(t, err)
require.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
// test begins
task := &queryTask{
Condition: NewTaskCondition(ctx),
RetrieveRequest: &internalpb.RetrieveRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.ProxyID,
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.ProxyID,
},
ResultChannelID: strconv.Itoa(int(Params.ProxyCfg.ProxyID)),
DbID: 0,
CollectionID: collectionID,
PartitionIDs: nil,
SerializedExprPlan: nil,
OutputFieldsId: make([]int64, len(fieldName2Types)),
TravelTimestamp: 0,
GuaranteeTimestamp: 0,
CollectionID: collectionID,
OutputFieldsId: make([]int64, len(fieldName2Types)),
},
ctx: ctx,
resultBuf: make(chan []*internalpb.RetrieveResults),
ctx: ctx,
result: &milvuspb.QueryResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
FieldsData: nil,
},
query: &milvuspb.QueryRequest{
request: &milvuspb.QueryRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.ProxyID,
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.ProxyID,
},
DbName: dbName,
CollectionName: collectionName,
Expr: expr,
OutputFields: nil,
PartitionNames: nil,
TravelTimestamp: 0,
GuaranteeTimestamp: 0,
CollectionName: collectionName,
Expr: expr,
},
chMgr: chMgr,
qc: qc,
ids: nil,
qc: qc,
getQueryNodePolicy: mockGetQueryNodePolicy,
queryShardPolicy: roundRobinPolicy,
}
for i := 0; i < len(fieldName2Types); i++ {
task.RetrieveRequest.OutputFieldsId[i] = int64(common.StartOfUserFieldID + i)
}
// simple mock for query node
// TODO(dragondriver): should we replace this mock using RocksMq or MemMsgStream?
err = chMgr.createDQLStream(collectionID)
assert.NoError(t, err)
stream, err := chMgr.getDQLStream(collectionID)
assert.NoError(t, err)
var wg sync.WaitGroup
wg.Add(1)
consumeCtx, cancel := context.WithCancel(ctx)
go func() {
defer wg.Done()
for {
select {
case <-consumeCtx.Done():
return
case pack, ok := <-stream.Chan():
assert.True(t, ok)
if pack == nil {
continue
}
for _, msg := range pack.Msgs {
_, ok := msg.(*msgstream.RetrieveMsg)
assert.True(t, ok)
// TODO(dragondriver): construct result according to the request
result1 := &internalpb.RetrieveResults{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_RetrieveResult,
MsgID: 0,
Timestamp: 0,
SourceID: 0,
},
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
ResultChannelID: strconv.Itoa(int(Params.ProxyCfg.ProxyID)),
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: generateInt64Array(hitNum),
},
},
},
SealedSegmentIDsRetrieved: nil,
ChannelIDsRetrieved: nil,
GlobalSealedSegmentIDs: nil,
}
fieldID := common.StartOfUserFieldID
for fieldName, dataType := range fieldName2Types {
result1.FieldsData = append(result1.FieldsData, generateFieldData(dataType, fieldName, int64(fieldID), hitNum))
fieldID++
}
// send search result
task.resultBuf <- []*internalpb.RetrieveResults{result1}
}
}
}
}()
assert.NoError(t, task.OnEnqueue())
// test query task with timeout
@ -236,11 +143,29 @@ func TestQueryTask_all(t *testing.T) {
assert.NoError(t, task.PreExecute(ctx))
// after preExecute
assert.Greater(t, task.TimeoutTimestamp, typeutil.ZeroTimestamp)
result1 := &internalpb.RetrieveResults{
Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_RetrieveResult},
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{Data: generateInt64Array(hitNum)},
},
},
}
fieldID := common.StartOfUserFieldID
for fieldName, dataType := range fieldName2Types {
result1.FieldsData = append(result1.FieldsData, generateFieldData(dataType, fieldName, int64(fieldID), hitNum))
fieldID++
}
qn.withQueryResult = result1
task.ctx = ctx
assert.NoError(t, task.Execute(ctx))
assert.NoError(t, task.PostExecute(ctx))
cancel()
wg.Wait()
assert.NoError(t, task.PostExecute(ctx))
}

View File

@ -23,16 +23,10 @@ import (
"fmt"
"sync"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/util/funcutil"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/opentracing/opentracing-go"
oplog "github.com/opentracing/opentracing-go/log"
@ -385,25 +379,10 @@ type taskScheduler struct {
cancel context.CancelFunc
msFactory msgstream.Factory
searchResultCh chan *internalpb.SearchResults
retrieveResultCh chan *internalpb.RetrieveResults
}
type schedOpt func(*taskScheduler)
func schedOptWithSearchResultCh(ch chan *internalpb.SearchResults) schedOpt {
return func(sched *taskScheduler) {
sched.searchResultCh = ch
}
}
func schedOptWithRetrieveResultCh(ch chan *internalpb.RetrieveResults) schedOpt {
return func(sched *taskScheduler) {
sched.retrieveResultCh = ch
}
}
func newTaskScheduler(ctx context.Context,
idAllocatorIns idAllocatorInterface,
tsoAllocatorIns tsoAllocator,
@ -551,265 +530,6 @@ func (sched *taskScheduler) queryLoop() {
}
}
type resultBufHeader struct {
msgID UniqueID
usedVChans map[interface{}]struct{} // set of vChan
receivedVChansSet map[interface{}]struct{} // set of vChan
receivedSealedSegmentIDsSet map[interface{}]struct{} // set of UniqueID
receivedGlobalSegmentIDsSet map[interface{}]struct{} // set of UniqueID
haveError bool
}
type searchResultBuf struct {
resultBufHeader
resultBuf []*internalpb.SearchResults
}
type queryResultBuf struct {
resultBufHeader
resultBuf []*internalpb.RetrieveResults
}
func newSearchResultBuf(msgID UniqueID) *searchResultBuf {
return &searchResultBuf{
resultBufHeader: resultBufHeader{
usedVChans: make(map[interface{}]struct{}),
receivedVChansSet: make(map[interface{}]struct{}),
receivedSealedSegmentIDsSet: make(map[interface{}]struct{}),
receivedGlobalSegmentIDsSet: make(map[interface{}]struct{}),
haveError: false,
msgID: msgID,
},
resultBuf: make([]*internalpb.SearchResults, 0),
}
}
func newQueryResultBuf(msgID UniqueID) *queryResultBuf {
return &queryResultBuf{
resultBufHeader: resultBufHeader{
usedVChans: make(map[interface{}]struct{}),
receivedVChansSet: make(map[interface{}]struct{}),
receivedSealedSegmentIDsSet: make(map[interface{}]struct{}),
receivedGlobalSegmentIDsSet: make(map[interface{}]struct{}),
haveError: false,
msgID: msgID,
},
resultBuf: make([]*internalpb.RetrieveResults, 0),
}
}
func (sr *resultBufHeader) readyToReduce() bool {
if sr.haveError {
log.Debug("Proxy searchResultBuf readyToReduce", zap.Any("haveError", true))
return true
}
log.Debug("check if result buf is ready to reduce",
zap.String("role", typeutil.ProxyRole),
zap.Int64("MsgID", sr.msgID),
zap.Any("receivedVChansSet", funcutil.SetToSlice(sr.receivedVChansSet)),
zap.Any("usedVChans", funcutil.SetToSlice(sr.usedVChans)),
zap.Any("receivedSealedSegmentIDsSet", funcutil.SetToSlice(sr.receivedSealedSegmentIDsSet)),
zap.Any("receivedGlobalSegmentIDsSet", funcutil.SetToSlice(sr.receivedGlobalSegmentIDsSet)))
ret1 := funcutil.SetContain(sr.receivedVChansSet, sr.usedVChans)
if !ret1 {
return false
}
return funcutil.SetContain(sr.receivedSealedSegmentIDsSet, sr.receivedGlobalSegmentIDsSet)
}
func (sr *resultBufHeader) addPartialResult(vchans []vChan, searchSegIDs, globalSegIDs []UniqueID) {
for _, vchan := range vchans {
sr.receivedVChansSet[vchan] = struct{}{}
}
for _, sealedSegment := range searchSegIDs {
sr.receivedSealedSegmentIDsSet[sealedSegment] = struct{}{}
}
for _, globalSegment := range globalSegIDs {
sr.receivedGlobalSegmentIDsSet[globalSegment] = struct{}{}
}
}
func (sr *searchResultBuf) addPartialResult(result *internalpb.SearchResults) {
sr.resultBuf = append(sr.resultBuf, result)
if result.Status.ErrorCode != commonpb.ErrorCode_Success {
sr.haveError = true
return
}
sr.resultBufHeader.addPartialResult(result.ChannelIDsSearched, result.SealedSegmentIDsSearched,
result.GlobalSealedSegmentIDs)
}
func (qr *queryResultBuf) addPartialResult(result *internalpb.RetrieveResults) {
qr.resultBuf = append(qr.resultBuf, result)
if result.Status.ErrorCode != commonpb.ErrorCode_Success {
qr.haveError = true
return
}
qr.resultBufHeader.addPartialResult(result.ChannelIDsRetrieved, result.SealedSegmentIDsRetrieved,
result.GlobalSealedSegmentIDs)
}
func (sched *taskScheduler) collectionResultLoopV2() {
defer sched.wg.Done()
searchResultBufs := make(map[UniqueID]*searchResultBuf)
searchResultBufFlags := newIDCache(Params.ProxyCfg.BufFlagExpireTime, Params.ProxyCfg.BufFlagCleanupInterval) // if value is true, we can ignore searchResult
queryResultBufs := make(map[UniqueID]*queryResultBuf)
queryResultBufFlags := newIDCache(Params.ProxyCfg.BufFlagExpireTime, Params.ProxyCfg.BufFlagCleanupInterval) // if value is true, we can ignore queryResult
processSearchResult := func(results *internalpb.SearchResults) error {
reqID := results.Base.MsgID
ignoreThisResult, ok := searchResultBufFlags.Get(reqID)
if !ok {
searchResultBufFlags.Set(reqID, false)
ignoreThisResult = false
}
if ignoreThisResult {
log.Debug("got a search result, but we should ignore", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
return nil
}
log.Debug("got a search result", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
t := sched.getTaskByReqID(reqID)
if t == nil {
log.Debug("got a search result, but not in task scheduler", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
delete(searchResultBufs, reqID)
searchResultBufFlags.Set(reqID, true)
}
st, ok := t.(*searchTask)
if !ok {
log.Debug("got a search result, but the related task is not of search task", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
delete(searchResultBufs, reqID)
searchResultBufFlags.Set(reqID, true)
return nil
}
resultBuf, ok := searchResultBufs[reqID]
if !ok {
log.Debug("first receive search result of this task", zap.String("role", typeutil.ProxyRole), zap.Int64("reqID", reqID))
resultBuf = newSearchResultBuf(reqID)
vchans, err := st.getVChannels()
if err != nil {
delete(searchResultBufs, reqID)
log.Warn("failed to get virtual channels", zap.String("role", typeutil.ProxyRole), zap.Error(err), zap.Int64("reqID", reqID))
return err
}
for _, vchan := range vchans {
resultBuf.usedVChans[vchan] = struct{}{}
}
searchResultBufs[reqID] = resultBuf
}
resultBuf.addPartialResult(results)
colName := t.(*searchTask).query.CollectionName
log.Debug("process search result", zap.String("role", typeutil.ProxyRole), zap.String("collection", colName), zap.Int64("reqID", reqID), zap.Int("answer cnt", len(searchResultBufs[reqID].resultBuf)))
if resultBuf.readyToReduce() {
log.Debug("process search result, ready to reduce", zap.String("role", typeutil.ProxyRole), zap.Int64("reqID", reqID))
searchResultBufFlags.Set(reqID, true)
st.resultBuf <- resultBuf.resultBuf
delete(searchResultBufs, reqID)
}
return nil
}
processRetrieveResult := func(results *internalpb.RetrieveResults) error {
reqID := results.Base.MsgID
ignoreThisResult, ok := queryResultBufFlags.Get(reqID)
if !ok {
queryResultBufFlags.Set(reqID, false)
ignoreThisResult = false
}
if ignoreThisResult {
log.Debug("got a retrieve result, but we should ignore", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
return nil
}
log.Debug("got a retrieve result", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
t := sched.getTaskByReqID(reqID)
if t == nil {
log.Debug("got a retrieve result, but not in task scheduler", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
delete(queryResultBufs, reqID)
queryResultBufFlags.Set(reqID, true)
}
st, ok := t.(*queryTask)
if !ok {
log.Debug("got a retrieve result, but the related task is not of retrieve task", zap.String("role", typeutil.ProxyRole), zap.Int64("ReqID", reqID))
delete(queryResultBufs, reqID)
queryResultBufFlags.Set(reqID, true)
return nil
}
resultBuf, ok := queryResultBufs[reqID]
if !ok {
log.Debug("first receive retrieve result of this task", zap.String("role", typeutil.ProxyRole), zap.Int64("reqID", reqID))
resultBuf = newQueryResultBuf(reqID)
vchans, err := st.getVChannels()
if err != nil {
delete(queryResultBufs, reqID)
log.Warn("failed to get virtual channels", zap.String("role", typeutil.ProxyRole), zap.Error(err), zap.Int64("reqID", reqID))
return err
}
for _, vchan := range vchans {
resultBuf.usedVChans[vchan] = struct{}{}
}
queryResultBufs[reqID] = resultBuf
}
resultBuf.addPartialResult(results)
colName := t.(*queryTask).query.CollectionName
log.Debug("process retrieve result", zap.String("role", typeutil.ProxyRole), zap.String("collection", colName), zap.Int64("reqID", reqID), zap.Int("answer cnt", len(queryResultBufs[reqID].resultBuf)))
if resultBuf.readyToReduce() {
log.Debug("process retrieve result, ready to reduce", zap.String("role", typeutil.ProxyRole), zap.Int64("reqID", reqID))
queryResultBufFlags.Set(reqID, true)
st.resultBuf <- resultBuf.resultBuf
delete(queryResultBufs, reqID)
}
return nil
}
for {
select {
case <-sched.ctx.Done():
log.Info("task scheduler's result loop of Proxy exit", zap.String("reason", "context done"))
return
case sr, ok := <-sched.searchResultCh:
if !ok {
log.Info("task scheduler's result loop of Proxy exit", zap.String("reason", "search result channel closed"))
return
}
if err := processSearchResult(sr); err != nil {
log.Warn("failed to process search result", zap.Error(err))
}
case rr, ok := <-sched.retrieveResultCh:
if !ok {
log.Info("task scheduler's result loop of Proxy exit", zap.String("reason", "retrieve result channel closed"))
return
}
if err := processRetrieveResult(rr); err != nil {
log.Warn("failed to process retrieve result", zap.Error(err))
}
}
}
}
func (sched *taskScheduler) Start() error {
sched.wg.Add(1)
go sched.definitionLoop()
@ -820,10 +540,6 @@ func (sched *taskScheduler) Start() error {
sched.wg.Add(1)
go sched.queryLoop()
sched.wg.Add(1)
// go sched.collectResultLoop()
go sched.collectionResultLoopV2()
return nil
}

View File

@ -6,14 +6,15 @@ import (
"fmt"
"regexp"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/distance"
@ -34,94 +35,108 @@ import (
type searchTask struct {
Condition
*internalpb.SearchRequest
ctx context.Context
resultBuf chan []*internalpb.SearchResults
ctx context.Context
result *milvuspb.SearchResults
query *milvuspb.SearchRequest
chMgr channelsMgr
request *milvuspb.SearchRequest
qc types.QueryCoord
tr *timerecord.TimeRecorder
collectionName string
tr *timerecord.TimeRecorder
collectionID UniqueID
resultBuf chan *internalpb.SearchResults
toReduceResults []*internalpb.SearchResults
runningGroup *errgroup.Group
runningGroupCtx context.Context
getQueryNodePolicy getQueryNodePolicy
searchShardPolicy pickShardPolicy
}
func (st *searchTask) PreExecute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(st.TraceCtx(), "Proxy-Search-PreExecute")
defer sp.Finish()
st.Base.MsgType = commonpb.MsgType_Search
st.Base.SourceID = Params.ProxyCfg.ProxyID
func (t *searchTask) PreExecute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(t.TraceCtx(), "Proxy-Search-PreExecute")
if t.getQueryNodePolicy == nil {
t.getQueryNodePolicy = defaultGetQueryNodePolicy
}
if t.searchShardPolicy == nil {
t.searchShardPolicy = roundRobinPolicy
}
defer sp.Finish()
t.Base.MsgType = commonpb.MsgType_Search
t.Base.SourceID = Params.ProxyCfg.ProxyID
collectionName := t.request.CollectionName
if err := validateCollectionName(collectionName); err != nil {
return err
}
collectionName := st.query.CollectionName
collID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
if err != nil { // err is not nil if collection not exists
return err
}
st.collectionID = collID
t.CollectionID = collID
t.collectionName = collectionName
t.PartitionIDs = []UniqueID{}
if err := validateCollectionName(st.query.CollectionName); err != nil {
return err
}
for _, tag := range st.query.PartitionNames {
for _, tag := range t.request.PartitionNames {
if err := validatePartitionTag(tag, false); err != nil {
return err
}
}
// check if collection was already loaded into query node
showResp, err := st.qc.ShowCollections(st.ctx, &querypb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: st.Base.MsgID,
Timestamp: st.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
DbID: 0, // TODO(dragondriver)
})
partitionsMap, err := globalMetaCache.GetPartitions(ctx, collectionName)
if err != nil {
return err
}
if showResp.Status.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(showResp.Status.Reason)
}
log.Debug("successfully get collections from QueryCoord",
zap.String("target collection name", collectionName),
zap.Int64("target collection ID", collID),
zap.Any("collections", showResp.CollectionIDs),
)
collectionLoaded := false
for _, collectionID := range showResp.CollectionIDs {
if collectionID == collID {
collectionLoaded = true
break
partitionsRecord := make(map[UniqueID]bool)
for _, partitionName := range t.request.PartitionNames {
pattern := fmt.Sprintf("^%s$", partitionName)
re, err := regexp.Compile(pattern)
if err != nil {
return errors.New("invalid partition names")
}
found := false
for name, pID := range partitionsMap {
if re.MatchString(name) {
if _, exist := partitionsRecord[pID]; !exist {
t.PartitionIDs = append(t.PartitionIDs, pID)
partitionsRecord[pID] = true
}
found = true
}
}
if !found {
return fmt.Errorf("partition name %s not found", partitionName)
}
}
if !collectionLoaded {
return fmt.Errorf("collection %v was not loaded into memory", collectionName)
// check if collection/partitions are loaded into query node
if !t.checkIfLoaded(collID, t.PartitionIDs) {
return fmt.Errorf("collection:%v or partitions:%v not loaded into memory", collectionName, t.request.GetPartitionNames())
}
// TODO(dragondriver): necessary to check if partition was loaded into query node?
st.Base.MsgType = commonpb.MsgType_Search
t.Base.MsgType = commonpb.MsgType_Search
schema, _ := globalMetaCache.GetCollectionSchema(ctx, collectionName)
outputFields, err := translateOutputFields(st.query.OutputFields, schema, false)
outputFields, err := translateOutputFields(t.request.OutputFields, schema, false)
if err != nil {
return err
}
log.Debug("translate output fields", zap.Any("OutputFields", outputFields))
st.query.OutputFields = outputFields
t.request.OutputFields = outputFields
if st.query.GetDslType() == commonpb.DslType_BoolExprV1 {
annsField, err := funcutil.GetAttrByKeyFromRepeatedKV(AnnsFieldKey, st.query.SearchParams)
if t.request.GetDslType() == commonpb.DslType_BoolExprV1 {
annsField, err := funcutil.GetAttrByKeyFromRepeatedKV(AnnsFieldKey, t.request.SearchParams)
if err != nil {
return errors.New(AnnsFieldKey + " not found in search_params")
}
topKStr, err := funcutil.GetAttrByKeyFromRepeatedKV(TopKKey, st.query.SearchParams)
topKStr, err := funcutil.GetAttrByKeyFromRepeatedKV(TopKKey, t.request.SearchParams)
if err != nil {
return errors.New(TopKKey + " not found in search_params")
}
@ -130,16 +145,16 @@ func (st *searchTask) PreExecute(ctx context.Context) error {
return errors.New(TopKKey + " " + topKStr + " is not invalid")
}
metricType, err := funcutil.GetAttrByKeyFromRepeatedKV(MetricTypeKey, st.query.SearchParams)
metricType, err := funcutil.GetAttrByKeyFromRepeatedKV(MetricTypeKey, t.request.SearchParams)
if err != nil {
return errors.New(MetricTypeKey + " not found in search_params")
}
searchParams, err := funcutil.GetAttrByKeyFromRepeatedKV(SearchParamsKey, st.query.SearchParams)
searchParams, err := funcutil.GetAttrByKeyFromRepeatedKV(SearchParamsKey, t.request.SearchParams)
if err != nil {
return errors.New(SearchParamsKey + " not found in search_params")
}
roundDecimalStr, err := funcutil.GetAttrByKeyFromRepeatedKV(RoundDecimalKey, st.query.SearchParams)
roundDecimalStr, err := funcutil.GetAttrByKeyFromRepeatedKV(RoundDecimalKey, t.request.SearchParams)
if err != nil {
roundDecimalStr = "-1"
}
@ -161,22 +176,22 @@ func (st *searchTask) PreExecute(ctx context.Context) error {
log.Debug("create query plan",
//zap.Any("schema", schema),
zap.String("dsl", st.query.Dsl),
zap.String("dsl", t.request.Dsl),
zap.String("anns field", annsField),
zap.Any("query info", queryInfo))
plan, err := createQueryPlan(schema, st.query.Dsl, annsField, queryInfo)
plan, err := createQueryPlan(schema, t.request.Dsl, annsField, queryInfo)
if err != nil {
log.Debug("failed to create query plan",
zap.Error(err),
//zap.Any("schema", schema),
zap.String("dsl", st.query.Dsl),
zap.String("dsl", t.request.Dsl),
zap.String("anns field", annsField),
zap.Any("query info", queryInfo))
return fmt.Errorf("failed to create query plan: %v", err)
}
for _, name := range st.query.OutputFields {
for _, name := range t.request.OutputFields {
hitField := false
for _, field := range schema.Fields {
if field.Name == name {
@ -184,7 +199,7 @@ func (st *searchTask) PreExecute(ctx context.Context) error {
return errors.New("search doesn't support vector field as output_fields")
}
st.SearchRequest.OutputFieldsId = append(st.SearchRequest.OutputFieldsId, field.FieldID)
t.SearchRequest.OutputFieldsId = append(t.SearchRequest.OutputFieldsId, field.FieldID)
plan.OutputFieldIds = append(plan.OutputFieldIds, field.FieldID)
hitField = true
break
@ -196,238 +211,315 @@ func (st *searchTask) PreExecute(ctx context.Context) error {
}
}
st.SearchRequest.DslType = commonpb.DslType_BoolExprV1
st.SearchRequest.SerializedExprPlan, err = proto.Marshal(plan)
t.SearchRequest.DslType = commonpb.DslType_BoolExprV1
t.SearchRequest.SerializedExprPlan, err = proto.Marshal(plan)
if err != nil {
return err
}
log.Debug("Proxy::searchTask::PreExecute", zap.Any("plan.OutputFieldIds", plan.OutputFieldIds),
zap.Any("plan", plan.String()))
}
travelTimestamp := st.query.TravelTimestamp
travelTimestamp := t.request.TravelTimestamp
if travelTimestamp == 0 {
travelTimestamp = st.BeginTs()
travelTimestamp = t.BeginTs()
} else {
durationSeconds := tsoutil.CalculateDuration(st.BeginTs(), travelTimestamp) / 1000
durationSeconds := tsoutil.CalculateDuration(t.BeginTs(), travelTimestamp) / 1000
if durationSeconds > Params.CommonCfg.RetentionDuration {
duration := time.Second * time.Duration(durationSeconds)
return fmt.Errorf("only support to travel back to %s so far", duration.String())
}
}
guaranteeTimestamp := st.query.GuaranteeTimestamp
guaranteeTimestamp := t.request.GuaranteeTimestamp
if guaranteeTimestamp == 0 {
guaranteeTimestamp = st.BeginTs()
guaranteeTimestamp = t.BeginTs()
}
st.SearchRequest.TravelTimestamp = travelTimestamp
st.SearchRequest.GuaranteeTimestamp = guaranteeTimestamp
deadline, ok := st.TraceCtx().Deadline()
t.TravelTimestamp = travelTimestamp
t.GuaranteeTimestamp = guaranteeTimestamp
deadline, ok := t.TraceCtx().Deadline()
if ok {
st.SearchRequest.TimeoutTimestamp = tsoutil.ComposeTSByTime(deadline, 0)
t.SearchRequest.TimeoutTimestamp = tsoutil.ComposeTSByTime(deadline, 0)
}
st.SearchRequest.ResultChannelID = Params.ProxyCfg.SearchResultChannelNames[0]
st.SearchRequest.DbID = 0 // todo
st.SearchRequest.CollectionID = collID
st.SearchRequest.PartitionIDs = make([]UniqueID, 0)
partitionsMap, err := globalMetaCache.GetPartitions(ctx, collectionName)
if err != nil {
return err
}
partitionsRecord := make(map[UniqueID]bool)
for _, partitionName := range st.query.PartitionNames {
pattern := fmt.Sprintf("^%s$", partitionName)
re, err := regexp.Compile(pattern)
if err != nil {
return errors.New("invalid partition names")
}
found := false
for name, pID := range partitionsMap {
if re.MatchString(name) {
if _, exist := partitionsRecord[pID]; !exist {
st.PartitionIDs = append(st.PartitionIDs, pID)
partitionsRecord[pID] = true
}
found = true
}
}
if !found {
errMsg := fmt.Sprintf("PartitonName: %s not found", partitionName)
return errors.New(errMsg)
}
}
st.SearchRequest.Dsl = st.query.Dsl
st.SearchRequest.PlaceholderGroup = st.query.PlaceholderGroup
t.DbID = 0 // todo
t.SearchRequest.Dsl = t.request.Dsl
t.SearchRequest.PlaceholderGroup = t.request.PlaceholderGroup
log.Info("search PreExecute done.",
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "search"))
return nil
}
func (st *searchTask) Execute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(st.TraceCtx(), "Proxy-Search-Execute")
func (t *searchTask) Execute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(t.TraceCtx(), "Proxy-Search-Execute")
defer sp.Finish()
tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy execute search %d", st.ID()))
tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy execute search %d", t.ID()))
defer tr.Elapse("done")
var tsMsg msgstream.TsMsg = &msgstream.SearchMsg{
SearchRequest: *st.SearchRequest,
BaseMsg: msgstream.BaseMsg{
Ctx: ctx,
HashValues: []uint32{uint32(Params.ProxyCfg.ProxyID)},
BeginTimestamp: st.Base.Timestamp,
EndTimestamp: st.Base.Timestamp,
},
}
msgPack := msgstream.MsgPack{
BeginTs: st.Base.Timestamp,
EndTs: st.Base.Timestamp,
Msgs: make([]msgstream.TsMsg, 1),
}
msgPack.Msgs[0] = tsMsg
executeSearch := func(withCache bool) error {
shards, err := globalMetaCache.GetShards(ctx, withCache, t.collectionName, t.qc)
if err != nil {
return err
}
collectionName := st.query.CollectionName
info, err := globalMetaCache.GetCollectionInfo(ctx, collectionName)
if err != nil { // err is not nil if collection not exists
t.resultBuf = make(chan *internalpb.SearchResults, len(shards))
t.toReduceResults = make([]*internalpb.SearchResults, 0, len(shards))
t.runningGroup, t.runningGroupCtx = errgroup.WithContext(ctx)
// TODO: try to merge rpc send to different shard leaders.
// If two shard leader is on the same querynode maybe we should merge request to save rpc
for _, shard := range shards {
s := shard
t.runningGroup.Go(func() error {
log.Debug("proxy starting to query one shard",
zap.Int64("collectionID", t.CollectionID),
zap.String("collection name", t.collectionName),
zap.String("shard channel", s.GetChannelName()),
zap.Uint64("timeoutTs", t.TimeoutTimestamp))
err := t.searchShard(t.runningGroupCtx, s)
if err != nil {
return err
}
return nil
})
}
err = t.runningGroup.Wait()
return err
}
st.collectionName = info.schema.Name
stream, err := st.chMgr.getDQLStream(info.collID)
if err != nil {
err = st.chMgr.createDQLStream(info.collID)
if err != nil {
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}
return err
}
stream, err = st.chMgr.getDQLStream(info.collID)
if err != nil {
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}
return err
}
err := executeSearch(WithCache)
if err == errInvalidShardLeaders {
log.Warn("invalid shard leaders from cache, updating shardleader caches and retry search")
return executeSearch(WithoutCache)
}
tr.Record("get used message stream")
err = stream.Produce(&msgPack)
if err != nil {
log.Debug("proxy", zap.String("send search request failed", err.Error()))
return err
}
st.tr.Record("send message done")
log.Debug("proxy sent one searchMsg",
zap.Int64("collectionID", st.CollectionID),
zap.Int64("msgID", tsMsg.ID()),
zap.Int("length of search msg", len(msgPack.Msgs)),
zap.Uint64("timeoutTs", st.SearchRequest.TimeoutTimestamp))
sendMsgDur := tr.Record("send search msg to message stream")
metrics.ProxySendMessageLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
metrics.SearchLabel).Observe(float64(sendMsgDur.Milliseconds()))
return err
log.Info("Search Execute done.",
zap.Any("requestID", t.Base.MsgID), zap.Any("requestType", "search"))
return nil
}
func (st *searchTask) PostExecute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(st.TraceCtx(), "Proxy-Search-PostExecute")
func (t *searchTask) PostExecute(ctx context.Context) error {
sp, ctx := trace.StartSpanFromContextWithOperationName(t.TraceCtx(), "Proxy-Search-PostExecute")
defer sp.Finish()
tr := timerecord.NewTimeRecorder("searchTask PostExecute")
defer func() {
tr.Elapse("done")
}()
for {
select {
case <-st.TraceCtx().Done():
log.Debug("Proxy searchTask PostExecute Loop exit caused by ctx.Done", zap.Int64("taskID", st.ID()))
return fmt.Errorf("searchTask:wait to finish failed, timeout: %d", st.ID())
case searchResults := <-st.resultBuf:
// fmt.Println("searchResults: ", searchResults)
filterSearchResults := make([]*internalpb.SearchResults, 0)
var filterReason string
errNum := 0
for _, partialSearchResult := range searchResults {
if partialSearchResult.Status.ErrorCode == commonpb.ErrorCode_Success {
filterSearchResults = append(filterSearchResults, partialSearchResult)
// For debugging, please don't delete.
// printSearchResult(partialSearchResult)
} else {
errNum++
filterReason += partialSearchResult.Status.Reason + "\n"
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
for {
select {
case <-t.TraceCtx().Done():
log.Debug("wait to finish timeout!", zap.Int64("taskID", t.ID()))
return
case <-t.runningGroupCtx.Done():
log.Debug("all searches are finished or canceled", zap.Any("taskID", t.ID()))
close(t.resultBuf)
for res := range t.resultBuf {
t.toReduceResults = append(t.toReduceResults, res)
log.Debug("proxy receives one query result", zap.Int64("sourceID", res.GetBase().GetSourceID()), zap.Any("taskID", t.ID()))
}
wg.Done()
return
}
}
}()
wg.Wait()
tr.Record("decodeResultStart")
validSearchResults, err := decodeSearchResults(t.toReduceResults)
if err != nil {
return err
}
metrics.ProxyDecodeSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
log.Debug("proxy search post execute stage 2", zap.Any("len(validSearchResults)", len(validSearchResults)))
if len(validSearchResults) <= 0 {
log.Warn("search result is empty", zap.Any("requestID", t.Base.MsgID), zap.String("requestType", "search"))
t.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "search result is empty",
},
CollectionName: t.collectionName,
}
// add information if any
if len(t.toReduceResults) > 0 {
t.result.Results = &schemapb.SearchResultData{
NumQueries: t.toReduceResults[0].NumQueries,
Topks: make([]int64, t.toReduceResults[0].NumQueries),
}
}
return nil
}
tr.Record("reduceResultStart")
t.result, err = reduceSearchResultData(validSearchResults, t.toReduceResults[0].NumQueries, t.toReduceResults[0].TopK, t.toReduceResults[0].MetricType)
if err != nil {
return err
}
metrics.ProxyReduceSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SuccessLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
t.result.CollectionName = t.collectionName
schema, err := globalMetaCache.GetCollectionSchema(ctx, t.request.CollectionName)
if err != nil {
return err
}
if len(t.request.OutputFields) != 0 && len(t.result.Results.FieldsData) != 0 {
for k, fieldName := range t.request.OutputFields {
for _, field := range schema.Fields {
if t.result.Results.FieldsData[k] != nil && field.Name == fieldName {
t.result.Results.FieldsData[k].FieldName = field.Name
t.result.Results.FieldsData[k].FieldId = field.FieldID
t.result.Results.FieldsData[k].Type = field.DataType
}
}
log.Debug("Proxy Search PostExecute stage1",
zap.Any("len(filterSearchResults)", len(filterSearchResults)))
metrics.ProxyWaitForSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SearchLabel).Observe(float64(st.tr.RecordSpan().Milliseconds()))
tr.Record("Proxy Search PostExecute stage1 done")
if len(filterSearchResults) <= 0 || errNum > 0 {
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: filterReason,
},
CollectionName: st.collectionName,
}
return fmt.Errorf("QueryNode search fail, reason %s: id %d", filterReason, st.ID())
}
tr.Record("decodeResultStart")
validSearchResults, err := decodeSearchResults(filterSearchResults)
if err != nil {
return err
}
metrics.ProxyDecodeSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
log.Debug("Proxy Search PostExecute stage2", zap.Any("len(validSearchResults)", len(validSearchResults)))
if len(validSearchResults) <= 0 {
filterReason += "empty search result\n"
log.Debug("Proxy Search PostExecute stage2 failed", zap.Any("filterReason", filterReason))
st.result = &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: filterReason,
},
Results: &schemapb.SearchResultData{
NumQueries: searchResults[0].NumQueries,
Topks: make([]int64, searchResults[0].NumQueries),
},
CollectionName: st.collectionName,
}
return nil
}
tr.Record("reduceResultStart")
st.result, err = reduceSearchResultData(validSearchResults, searchResults[0].NumQueries, searchResults[0].TopK, searchResults[0].MetricType)
if err != nil {
return err
}
metrics.ProxyReduceSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SuccessLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
st.result.CollectionName = st.collectionName
schema, err := globalMetaCache.GetCollectionSchema(ctx, st.query.CollectionName)
if err != nil {
return err
}
if len(st.query.OutputFields) != 0 && len(st.result.Results.FieldsData) != 0 {
for k, fieldName := range st.query.OutputFields {
for _, field := range schema.Fields {
if st.result.Results.FieldsData[k] != nil && field.Name == fieldName {
st.result.Results.FieldsData[k].FieldName = field.Name
st.result.Results.FieldsData[k].FieldId = field.FieldID
st.result.Results.FieldsData[k].Type = field.DataType
}
}
}
}
return nil
}
}
log.Info("Search post execute done", zap.Any("requestID", t.Base.MsgID), zap.String("requestType", "search"))
return nil
}
func (t *searchTask) searchShard(ctx context.Context, leaders *querypb.ShardLeadersList) error {
search := func(nodeID UniqueID, qn types.QueryNode) error {
req := &querypb.SearchRequest{
Req: t.SearchRequest,
DmlChannel: leaders.GetChannelName(),
}
result, err := qn.Search(ctx, req)
if err != nil {
log.Warn("QueryNode search returns error", zap.Int64("nodeID", nodeID),
zap.Error(err))
return errInvalidShardLeaders
}
if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("QueryNode search result error", zap.Int64("nodeID", nodeID),
zap.String("reason", result.GetStatus().GetReason()))
return fmt.Errorf("fail to Search, QueryNode ID=%d, reason=%s", nodeID, result.GetStatus().GetReason())
}
t.resultBuf <- result
return nil
}
err := t.searchShardPolicy(t.TraceCtx(), t.getQueryNodePolicy, search, leaders)
if err != nil {
log.Warn("fail to search to all shard leaders", zap.Any("shard leaders", leaders.GetNodeIds()))
return err
}
return nil
}
func (t *searchTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []UniqueID) bool {
// If request to search partitions
if len(searchPartitionIDs) > 0 {
resp, err := t.qc.ShowPartitions(t.ctx, &querypb.ShowPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
CollectionID: collectionID,
PartitionIDs: searchPartitionIDs,
})
if err != nil {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("requestID", t.Base.MsgID),
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", searchPartitionIDs),
zap.String("requestType", "search"),
zap.Error(err))
return false
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", searchPartitionIDs),
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
// Current logic: show partitions won't return error if the given partitions are all loaded
return true
}
// If request to search collection
resp, err := t.qc.ShowCollections(t.ctx, &querypb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
})
if err != nil {
log.Warn("fail to show collections by QueryCoord",
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.Error(err))
return false
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show collections by QueryCoord",
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
loaded := false
for index, collID := range resp.CollectionIDs {
if collID == collectionID && resp.GetInMemoryPercentages()[index] >= int64(100) {
loaded = true
break
}
}
if !loaded {
resp, err := t.qc.ShowPartitions(t.ctx, &querypb.ShowPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowCollections,
MsgID: t.Base.MsgID,
Timestamp: t.Base.Timestamp,
SourceID: Params.ProxyCfg.ProxyID,
},
CollectionID: collectionID,
})
if err != nil {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("requestID", t.Base.MsgID),
zap.Int64("collectionID", collectionID),
zap.String("requestType", "search"),
zap.Error(err))
return false
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("fail to show partitions by QueryCoord",
zap.Int64("collectionID", collectionID),
zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "search"),
zap.String("reason", resp.GetStatus().GetReason()))
return false
}
if len(resp.GetPartitionIDs()) > 0 {
log.Warn("collection not fully loaded, search on these partitions", zap.Int64s("partitionIDs", resp.GetPartitionIDs()))
return true
}
}
return loaded
}
func decodeSearchResults(searchResults []*internalpb.SearchResults) ([]*schemapb.SearchResultData, error) {
@ -457,18 +549,22 @@ func checkSearchResultData(data *schemapb.SearchResultData, nq int64, topk int64
if data.TopK != topk {
return fmt.Errorf("search result's topk(%d) mis-match with %d", data.TopK, topk)
}
if len(data.Ids.GetIntId().Data) != (int)(nq*topk) {
return fmt.Errorf("search result's id length %d invalid", len(data.Ids.GetIntId().Data))
expectedLength := (int)(nq * topk)
if len(data.Ids.GetIntId().Data) != expectedLength {
return fmt.Errorf("search result's ID length invalid, ID length=%d, expectd length=%d",
len(data.Ids.GetIntId().Data), expectedLength)
}
if len(data.Scores) != (int)(nq*topk) {
return fmt.Errorf("search result's score length %d invalid", len(data.Scores))
if len(data.Scores) != expectedLength {
return fmt.Errorf("search result's score length invalid, score length=%d, expectedLength=%d",
len(data.Scores), expectedLength)
}
return nil
}
func selectSearchResultData(dataArray []*schemapb.SearchResultData, offsets []int64, topk int64, qi int64) int {
sel := -1
maxDistance := minFloat32
maxDistance := minFloat32 // distance here means score :)
for i, offset := range offsets { // query num, the number of ways to merge
if offset >= topk {
continue
@ -518,11 +614,12 @@ func reduceSearchResultData(searchResultData []*schemapb.SearchResultData, nq in
for i, sData := range searchResultData {
log.Debug("reduceSearchResultData",
zap.Int("i", i),
zap.Int("result No.", i),
zap.Int64("nq", sData.NumQueries),
zap.Int64("topk", sData.TopK),
zap.Any("len(FieldsData)", len(sData.FieldsData)))
if err := checkSearchResultData(sData, nq, topk); err != nil {
log.Warn("invalid search results", zap.Error(err))
return ret, err
}
//printSearchResultData(sData, strconv.FormatInt(int64(i), 10))
@ -577,7 +674,7 @@ func reduceSearchResultData(searchResultData []*schemapb.SearchResultData, nq in
ret.Results.Scores[k] *= -1
}
}
// printSearchResultData(ret.Results, "proxy reduce result")
return ret, nil
}
@ -605,79 +702,79 @@ func reduceSearchResultData(searchResultData []*schemapb.SearchResultData, nq in
// }
// }
func (st *searchTask) TraceCtx() context.Context {
return st.ctx
func (t *searchTask) TraceCtx() context.Context {
return t.ctx
}
func (st *searchTask) ID() UniqueID {
return st.Base.MsgID
func (t *searchTask) ID() UniqueID {
return t.Base.MsgID
}
func (st *searchTask) SetID(uid UniqueID) {
st.Base.MsgID = uid
func (t *searchTask) SetID(uid UniqueID) {
t.Base.MsgID = uid
}
func (st *searchTask) Name() string {
func (t *searchTask) Name() string {
return SearchTaskName
}
func (st *searchTask) Type() commonpb.MsgType {
return st.Base.MsgType
func (t *searchTask) Type() commonpb.MsgType {
return t.Base.MsgType
}
func (st *searchTask) BeginTs() Timestamp {
return st.Base.Timestamp
func (t *searchTask) BeginTs() Timestamp {
return t.Base.Timestamp
}
func (st *searchTask) EndTs() Timestamp {
return st.Base.Timestamp
func (t *searchTask) EndTs() Timestamp {
return t.Base.Timestamp
}
func (st *searchTask) SetTs(ts Timestamp) {
st.Base.Timestamp = ts
func (t *searchTask) SetTs(ts Timestamp) {
t.Base.Timestamp = ts
}
func (st *searchTask) OnEnqueue() error {
st.Base = &commonpb.MsgBase{}
st.Base.MsgType = commonpb.MsgType_Search
st.Base.SourceID = Params.ProxyCfg.ProxyID
func (t *searchTask) OnEnqueue() error {
t.Base = &commonpb.MsgBase{}
t.Base.MsgType = commonpb.MsgType_Search
t.Base.SourceID = Params.ProxyCfg.ProxyID
return nil
}
func (st *searchTask) getChannels() ([]pChan, error) {
collID, err := globalMetaCache.GetCollectionID(st.ctx, st.query.CollectionName)
if err != nil {
return nil, err
}
// func (t *searchTaskV2) getChannels() ([]pChan, error) {
// collID, err := globalMetaCache.GetCollectionID(t.ctx, t.request.CollectionName)
// if err != nil {
// return nil, err
// }
//
// var channels []pChan
// channels, err = t.chMgr.getChannels(collID)
// if err != nil {
// err := t.chMgr.createDMLMsgStream(collID)
// if err != nil {
// return nil, err
// }
// return t.chMgr.getChannels(collID)
// }
//
// return channels, nil
// }
var channels []pChan
channels, err = st.chMgr.getChannels(collID)
if err != nil {
err := st.chMgr.createDMLMsgStream(collID)
if err != nil {
return nil, err
}
return st.chMgr.getChannels(collID)
}
return channels, nil
}
func (st *searchTask) getVChannels() ([]vChan, error) {
collID, err := globalMetaCache.GetCollectionID(st.ctx, st.query.CollectionName)
if err != nil {
return nil, err
}
var channels []vChan
channels, err = st.chMgr.getVChannels(collID)
if err != nil {
err := st.chMgr.createDMLMsgStream(collID)
if err != nil {
return nil, err
}
return st.chMgr.getVChannels(collID)
}
return channels, nil
}
// func (t *searchTaskV2) getVChannels() ([]vChan, error) {
// collID, err := globalMetaCache.GetCollectionID(t.ctx, t.request.CollectionName)
// if err != nil {
// return nil, err
// }
//
// var channels []vChan
// channels, err = t.chMgr.getVChannels(collID)
// if err != nil {
// err := t.chMgr.createDMLMsgStream(collID)
// if err != nil {
// return nil, err
// }
// return t.chMgr.getVChannels(collID)
// }
//
// return channels, nil
// }

File diff suppressed because it is too large Load Diff

View File

@ -33,14 +33,23 @@ func defaultChannelAllocatePolicy() ChannelAllocatePolicy {
}
// ChannelAllocatePolicy helper function definition to allocate dmChannel to queryNode
type ChannelAllocatePolicy func(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64) error
type ChannelAllocatePolicy func(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error
func shuffleChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64) error {
func shuffleChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error {
if len(reqs) == 0 {
return nil
}
var onlineNodeIDs []int64
for {
onlineNodeIDs := cluster.onlineNodeIDs()
if replicaID == -1 {
onlineNodeIDs = cluster.onlineNodeIDs()
} else {
replica, err := metaCache.getReplicaByID(replicaID)
if err != nil {
return err
}
onlineNodeIDs = replica.GetNodeIds()
}
if len(onlineNodeIDs) == 0 {
err := errors.New("no online QueryNode to allocate")
log.Error("shuffleChannelsToQueryNode failed", zap.Error(err))
@ -54,6 +63,11 @@ func shuffleChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChan
var availableNodeIDs []int64
nodeID2NumChannels := make(map[int64]int)
for _, nodeID := range onlineNodeIDs {
// nodeID not in includeNodeIDs
if len(includeNodeIDs) > 0 && !nodeIncluded(nodeID, includeNodeIDs) {
continue
}
// nodeID in excludeNodeIDs
if nodeIncluded(nodeID, excludeNodeIDs) {
continue

View File

@ -18,6 +18,8 @@ package querycoord
import (
"context"
"math/rand"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
@ -41,7 +43,12 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
clusterSession := sessionutil.NewSession(context.Background(), Params.EtcdCfg.MetaRootPath, etcdCli)
clusterSession.Init(typeutil.QueryCoordRole, Params.QueryCoordCfg.Address, true, false)
clusterSession.Register()
meta, err := newMeta(baseCtx, kv, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(baseCtx, kv, nil, idAllocator)
assert.Nil(t, err)
cluster := &queryNodeCluster{
ctx: baseCtx,
@ -73,7 +80,7 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
}
reqs := []*querypb.WatchDmChannelsRequest{firstReq, secondReq}
err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil)
err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil, -1)
assert.NotNil(t, err)
node, err := startQueryNodeServer(baseCtx)
@ -83,7 +90,7 @@ func TestShuffleChannelsToQueryNode(t *testing.T) {
cluster.registerNode(baseCtx, nodeSession, nodeID, disConnect)
waitQueryNodeOnline(cluster, nodeID)
err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil)
err = shuffleChannelsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil, -1)
assert.Nil(t, err)
assert.Equal(t, nodeID, firstReq.NodeID)

View File

@ -30,6 +30,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/funcutil"
)
@ -51,7 +52,7 @@ type channelUnsubscribeHandler struct {
}
// newChannelUnsubscribeHandler create a new handler service to unsubscribe channels
func newChannelUnsubscribeHandler(ctx context.Context, kv *etcdkv.EtcdKV, factory msgstream.Factory) (*channelUnsubscribeHandler, error) {
func newChannelUnsubscribeHandler(ctx context.Context, kv *etcdkv.EtcdKV, factory dependency.Factory) (*channelUnsubscribeHandler, error) {
childCtx, cancel := context.WithCancel(ctx)
handler := &channelUnsubscribeHandler{
ctx: childCtx,

View File

@ -21,8 +21,9 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"math"
"path/filepath"
"sort"
"strconv"
"sync"
@ -73,10 +74,10 @@ type Cluster interface {
offlineNodeIDs() []int64
hasNode(nodeID int64) bool
allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error
allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64) error
allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error
allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error
assignNodesToReplicas(ctx context.Context, replicas []*milvuspb.ReplicaInfo) error
assignNodesToReplicas(ctx context.Context, replicas []*milvuspb.ReplicaInfo, collectionSize uint64) error
getSessionVersion() int64
@ -267,6 +268,7 @@ func (c *queryNodeCluster) watchDmChannels(ctx context.Context, nodeID int64, in
CollectionID: info.CollectionID,
DmChannel: info.ChannelName,
NodeIDLoaded: nodeID,
ReplicaID: in.ReplicaID,
}
}
@ -696,24 +698,81 @@ func (c *queryNodeCluster) isOnline(nodeID int64) (bool, error) {
// }
//}
func (c *queryNodeCluster) allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
return c.segmentAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs, includeNodeIDs)
func (c *queryNodeCluster) allocateSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error {
return c.segmentAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs, includeNodeIDs, replicaID)
}
func (c *queryNodeCluster) allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64) error {
return c.channelAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs)
func (c *queryNodeCluster) allocateChannelsToQueryNode(ctx context.Context, reqs []*querypb.WatchDmChannelsRequest, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error {
return c.channelAllocator(ctx, reqs, c, c.clusterMeta, wait, excludeNodeIDs, includeNodeIDs, replicaID)
}
func (c *queryNodeCluster) assignNodesToReplicas(ctx context.Context, replicas []*milvuspb.ReplicaInfo) error {
nodes := c.onlineNodeIDs()
if len(nodes) < len(replicas) {
return errors.New("no enough nodes to create replicas")
// Return error if no enough nodes/resources to create replicas
func (c *queryNodeCluster) assignNodesToReplicas(ctx context.Context, replicas []*milvuspb.ReplicaInfo, collectionSize uint64) error {
nodeIds := c.onlineNodeIDs()
if len(nodeIds) < len(replicas) {
return fmt.Errorf("no enough nodes to create replicas, node_num=%d replica_num=%d", len(nodeIds), len(replicas))
}
for _, node := range nodes {
idx := rand.Int() % len(replicas)
replicas[idx].NodeIds = append(replicas[idx].NodeIds, node)
nodeInfos, err := getNodeInfos(c, nodeIds)
if err != nil {
return err
}
if len(nodeInfos) < len(replicas) {
return fmt.Errorf("no enough nodes to create replicas, node_num=%d replica_num=%d", len(nodeInfos), len(replicas))
}
sort.Slice(nodeInfos, func(i, j int) bool {
return nodeInfos[i].totalMem-nodeInfos[i].memUsage > nodeInfos[j].totalMem-nodeInfos[j].memUsage
})
memCapCount := make([]uint64, len(replicas))
for _, info := range nodeInfos {
i := 0
minMemCap := uint64(math.MaxUint64)
for j, memCap := range memCapCount {
if memCap < minMemCap {
minMemCap = memCap
i = j
}
}
replicas[i].NodeIds = append(replicas[i].NodeIds, info.id)
memCapCount[i] += info.totalMem - info.memUsage
}
for _, memCap := range memCapCount {
if memCap < collectionSize {
return fmt.Errorf("no enough memory to load collection/partitions, collectionSize=%v, replicasNum=%v", collectionSize, len(replicas))
}
}
return nil
}
// It's a helper method to concurrently get nodes' info
// Remove nodes that it can't connect to
func getNodeInfos(cluster *queryNodeCluster, nodeIds []UniqueID) ([]*queryNode, error) {
nodeCh := make(chan *queryNode, len(nodeIds))
wg := sync.WaitGroup{}
for _, id := range nodeIds {
wg.Add(1)
go func(id UniqueID) {
defer wg.Done()
info, err := cluster.getNodeInfoByID(id)
if err != nil {
return
}
nodeCh <- info.(*queryNode)
}(id)
}
wg.Wait()
close(nodeCh)
nodes := make([]*queryNode, 0, len(nodeCh))
for node := range nodeCh {
nodes = append(nodes, node)
}
return nodes, nil
}

View File

@ -21,8 +21,10 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"strconv"
"sync/atomic"
"testing"
"github.com/milvus-io/milvus/internal/util/dependency"
@ -412,7 +414,12 @@ func TestReloadClusterFromKV(t *testing.T) {
factory := dependency.NewDefaultFactory(true)
handler, err := newChannelUnsubscribeHandler(ctx, kv, factory)
assert.Nil(t, err)
meta, err := newMeta(ctx, kv, factory, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, factory, idAllocator)
assert.Nil(t, err)
cluster := &queryNodeCluster{

View File

@ -56,6 +56,8 @@ func (broker *globalMetaBroker) releaseDQLMessageStream(ctx context.Context, col
},
CollectionID: collectionID,
}
// TODO(yah01): check whether RootCoord returns error if QueryChannel not exists
res, err := broker.rootCoord.ReleaseDQLMessageStream(ctx2, releaseDQLMessageStreamReq)
if err != nil {
log.Error("releaseDQLMessageStream occur error", zap.Int64("collectionID", collectionID), zap.Error(err))
@ -421,13 +423,14 @@ func (broker *globalMetaBroker) generateSegmentLoadInfo(ctx context.Context,
schema *schemapb.CollectionSchema) *querypb.SegmentLoadInfo {
segmentID := segmentBinlog.SegmentID
segmentLoadInfo := &querypb.SegmentLoadInfo{
SegmentID: segmentID,
PartitionID: partitionID,
CollectionID: collectionID,
BinlogPaths: segmentBinlog.FieldBinlogs,
NumOfRows: segmentBinlog.NumOfRows,
Statslogs: segmentBinlog.Statslogs,
Deltalogs: segmentBinlog.Deltalogs,
SegmentID: segmentID,
PartitionID: partitionID,
CollectionID: collectionID,
BinlogPaths: segmentBinlog.FieldBinlogs,
NumOfRows: segmentBinlog.NumOfRows,
Statslogs: segmentBinlog.Statslogs,
Deltalogs: segmentBinlog.Deltalogs,
InsertChannel: segmentBinlog.InsertChannel,
}
if setIndex {
// if index not exist, load binlog to query node

View File

@ -0,0 +1,57 @@
package querycoord
import "sort"
type balancer interface {
addNode(nodeID int64) ([]*balancePlan, error)
removeNode(nodeID int64) []*balancePlan
rebalance() []*balancePlan
}
type balancePlan struct {
nodeID int64
sourceReplica int64
targetReplica int64
}
type replicaBalancer struct {
meta Meta
}
func newReplicaBalancer(meta Meta) *replicaBalancer {
return &replicaBalancer{meta}
}
func (b *replicaBalancer) addNode(nodeID int64) ([]*balancePlan, error) {
// allocate this node to all collections replicas
var ret []*balancePlan
collections := b.meta.showCollections()
for _, c := range collections {
replicas, err := b.meta.getReplicasByCollectionID(c.GetCollectionID())
if err != nil {
return nil, err
}
if len(replicas) == 0 {
continue
}
sort.Slice(replicas, func(i, j int) bool {
return len(replicas[i].GetNodeIds()) < len(replicas[j].GetNodeIds())
})
ret = append(ret, &balancePlan{
nodeID: nodeID,
sourceReplica: -1,
targetReplica: replicas[0].GetReplicaID(),
})
}
return ret, nil
}
func (b *replicaBalancer) removeNode(nodeID int64) []*balancePlan {
// for this version, querynode does not support move from a replica to another
return nil
}
func (b *replicaBalancer) rebalance() []*balancePlan {
return nil
}

View File

@ -884,6 +884,7 @@ func (qc *QueryCoord) LoadBalance(ctx context.Context, req *querypb.LoadBalanceR
zap.Int64s("source nodeIDs", req.SourceNodeIDs),
zap.Int64s("dst nodeIDs", req.DstNodeIDs),
zap.Int64s("balanced segments", req.SealedSegmentIDs),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID))
status := &commonpb.Status{
@ -930,6 +931,7 @@ func (qc *QueryCoord) LoadBalance(ctx context.Context, req *querypb.LoadBalanceR
zap.Int64s("source nodeIDs", req.SourceNodeIDs),
zap.Int64s("dst nodeIDs", req.DstNodeIDs),
zap.Int64s("balanced segments", req.SealedSegmentIDs),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID))
return status, nil
@ -1018,22 +1020,132 @@ func (qc *QueryCoord) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
// GetReplicas gets replicas of a certain collection
func (qc *QueryCoord) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasRequest) (*milvuspb.GetReplicasResponse, error) {
// TODO: to impl
log.Debug("GetReplicas received",
zap.String("role", typeutil.QueryCoordRole),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID))
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
if qc.stateCode.Load() != internalpb.StateCode_Healthy {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
err := errors.New("QueryCoord is not healthy")
status.Reason = err.Error()
log.Error("GetReplicasResponse failed", zap.String("role", typeutil.QueryCoordRole), zap.Int64("msgID", req.Base.MsgID), zap.Error(err))
return &milvuspb.GetReplicasResponse{
Status: status,
}, nil
}
replicas, err := qc.meta.getReplicasByCollectionID(req.CollectionID)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_MetaFailed
status.Reason = err.Error()
log.Error("GetReplicasResponse failed to get replicas",
zap.String("role", typeutil.QueryCoordRole),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID),
zap.Error(err))
return &milvuspb.GetReplicasResponse{
Status: status,
}, nil
}
if req.WithShardNodes {
shardNodes := make(map[string]map[UniqueID]struct{})
segments := qc.meta.showSegmentInfos(req.CollectionID, nil)
for _, segment := range segments {
nodes, ok := shardNodes[segment.DmChannel]
if !ok {
nodes = make(map[UniqueID]struct{})
}
for _, nodeID := range segment.NodeIds {
nodes[nodeID] = struct{}{}
}
shardNodes[segment.DmChannel] = nodes
}
for _, replica := range replicas {
for _, shard := range replica.ShardReplicas {
for nodeID := range shardNodes[shard.DmChannelName] {
shard.NodeIds = append(shard.NodeIds, nodeID)
}
}
}
}
return &milvuspb.GetReplicasResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
},
Status: status,
Replicas: replicas,
}, nil
}
// GetShardLeaders gets shard leaders of a certain collection
func (qc *QueryCoord) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeadersRequest) (*querypb.GetShardLeadersResponse, error) {
// TODO: to impl
log.Debug("GetShardLeaders received",
zap.String("role", typeutil.QueryCoordRole),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID))
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
if qc.stateCode.Load() != internalpb.StateCode_Healthy {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
err := errors.New("QueryCoord is not healthy")
status.Reason = err.Error()
log.Error("GetShardLeadersResponse failed", zap.String("role", typeutil.QueryCoordRole), zap.Int64("msgID", req.Base.MsgID), zap.Error(err))
return &querypb.GetShardLeadersResponse{
Status: status,
}, nil
}
replicas, err := qc.meta.getReplicasByCollectionID(req.CollectionID)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_MetaFailed
status.Reason = err.Error()
log.Error("GetShardLeadersResponse failed to get replicas",
zap.String("role", typeutil.QueryCoordRole),
zap.Int64("collectionID", req.CollectionID),
zap.Int64("msgID", req.Base.MsgID),
zap.Error(err))
return &querypb.GetShardLeadersResponse{
Status: status,
}, nil
}
shards := make(map[string]*querypb.ShardLeadersList)
for _, replica := range replicas {
for _, shard := range replica.ShardReplicas {
list, ok := shards[shard.DmChannelName]
if !ok {
list = &querypb.ShardLeadersList{
ChannelName: shard.DmChannelName,
NodeIds: make([]int64, 0),
NodeAddrs: make([]string, 0),
}
}
list.NodeIds = append(list.NodeIds, shard.LeaderID)
list.NodeAddrs = append(list.NodeAddrs, shard.LeaderAddr)
shards[shard.DmChannelName] = list
}
}
shardLeaderLists := make([]*querypb.ShardLeadersList, 0, len(shards))
for _, shard := range shards {
shardLeaderLists = append(shardLeaderLists, shard)
}
return &querypb.GetShardLeadersResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
},
Status: status,
Shards: shardLeaderLists,
}, nil
}

View File

@ -24,7 +24,9 @@ import (
"time"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"go.uber.org/zap"
"github.com/stretchr/testify/assert"
@ -120,8 +122,9 @@ func TestGrpcTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
assert.Nil(t, err)
@ -132,9 +135,10 @@ func TestGrpcTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
assert.Nil(t, err)
@ -227,8 +231,9 @@ func TestGrpcTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
assert.Nil(t, err)
@ -431,9 +436,10 @@ func TestGrpcTaskEnqueueFail(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
assert.Nil(t, err)
@ -444,8 +450,9 @@ func TestGrpcTaskEnqueueFail(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
assert.Nil(t, err)
@ -456,8 +463,9 @@ func TestGrpcTaskEnqueueFail(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
assert.Nil(t, err)
@ -489,9 +497,10 @@ func TestGrpcTaskEnqueueFail(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
@ -543,8 +552,9 @@ func TestLoadBalanceTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, res.ErrorCode)
@ -606,9 +616,10 @@ func TestGrpcTaskBeforeHealthy(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
assert.Nil(t, err)
@ -654,8 +665,9 @@ func TestGrpcTaskBeforeHealthy(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
assert.Nil(t, err)
@ -756,6 +768,24 @@ func TestGrpcTaskBeforeHealthy(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, res.Status.ErrorCode)
})
t.Run("Test GetReplicas", func(t *testing.T) {
resp, err := unHealthyCoord.GetReplicas(ctx, &milvuspb.GetReplicasRequest{
Base: &commonpb.MsgBase{},
CollectionID: defaultCollectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
t.Run("Test GetShardLeaders", func(t *testing.T) {
resp, err := unHealthyCoord.GetShardLeaders(ctx, &querypb.GetShardLeadersRequest{
Base: &commonpb.MsgBase{},
CollectionID: defaultCollectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
unHealthyCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
@ -789,8 +819,9 @@ func Test_RepeatedLoadSameCollection(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
//first load defaultCollectionID
@ -824,17 +855,19 @@ func Test_LoadCollectionAndLoadPartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
loadPartitionReq := &querypb.LoadPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
//first load defaultCollectionID
@ -854,6 +887,131 @@ func Test_LoadCollectionAndLoadPartitions(t *testing.T) {
assert.Nil(t, err)
}
func TestLoadCollectionWithReplicas(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
loadCollectionReq := &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 3,
}
// load collection with 3 replicas, but no enough querynodes
assert.Equal(t, 2, len(queryCoord.cluster.onlineNodeIDs()))
status, err := queryCoord.LoadCollection(ctx, loadCollectionReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
// Now it should can load collection with 3 replicas
node3, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node3.queryNodeID)
status, err = queryCoord.LoadCollection(ctx, loadCollectionReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
waitLoadCollectionDone(ctx, queryCoord, defaultCollectionID)
replicas, err := queryCoord.meta.getReplicasByCollectionID(loadCollectionReq.CollectionID)
assert.NoError(t, err)
for i := range replicas {
log.Info("replicas",
zap.Int64("collectionID", replicas[i].CollectionID),
zap.Int64("id", replicas[i].ReplicaID),
zap.Int64s("nodeIds", replicas[i].NodeIds))
}
assert.Equal(t, 3, len(replicas))
for i := range replicas {
assert.Equal(t, loadCollectionReq.CollectionID, replicas[i].CollectionID)
}
status, err = queryCoord.ReleaseCollection(ctx, &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ReleaseCollection,
},
CollectionID: loadCollectionReq.CollectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
node1.stop()
node2.stop()
node3.stop()
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
}
func Test_LoadPartitionsWithReplicas(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
loadPartitionsReq := &querypb.LoadPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 3,
}
// load collection with 3 replicas, but no enough querynodes
assert.Equal(t, 2, len(queryCoord.cluster.onlineNodeIDs()))
status, err := queryCoord.LoadPartitions(ctx, loadPartitionsReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
// Now it should can load collection with 3 replicas
node3, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node3.queryNodeID)
status, err = queryCoord.LoadPartitions(ctx, loadPartitionsReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
waitLoadPartitionDone(ctx, queryCoord,
loadPartitionsReq.CollectionID, loadPartitionsReq.PartitionIDs)
status, err = queryCoord.ReleasePartitions(ctx, &querypb.ReleasePartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ReleaseCollection,
},
CollectionID: loadPartitionsReq.CollectionID,
PartitionIDs: loadPartitionsReq.PartitionIDs,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
node1.stop()
node2.stop()
node3.stop()
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
}
func Test_RepeatedLoadSamePartitions(t *testing.T) {
refreshParams()
ctx := context.Background()
@ -868,9 +1026,10 @@ func Test_RepeatedLoadSamePartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
//first load defaultPartitionID
@ -904,9 +1063,10 @@ func Test_RepeatedLoadDifferentPartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
//first load defaultPartitionID
@ -920,9 +1080,10 @@ func Test_RepeatedLoadDifferentPartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID + 1},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID + 1},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
status, err = queryCoord.LoadPartitions(ctx, failLoadRequest)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
@ -948,17 +1109,19 @@ func Test_LoadPartitionsAndLoadCollection(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
loadPartitionReq := &querypb.LoadPartitionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
//first load defaultPartitionID
@ -992,8 +1155,9 @@ func Test_LoadAndReleaseCollection(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releaseCollectionReq := &querypb.ReleaseCollectionRequest{
@ -1034,9 +1198,10 @@ func Test_LoadAndReleasePartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releasePartitionReq := &querypb.ReleasePartitionsRequest{
@ -1078,8 +1243,9 @@ func Test_LoadCollectionAndReleasePartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releasePartitionReq := &querypb.ReleasePartitionsRequest{
@ -1121,9 +1287,10 @@ func Test_LoadPartitionsAndReleaseCollection(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releaseCollectionReq := &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{
@ -1163,8 +1330,9 @@ func Test_RepeatedReleaseCollection(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releaseCollectionReq := &querypb.ReleaseCollectionRequest{
@ -1210,9 +1378,10 @@ func Test_RepeatedReleaseSamePartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releasePartitionReq := &querypb.ReleasePartitionsRequest{
@ -1259,9 +1428,10 @@ func Test_RepeatedReleaseDifferentPartitions(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID, defaultPartitionID + 1},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID, defaultPartitionID + 1},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
releasePartitionReq := &querypb.ReleasePartitionsRequest{
@ -1294,3 +1464,150 @@ func Test_RepeatedReleaseDifferentPartitions(t *testing.T) {
err = removeAllSession()
assert.Nil(t, err)
}
func TestGetReplicas(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node3, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node3.queryNodeID)
// First, load collection with replicas
loadCollectionReq := &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 3,
}
status, err := queryCoord.LoadCollection(ctx, loadCollectionReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
waitLoadCollectionDone(ctx, queryCoord, defaultCollectionID)
getReplicasReq := &milvuspb.GetReplicasRequest{
Base: &commonpb.MsgBase{},
CollectionID: loadCollectionReq.CollectionID,
}
resp, err := queryCoord.GetReplicas(ctx, getReplicasReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, 3, len(resp.Replicas))
for i := range resp.Replicas {
assert.Equal(t, 1, len(resp.Replicas[i].NodeIds))
for j := 0; j < i; j++ {
assert.NotEqual(t,
resp.Replicas[i].NodeIds[0],
resp.Replicas[j].NodeIds[0])
}
}
getReplicasReq.WithShardNodes = true
resp, err = queryCoord.GetReplicas(ctx, getReplicasReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, 3, len(resp.Replicas))
for i := range resp.Replicas {
assert.Equal(t, 1, len(resp.Replicas[i].NodeIds))
for j := range resp.Replicas[i].ShardReplicas {
assert.Equal(t,
resp.Replicas[i].NodeIds[0],
resp.Replicas[i].ShardReplicas[j].LeaderID)
}
for j := 0; j < i; j++ {
assert.NotEqual(t,
resp.Replicas[i].NodeIds[0],
resp.Replicas[j].NodeIds[0])
}
}
// GetReplicas after release collection, it should return meta failed
status, err = queryCoord.ReleaseCollection(ctx, &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{},
CollectionID: loadCollectionReq.CollectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
resp, err = queryCoord.GetReplicas(ctx, getReplicasReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_MetaFailed, resp.Status.ErrorCode)
node1.stop()
node2.stop()
node3.stop()
queryCoord.Stop()
}
func TestGetShardLeaders(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node3, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node3.queryNodeID)
// First, load collection with replicas
loadCollectionReq := &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 3,
}
status, err := queryCoord.LoadCollection(ctx, loadCollectionReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
waitLoadCollectionDone(ctx, queryCoord, defaultCollectionID)
getShardLeadersReq := &querypb.GetShardLeadersRequest{
Base: &commonpb.MsgBase{},
CollectionID: loadCollectionReq.CollectionID,
}
resp, err := queryCoord.GetShardLeaders(ctx, getShardLeadersReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
totalLeaders := 0
for i := 0; i < len(resp.Shards); i++ {
totalLeaders += len(resp.Shards[i].NodeIds)
}
assert.Equal(t, 0, totalLeaders%3)
// GetShardLeaders after release collection, it should return meta failed
status, err = queryCoord.ReleaseCollection(ctx, &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{},
CollectionID: loadCollectionReq.CollectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
resp, err = queryCoord.GetShardLeaders(ctx, getShardLeadersReq)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_MetaFailed, resp.Status.ErrorCode)
node1.stop()
node2.stop()
node3.stop()
queryCoord.Stop()
}

View File

@ -19,18 +19,18 @@ package querycoord
import (
"context"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/storage"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/allocator"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/tsoutil"
)
var indexCheckerTestDir = "/tmp/milvus_test/index_checker"
@ -42,7 +42,12 @@ func TestReloadFromKV(t *testing.T) {
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
meta, err := newMeta(baseCtx, kv, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(baseCtx, kv, nil, idAllocator)
assert.Nil(t, err)
segmentInfo := &querypb.SegmentInfo{
@ -98,7 +103,13 @@ func TestCheckIndexLoop(t *testing.T) {
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
meta, err := newMeta(ctx, kv, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
rootCoord := newRootCoordMock(ctx)
@ -168,7 +179,12 @@ func TestHandoffNotExistSegment(t *testing.T) {
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
meta, err := newMeta(ctx, kv, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
rootCoord := newRootCoordMock(ctx)
@ -222,20 +238,19 @@ func TestProcessHandoffAfterIndexDone(t *testing.T) {
defer etcdCli.Close()
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
meta, err := newMeta(ctx, kv, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
taskScheduler := &TaskScheduler{
ctx: ctx,
cancel: cancel,
client: kv,
triggerTaskQueue: newTaskQueue(),
}
idAllocatorKV := tsoutil.NewTSOKVBase(etcdCli, Params.EtcdCfg.KvRootPath, "queryCoordTaskID")
idAllocator := allocator.NewGlobalIDAllocator("idTimestamp", idAllocatorKV)
err = idAllocator.Initialize()
assert.Nil(t, err)
taskScheduler.taskIDAllocator = func() (UniqueID, error) {
return idAllocator.AllocOne()
taskIDAllocator: idAllocator,
}
indexChecker, err := newIndexChecker(ctx, kv, meta, nil, taskScheduler, nil)
assert.Nil(t, err)

View File

@ -20,7 +20,9 @@ import (
"context"
"errors"
"fmt"
"math/rand"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
@ -39,6 +41,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/funcutil"
)
@ -72,6 +75,7 @@ type Meta interface {
showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo
getSegmentInfoByID(segmentID UniqueID) (*querypb.SegmentInfo, error)
getSegmentInfosByNode(nodeID int64) []*querypb.SegmentInfo
getSegmentInfosByNodeAndCollection(nodeID, collectionID int64) []*querypb.SegmentInfo
getPartitionStatesByID(collectionID UniqueID, partitionID UniqueID) (*querypb.PartitionStates, error)
@ -106,7 +110,7 @@ type MetaReplica struct {
ctx context.Context
cancel context.CancelFunc
client kv.MetaKv // client of a reliable kv service, i.e. etcd client
msFactory msgstream.Factory
factory dependency.Factory
idAllocator func() (UniqueID, error)
//sync.RWMutex
@ -126,7 +130,7 @@ type MetaReplica struct {
replicas *ReplicaInfos
}
func newMeta(ctx context.Context, kv kv.MetaKv, factory msgstream.Factory, idAllocator func() (UniqueID, error)) (Meta, error) {
func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAllocator func() (UniqueID, error)) (Meta, error) {
childCtx, cancel := context.WithCancel(ctx)
collectionInfos := make(map[UniqueID]*querypb.CollectionInfo)
queryChannelInfos := make(map[UniqueID]*querypb.QueryChannelInfo)
@ -138,7 +142,7 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory msgstream.Factory, idAll
ctx: childCtx,
cancel: cancel,
client: kv,
msFactory: factory,
factory: factory,
idAllocator: idAllocator,
collectionInfos: collectionInfos,
@ -161,6 +165,8 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory msgstream.Factory, idAll
func (m *MetaReplica) reloadFromKV() error {
log.Debug("start reload from kv")
log.Info("recovery collections...")
collectionKeys, collectionValues, err := m.client.LoadWithPrefix(collectionMetaPrefix)
if err != nil {
return err
@ -176,6 +182,9 @@ func (m *MetaReplica) reloadFromKV() error {
return err
}
m.collectionInfos[collectionID] = collectionInfo
log.Debug("recovery collection",
zap.Int64("collectionID", collectionID))
}
metrics.QueryCoordNumCollections.WithLabelValues().Set(float64(len(m.collectionInfos)))
@ -215,12 +224,120 @@ func (m *MetaReplica) reloadFromKV() error {
m.dmChannelInfos[dmChannel] = dmChannelWatchInfo
}
// Compatibility for old meta format
// For collections that don't have replica(s), create 1 replica for them
// Add replica into meta storage and rewrite collection
dmChannels := make(map[UniqueID][]*querypb.DmChannelWatchInfo) // CollectionID -> []*DmChannelWatchInfo
for _, dmc := range m.dmChannelInfos {
dmChannels[dmc.CollectionID] = append(dmChannels[dmc.CollectionID], dmc)
}
for _, collectionInfo := range m.collectionInfos {
if len(collectionInfo.ReplicaIds) == 0 {
replica, err := m.generateReplica(collectionInfo.CollectionID, collectionInfo.PartitionIDs)
if err != nil {
return err
}
segments := m.showSegmentInfos(collectionInfo.CollectionID, collectionInfo.PartitionIDs)
// remove duplicates
nodes := make(map[UniqueID]struct{})
for _, segment := range segments {
for _, nodeID := range segment.NodeIds {
nodes[nodeID] = struct{}{}
}
}
for nodeID := range nodes {
replica.NodeIds = append(replica.NodeIds, nodeID)
}
shardReplicas := make([]*milvuspb.ShardReplica, 0, len(dmChannels[collectionInfo.CollectionID]))
for _, dmc := range dmChannels[collectionInfo.CollectionID] {
shardReplicas = append(shardReplicas, &milvuspb.ShardReplica{
LeaderID: dmc.NodeIDLoaded,
// LeaderAddr: Will set it after the cluster is reloaded
DmChannelName: dmc.DmChannel,
})
}
err = m.addReplica(replica)
if err != nil {
log.Error("failed to add replica for old collection info format without replicas info",
zap.Int64("collectionID", replica.CollectionID),
zap.Error(err))
return err
}
// DO NOT insert the replica into m.replicas
// it will be recovered below
}
}
replicaKeys, replicaValues, err := m.client.LoadWithPrefix(ReplicaMetaPrefix)
if err != nil {
return err
}
for i := range replicaKeys {
replicaInfo := &milvuspb.ReplicaInfo{}
err = proto.Unmarshal([]byte(replicaValues[i]), replicaInfo)
if err != nil {
return err
}
m.replicas.Insert(replicaInfo)
}
//TODO::update partition states
log.Debug("reload from kv finished")
return nil
}
// Compatibility for old meta format, this retrieves node address from cluster.
// The leader address is not always valid
func reloadShardLeaderAddress(meta Meta, cluster Cluster) error {
collections := meta.showCollections()
reloadShardLeaderAddressFunc := func(idx int) error {
collection := collections[idx]
replicas, err := meta.getReplicasByCollectionID(collection.CollectionID)
if err != nil {
return err
}
for _, replica := range replicas {
isModified := false
for _, shard := range replica.ShardReplicas {
if len(shard.LeaderAddr) == 0 {
nodeInfo, err := cluster.getNodeInfoByID(shard.LeaderID)
if err != nil {
log.Warn("failed to retrieve the node's address",
zap.Int64("nodeID", shard.LeaderID),
zap.Error(err))
continue
}
shard.LeaderAddr = nodeInfo.(*queryNode).address
isModified = true
}
}
if isModified {
err := meta.setReplicaInfo(replica)
if err != nil {
return err
}
}
}
return nil
}
concurrencyLevel := len(collections)
if concurrencyLevel > runtime.NumCPU() {
concurrencyLevel = runtime.NumCPU()
}
return funcutil.ProcessFuncParallel(len(collections), concurrencyLevel,
reloadShardLeaderAddressFunc, "reloadShardLeaderAddressFunc")
}
func (m *MetaReplica) setKvClient(kv kv.MetaKv) {
m.client = kv
}
@ -304,7 +421,8 @@ func (m *MetaReplica) addCollection(collectionID UniqueID, loadType querypb.Load
PartitionStates: partitionStates,
LoadType: loadType,
Schema: schema,
// ReplicaIDs: replicas,
ReplicaIds: make([]int64, 0),
ReplicaNumber: 0,
}
err := saveGlobalCollectionInfo(collectionID, newCollection, m.client)
if err != nil {
@ -369,7 +487,14 @@ func (m *MetaReplica) addPartitions(collectionID UniqueID, partitionIDs []Unique
}
func (m *MetaReplica) releaseCollection(collectionID UniqueID) error {
err := removeCollectionMeta(collectionID, m.client)
collection, err := m.getCollectionInfoByID(collectionID)
if err != nil {
log.Warn("the collection has been released",
zap.Int64("collectionID", collectionID))
return nil
}
err = removeCollectionMeta(collectionID, collection.ReplicaIds, m.client)
if err != nil {
log.Warn("remove collectionInfo from etcd failed", zap.Int64("collectionID", collectionID), zap.Any("error", err.Error()))
return err
@ -733,6 +858,16 @@ func (m *MetaReplica) getSegmentInfosByNode(nodeID int64) []*querypb.SegmentInfo
}
return res
}
func (m *MetaReplica) getSegmentInfosByNodeAndCollection(nodeID, collectionID int64) []*querypb.SegmentInfo {
var res []*querypb.SegmentInfo
segments := m.segmentsInfo.getSegments()
for _, segment := range segments {
if segment.GetNodeID() == nodeID && segment.GetCollectionID() == collectionID {
res = append(res, segment)
}
}
return res
}
func (m *MetaReplica) getCollectionInfoByID(collectionID UniqueID) (*querypb.CollectionInfo, error) {
m.collectionMu.RLock()
@ -742,7 +877,7 @@ func (m *MetaReplica) getCollectionInfoByID(collectionID UniqueID) (*querypb.Col
return proto.Clone(info).(*querypb.CollectionInfo), nil
}
return nil, errors.New("getCollectionInfoByID: can't find collectionID in collectionInfo")
return nil, fmt.Errorf("getCollectionInfoByID: can't find collectionID=%v in collectionInfo", collectionID)
}
func (m *MetaReplica) getPartitionStatesByID(collectionID UniqueID, partitionID UniqueID) (*querypb.PartitionStates, error) {
@ -872,7 +1007,7 @@ func (m *MetaReplica) getQueryStreamByID(collectionID UniqueID, queryChannel str
if stream, ok := m.queryStreams[collectionID]; ok {
queryStream = stream
} else {
queryStream, err = m.msFactory.NewMsgStream(m.ctx)
queryStream, err = m.factory.NewMsgStream(m.ctx)
if err != nil {
log.Error("updateGlobalSealedSegmentInfos: create msgStream failed", zap.Error(err))
return nil, err
@ -932,6 +1067,7 @@ func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID Uniqu
}
} else {
findPartition := false
info.InMemoryPercentage = 0
for _, partitionState := range info.PartitionStates {
if partitionState.PartitionID == partitionID {
findPartition = true
@ -941,16 +1077,19 @@ func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID Uniqu
partitionState.State = querypb.PartitionState_PartialInMemory
}
partitionState.InMemoryPercentage = percentage
err := saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
}
info.InMemoryPercentage += partitionState.InMemoryPercentage
}
if !findPartition {
return errors.New("setLoadPercentage: can't find partitionID in collectionInfos")
}
info.InMemoryPercentage /= int64(len(info.PartitionIDs))
err := saveGlobalCollectionInfo(collectionID, info, m.client)
if err != nil {
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
}
}
m.collectionInfos[collectionID] = info
@ -1042,7 +1181,24 @@ func (m *MetaReplica) generateReplica(collectionID int64, partitionIds []int64)
}
func (m *MetaReplica) addReplica(replica *milvuspb.ReplicaInfo) error {
err := saveReplicaInfo(replica, m.client)
collectionInfo, err := m.getCollectionInfoByID(replica.CollectionID)
if err != nil {
return err
}
collectionInfo.ReplicaIds = append(collectionInfo.ReplicaIds, replica.ReplicaID)
collectionInfo.ReplicaNumber++
err = saveGlobalCollectionInfo(collectionInfo.CollectionID, collectionInfo, m.client)
if err != nil {
return err
}
m.collectionMu.Lock()
m.collectionInfos[collectionInfo.CollectionID] = collectionInfo
m.collectionMu.Unlock()
err = saveReplicaInfo(replica, m.client)
if err != nil {
return err
}
@ -1091,6 +1247,10 @@ func (m *MetaReplica) getReplicasByCollectionID(collectionID int64) ([]*milvuspb
replicas = append(replicas, replica)
}
rand.Shuffle(len(replicas), func(i, j int) {
replicas[i], replicas[j] = replicas[j], replicas[i]
})
return replicas, nil
}
@ -1158,14 +1318,21 @@ func saveReplicaInfo(info *milvuspb.ReplicaInfo, kv kv.MetaKv) error {
return kv.Save(key, string(infoBytes))
}
func removeCollectionMeta(collectionID UniqueID, kv kv.MetaKv) error {
func removeCollectionMeta(collectionID UniqueID, replicas []UniqueID, kv kv.MetaKv) error {
var prefixes []string
collectionInfosPrefix := fmt.Sprintf("%s/%d", collectionMetaPrefix, collectionID)
prefixes = append(prefixes, collectionInfosPrefix)
dmChannelInfosPrefix := fmt.Sprintf("%s/%d", dmChannelMetaPrefix, collectionID)
prefixes = append(prefixes, dmChannelInfosPrefix)
deltaChannelInfosPrefix := fmt.Sprintf("%s/%d", deltaChannelMetaPrefix, collectionID)
prefixes = append(prefixes, deltaChannelInfosPrefix)
for _, replicaID := range replicas {
replicaPrefix := fmt.Sprintf("%s/%d", ReplicaMetaPrefix, replicaID)
prefixes = append(prefixes, replicaPrefix)
}
return kv.MultiRemoveWithPrefix(prefixes)
}

View File

@ -20,6 +20,8 @@ import (
"context"
"errors"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"github.com/golang/protobuf/proto"
@ -68,7 +70,12 @@ func TestReplica_Release(t *testing.T) {
assert.Nil(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
meta, err := newMeta(context.Background(), etcdKV, nil, nil)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(context.Background(), etcdKV, nil, idAllocator)
assert.Nil(t, err)
err = meta.addCollection(1, querypb.LoadType_LoadCollection, nil)
require.NoError(t, err)
@ -294,13 +301,20 @@ func TestReloadMetaFromKV(t *testing.T) {
assert.Nil(t, err)
defer etcdCli.Close()
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta := &MetaReplica{
client: kv,
idAllocator: idAllocator,
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
deltaChannelInfos: map[UniqueID][]*datapb.VchannelInfo{},
segmentsInfo: newSegmentsInfo(kv),
replicas: NewReplicaInfos(),
}
kvs := make(map[string]string)
@ -349,10 +363,17 @@ func TestReloadMetaFromKV(t *testing.T) {
assert.Equal(t, 1, len(meta.collectionInfos))
assert.Equal(t, 1, len(meta.segmentsInfo.getSegments()))
_, ok := meta.collectionInfos[defaultCollectionID]
assert.Equal(t, true, ok)
collectionInfo, err = meta.getCollectionInfoByID(collectionInfo.CollectionID)
assert.NoError(t, err)
assert.Equal(t, 1, len(collectionInfo.ReplicaIds))
assert.Equal(t, int32(1), collectionInfo.ReplicaNumber)
segment := meta.segmentsInfo.getSegment(defaultSegmentID)
assert.NotNil(t, segment)
replicas, err := meta.getReplicasByCollectionID(collectionInfo.CollectionID)
assert.NoError(t, err)
assert.Equal(t, 1, len(replicas))
assert.Equal(t, collectionInfo.CollectionID, replicas[0].CollectionID)
}
func TestCreateQueryChannel(t *testing.T) {

View File

@ -344,20 +344,6 @@ func (data *dataCoordMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetR
}, nil
}
if _, ok := data.partitionID2Segment[partitionID]; !ok {
segmentIDs := make([]UniqueID, 0)
for i := 0; i < data.channelNumPerCol; i++ {
segmentID := data.baseSegmentID
if _, ok := data.Segment2Binlog[segmentID]; !ok {
segmentBinlog := generateInsertBinLog(segmentID)
data.Segment2Binlog[segmentID] = segmentBinlog
}
segmentIDs = append(segmentIDs, segmentID)
data.baseSegmentID++
}
data.partitionID2Segment[partitionID] = segmentIDs
}
if _, ok := data.col2DmChannels[collectionID]; !ok {
channelInfos := make([]*datapb.VchannelInfo, 0)
data.collections = append(data.collections, collectionID)
@ -376,6 +362,21 @@ func (data *dataCoordMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetR
data.col2DmChannels[collectionID] = channelInfos
}
if _, ok := data.partitionID2Segment[partitionID]; !ok {
segmentIDs := make([]UniqueID, 0)
for i := 0; i < data.channelNumPerCol; i++ {
segmentID := data.baseSegmentID
if _, ok := data.Segment2Binlog[segmentID]; !ok {
segmentBinlog := generateInsertBinLog(segmentID)
segmentBinlog.InsertChannel = data.col2DmChannels[collectionID][i].ChannelName
data.Segment2Binlog[segmentID] = segmentBinlog
}
segmentIDs = append(segmentIDs, segmentID)
data.baseSegmentID++
}
data.partitionID2Segment[partitionID] = segmentIDs
}
binlogs := make([]*datapb.SegmentBinlogs, 0)
for _, segmentID := range data.partitionID2Segment[partitionID] {
if _, ok := data.Segment2Binlog[segmentID]; ok {

View File

@ -39,6 +39,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/types"
@ -99,8 +100,9 @@ type QueryCoord struct {
stateCode atomic.Value
factory dependency.Factory
chunkManager storage.ChunkManager
factory dependency.Factory
chunkManager storage.ChunkManager
groupBalancer balancer
}
// Register register query service at etcd
@ -168,6 +170,7 @@ func (qc *QueryCoord) Init() error {
log.Error("query coordinator init meta failed", zap.Error(initError))
return
}
qc.groupBalancer = newReplicaBalancer(qc.meta)
// init channelUnsubscribeHandler
qc.handler, initError = newChannelUnsubscribeHandler(qc.loopCtx, qc.kvClient, qc.factory)
@ -183,7 +186,17 @@ func (qc *QueryCoord) Init() error {
return
}
qc.chunkManager, initError = qc.factory.NewVectorStorageChunkManager(qc.loopCtx)
// NOTE: ignore the returned error
// we only try best to reload the leader addresses
reloadShardLeaderAddress(qc.meta, qc.cluster)
qc.chunkManager, initError = storage.NewMinioChunkManager(qc.loopCtx,
storage.Address(Params.MinioCfg.Address),
storage.AccessKeyID(Params.MinioCfg.AccessKeyID),
storage.SecretAccessKeyID(Params.MinioCfg.SecretAccessKey),
storage.UseSSL(Params.MinioCfg.UseSSL),
storage.BucketName(Params.MinioCfg.BucketName),
storage.CreateBucket(true))
if initError != nil {
log.Error("query coordinator init cluster failed", zap.Error(initError))
@ -337,6 +350,13 @@ func (qc *QueryCoord) watchNodeLoop() {
defer qc.loopWg.Done()
log.Debug("QueryCoord start watch node loop")
unallocatedNodes := qc.getUnallocatedNodes()
for _, n := range unallocatedNodes {
if err := qc.allocateNode(n); err != nil {
log.Warn("unable to allcoate node", zap.Int64("nodeID", n), zap.Error(err))
}
}
offlineNodeIDs := qc.cluster.offlineNodeIDs()
if len(offlineNodeIDs) != 0 {
loadBalanceSegment := &querypb.LoadBalanceRequest{
@ -366,6 +386,70 @@ func (qc *QueryCoord) watchNodeLoop() {
qc.handleNodeEvent(ctx)
}
func (qc *QueryCoord) allocateNode(nodeID int64) error {
plans, err := qc.groupBalancer.addNode(nodeID)
if err != nil {
return err
}
for _, p := range plans {
if err := qc.applyBalancePlan(p); err != nil {
log.Warn("failed to apply balance plan", zap.Error(err), zap.Any("plan", p))
}
}
return nil
}
func (qc *QueryCoord) getUnallocatedNodes() []int64 {
onlines := qc.cluster.onlineNodeIDs()
var ret []int64
for _, n := range onlines {
replica, err := qc.meta.getReplicasByNodeID(n)
if err != nil {
log.Warn("failed to get replica", zap.Int64("nodeID", n), zap.Error(err))
continue
}
if replica == nil {
ret = append(ret, n)
}
}
return ret
}
func (qc *QueryCoord) applyBalancePlan(p *balancePlan) error {
if p.sourceReplica != -1 {
replica, err := qc.meta.getReplicaByID(p.sourceReplica)
if err != nil {
return err
}
replica = removeNodeFromReplica(replica, p.nodeID)
if err := qc.meta.setReplicaInfo(replica); err != nil {
return err
}
}
if p.targetReplica != -1 {
replica, err := qc.meta.getReplicaByID(p.targetReplica)
if err != nil {
return err
}
replica.NodeIds = append(replica.NodeIds, p.nodeID)
if err := qc.meta.setReplicaInfo(replica); err != nil {
return err
}
}
return nil
}
func removeNodeFromReplica(replica *milvuspb.ReplicaInfo, nodeID int64) *milvuspb.ReplicaInfo {
for i := 0; i < len(replica.NodeIds); i++ {
if replica.NodeIds[i] != nodeID {
continue
}
replica.NodeIds = append(replica.NodeIds[:i], replica.NodeIds[i+1:]...)
return replica
}
return replica
}
func (qc *QueryCoord) handleNodeEvent(ctx context.Context) {
for {
select {
@ -390,6 +474,10 @@ func (qc *QueryCoord) handleNodeEvent(ctx context.Context) {
err := qc.cluster.registerNode(ctx, event.Session, serverID, disConnect)
if err != nil {
log.Error("QueryCoord failed to register a QueryNode", zap.Int64("nodeID", serverID), zap.String("error info", err.Error()))
continue
}
if err := qc.allocateNode(serverID); err != nil {
log.Error("unable to allcoate node", zap.Int64("nodeID", serverID), zap.Error(err))
}
qc.metricsCacheManager.InvalidateSystemInfoMetrics()
case sessionutil.SessionDelEvent:
@ -480,81 +568,104 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
timer := time.NewTicker(time.Duration(Params.QueryCoordCfg.BalanceIntervalSeconds) * time.Second)
var collectionInfos []*querypb.CollectionInfo
pos := 0
for {
select {
case <-ctx.Done():
return
case <-timer.C:
onlineNodeIDs := qc.cluster.onlineNodeIDs()
if len(onlineNodeIDs) == 0 {
log.Error("[auto balance]there is no online QueryNode to do balance")
continue
if pos == len(collectionInfos) {
pos = 0
collectionInfos = qc.meta.showCollections()
}
// get mem info of online nodes from cluster
nodeID2MemUsageRate := make(map[int64]float64)
nodeID2MemUsage := make(map[int64]uint64)
nodeID2TotalMem := make(map[int64]uint64)
nodeID2SegmentInfos := make(map[int64]map[UniqueID]*querypb.SegmentInfo)
var availableNodeIDs []int64
for _, nodeID := range onlineNodeIDs {
nodeInfo, err := qc.cluster.getNodeInfoByID(nodeID)
loadBalanceTasks := make([]*loadBalanceTask, 0)
// balance at most 20 collections in a round
for i := 0; pos < len(collectionInfos) && i < 20; i, pos = i+1, pos+1 {
info := collectionInfos[pos]
replicas, err := qc.meta.getReplicasByCollectionID(info.GetCollectionID())
if err != nil {
log.Warn("loadBalanceSegmentLoop: get node info from QueryNode failed", zap.Int64("nodeID", nodeID), zap.Error(err))
log.Warn("unable to get replicas of collection", zap.Int64("collectionID", info.GetCollectionID()))
continue
}
updateSegmentInfoDone := true
leastSegmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
segmentInfos := qc.meta.getSegmentInfosByNode(nodeID)
for _, segmentInfo := range segmentInfos {
leastInfo, err := qc.cluster.getSegmentInfoByID(ctx, segmentInfo.SegmentID)
if err != nil {
log.Warn("[auto balance] failed to get segment information from QueryNode", zap.Int64("nodeID", nodeID), zap.Error(err))
updateSegmentInfoDone = false
break
for _, replica := range replicas {
// auto balance is executed on replica level
onlineNodeIDs := replica.GetNodeIds()
if len(onlineNodeIDs) == 0 {
log.Error("loadBalanceSegmentLoop: there are no online QueryNode to balance")
continue
}
leastSegmentInfos[segmentInfo.SegmentID] = leastInfo
}
if updateSegmentInfoDone {
nodeID2MemUsageRate[nodeID] = nodeInfo.(*queryNode).memUsageRate
nodeID2MemUsage[nodeID] = nodeInfo.(*queryNode).memUsage
nodeID2TotalMem[nodeID] = nodeInfo.(*queryNode).totalMem
availableNodeIDs = append(availableNodeIDs, nodeID)
nodeID2SegmentInfos[nodeID] = leastSegmentInfos
}
}
log.Debug("loadBalanceSegmentLoop: memory usage rate of all online QueryNode", zap.Any("mem rate", nodeID2MemUsageRate))
if len(availableNodeIDs) <= 1 {
log.Warn("loadBalanceSegmentLoop: there are too few available query nodes to balance", zap.Int64s("onlineNodeIDs", onlineNodeIDs), zap.Int64s("availableNodeIDs", availableNodeIDs))
continue
}
var availableNodeIDs []int64
nodeID2SegmentInfos := make(map[int64]map[UniqueID]*querypb.SegmentInfo)
for _, nodeID := range onlineNodeIDs {
if _, ok := nodeID2MemUsage[nodeID]; !ok {
nodeInfo, err := qc.cluster.getNodeInfoByID(nodeID)
if err != nil {
log.Warn("loadBalanceSegmentLoop: get node info from QueryNode failed", zap.Int64("nodeID", nodeID), zap.Error(err))
continue
}
nodeID2MemUsageRate[nodeID] = nodeInfo.(*queryNode).memUsageRate
nodeID2MemUsage[nodeID] = nodeInfo.(*queryNode).memUsage
nodeID2TotalMem[nodeID] = nodeInfo.(*queryNode).totalMem
}
// check which nodes need balance and determine which segments on these nodes need to be migrated to other nodes
memoryInsufficient := false
loadBalanceTasks := make([]*loadBalanceTask, 0)
for {
sort.Slice(availableNodeIDs, func(i, j int) bool {
return nodeID2MemUsageRate[availableNodeIDs[i]] > nodeID2MemUsageRate[availableNodeIDs[j]]
})
// the memoryUsageRate of the sourceNode is higher than other query node
sourceNodeID := availableNodeIDs[0]
dstNodeID := availableNodeIDs[len(availableNodeIDs)-1]
memUsageRateDiff := nodeID2MemUsageRate[sourceNodeID] - nodeID2MemUsageRate[dstNodeID]
// if memoryUsageRate of source node is greater than 90%, and the max memUsageDiff is greater than 30%
// then migrate the segments on source node to other query nodes
if nodeID2MemUsageRate[sourceNodeID] > Params.QueryCoordCfg.OverloadedMemoryThresholdPercentage ||
memUsageRateDiff > Params.QueryCoordCfg.MemoryUsageMaxDifferencePercentage {
segmentInfos := nodeID2SegmentInfos[sourceNodeID]
// select the segment that needs balance on the source node
selectedSegmentInfo, err := chooseSegmentToBalance(sourceNodeID, dstNodeID, segmentInfos, nodeID2MemUsage, nodeID2TotalMem, nodeID2MemUsageRate)
if err != nil {
// no enough memory on query nodes to balance, then notify proxy to stop insert
memoryInsufficient = true
break
updateSegmentInfoDone := true
leastSegmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
segmentInfos := qc.meta.getSegmentInfosByNodeAndCollection(nodeID, replica.GetCollectionID())
for _, segmentInfo := range segmentInfos {
leastInfo, err := qc.cluster.getSegmentInfoByID(ctx, segmentInfo.SegmentID)
if err != nil {
log.Warn("loadBalanceSegmentLoop: get segment info from QueryNode failed", zap.Int64("nodeID", nodeID), zap.Error(err))
updateSegmentInfoDone = false
break
}
leastSegmentInfos[segmentInfo.SegmentID] = leastInfo
}
if updateSegmentInfoDone {
availableNodeIDs = append(availableNodeIDs, nodeID)
nodeID2SegmentInfos[nodeID] = leastSegmentInfos
}
}
// select a segment to balance successfully, then recursive traversal whether there are other segments that can balance
if selectedSegmentInfo != nil {
log.Debug("loadBalanceSegmentLoop: memory usage rate of all online QueryNode", zap.Any("mem rate", nodeID2MemUsageRate))
if len(availableNodeIDs) <= 1 {
log.Warn("loadBalanceSegmentLoop: there are too few available query nodes to balance", zap.Int64s("onlineNodeIDs", onlineNodeIDs), zap.Int64s("availableNodeIDs", availableNodeIDs))
continue
}
// check which nodes need balance and determine which segments on these nodes need to be migrated to other nodes
memoryInsufficient := false
for {
sort.Slice(availableNodeIDs, func(i, j int) bool {
return nodeID2MemUsageRate[availableNodeIDs[i]] > nodeID2MemUsageRate[availableNodeIDs[j]]
})
// the memoryUsageRate of the sourceNode is higher than other query node
sourceNodeID := availableNodeIDs[0]
dstNodeID := availableNodeIDs[len(availableNodeIDs)-1]
memUsageRateDiff := nodeID2MemUsageRate[sourceNodeID] - nodeID2MemUsageRate[dstNodeID]
if nodeID2MemUsageRate[sourceNodeID] <= Params.QueryCoordCfg.OverloadedMemoryThresholdPercentage &&
memUsageRateDiff <= Params.QueryCoordCfg.MemoryUsageMaxDifferencePercentage {
break
}
// if memoryUsageRate of source node is greater than 90%, and the max memUsageDiff is greater than 30%
// then migrate the segments on source node to other query nodes
segmentInfos := nodeID2SegmentInfos[sourceNodeID]
// select the segment that needs balance on the source node
selectedSegmentInfo, err := chooseSegmentToBalance(sourceNodeID, dstNodeID, segmentInfos, nodeID2MemUsage, nodeID2TotalMem, nodeID2MemUsageRate)
if err != nil {
// no enough memory on query nodes to balance, then notify proxy to stop insert
memoryInsufficient = true
break
}
if selectedSegmentInfo == nil {
break
}
// select a segment to balance successfully, then recursive traversal whether there are other segments that can balance
req := &querypb.LoadBalanceRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadBalanceSegments,
@ -580,35 +691,28 @@ func (qc *QueryCoord) loadBalanceSegmentLoop() {
delete(nodeID2SegmentInfos[sourceNodeID], selectedSegmentInfo.SegmentID)
nodeID2SegmentInfos[dstNodeID][selectedSegmentInfo.SegmentID] = selectedSegmentInfo
continue
} else {
// moving any segment will not improve the balance status
break
}
if memoryInsufficient {
// no enough memory on query nodes to balance, then notify proxy to stop insert
//TODO:: xige-16
log.Warn("loadBalanceSegmentLoop: QueryNode has insufficient memory, stop inserting data")
}
}
}
for _, t := range loadBalanceTasks {
qc.scheduler.Enqueue(t)
log.Debug("loadBalanceSegmentLoop: enqueue a loadBalance task", zap.Any("task", t))
err := t.waitToFinish()
if err != nil {
// if failed, wait for next balance loop
// it may be that the collection/partition of the balanced segment has been released
// it also may be other abnormal errors
log.Error("loadBalanceSegmentLoop: balance task execute failed", zap.Any("task", t))
} else {
// all query node's memoryUsageRate is less than 90%, and the max memUsageDiff is less than 30%
break
log.Debug("loadBalanceSegmentLoop: balance task execute success", zap.Any("task", t))
}
}
if !memoryInsufficient {
for _, t := range loadBalanceTasks {
qc.scheduler.Enqueue(t)
log.Debug("loadBalanceSegmentLoop: enqueue a loadBalance task", zap.Any("task", t))
err := t.waitToFinish()
if err != nil {
// if failed, wait for next balance loop
// it may be that the collection/partition of the balanced segment has been released
// it also may be other abnormal errors
log.Error("loadBalanceSegmentLoop: balance task execute failed", zap.Any("task", t))
} else {
log.Debug("loadBalanceSegmentLoop: balance task execute success", zap.Any("task", t))
}
}
log.Debug("loadBalanceSegmentLoop: load balance Done in this loop", zap.Any("tasks", loadBalanceTasks))
} else {
// no enough memory on query nodes to balance, then notify proxy to stop insert
//TODO:: xige-16
log.Error("loadBalanceSegmentLoop: QueryNode has insufficient memory, stop inserting data")
}
log.Debug("loadBalanceSegmentLoop: load balance Done in this loop", zap.Any("tasks", loadBalanceTasks))
}
}
}

View File

@ -177,7 +177,7 @@ func TestWatchNodeLoop(t *testing.T) {
}
collectionBlobs, err := proto.Marshal(collectionInfo)
assert.Nil(t, err)
nodeKey := fmt.Sprintf("%s/%d", collectionMetaPrefix, 100)
nodeKey := fmt.Sprintf("%s/%d", collectionMetaPrefix, defaultCollectionID)
kvs[nodeKey] = string(collectionBlobs)
err = kv.MultiSave(kvs)
@ -582,9 +582,10 @@ func TestLoadBalanceSegmentLoop(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{partitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{partitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
baseTask := newBaseTask(baseCtx, querypb.TriggerCondition_GrpcRequest)
loadPartitionTask := &loadPartitionTask{

View File

@ -118,8 +118,9 @@ func TestQueryNode_MultiNode_stop(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
_, err = queryCoord.ReleaseCollection(baseCtx, &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{
@ -157,8 +158,9 @@ func TestQueryNode_MultiNode_reStart(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
queryNode1.stop()
err = removeNodeSession(queryNode1.queryNodeID)
@ -256,8 +258,9 @@ func TestSealedSegmentChangeAfterQueryNodeStop(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
})
queryNode2, err := startQueryNodeServer(baseCtx)

View File

@ -19,6 +19,7 @@ package querycoord
import (
"sync"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
@ -28,17 +29,17 @@ type ReplicaInfos struct {
globalGuard sync.RWMutex // We have to make sure atomically update replicas and index
// Persistent Info
replicas map[UniqueID]*milvuspb.ReplicaInfo // replica_id -> *ReplicaInfo
replicas map[UniqueID]*milvuspb.ReplicaInfo // replicaID -> *ReplicaInfo
// Non-persistent info
nodeIndex map[UniqueID][]*milvuspb.ReplicaInfo // node_id -> []*ReplicaInfo
nodeIndex map[UniqueID]map[UniqueID]*milvuspb.ReplicaInfo // nodeID, replicaID -> []*ReplicaInfo
}
func NewReplicaInfos() *ReplicaInfos {
return &ReplicaInfos{
globalGuard: sync.RWMutex{},
replicas: make(map[int64]*milvuspb.ReplicaInfo),
nodeIndex: make(map[int64][]*milvuspb.ReplicaInfo),
nodeIndex: make(map[int64]map[int64]*milvuspb.ReplicaInfo),
}
}
@ -47,7 +48,8 @@ func (rep *ReplicaInfos) Get(replicaID UniqueID) (*milvuspb.ReplicaInfo, bool) {
defer rep.globalGuard.RUnlock()
info, ok := rep.replicas[replicaID]
return info, ok
clone := proto.Clone(info).(*milvuspb.ReplicaInfo)
return clone, ok
}
// Make sure atomically update replica and index
@ -56,23 +58,25 @@ func (rep *ReplicaInfos) Insert(info *milvuspb.ReplicaInfo) {
defer rep.globalGuard.Unlock()
old, ok := rep.replicas[info.ReplicaID]
// This updates ReplicaInfo, not inserts a new one
// No need to update nodeIndex
if ok {
*old = *info
return
}
info = proto.Clone(info).(*milvuspb.ReplicaInfo)
rep.replicas[info.ReplicaID] = info
// This updates ReplicaInfo, not inserts a new one
if ok {
for _, nodeID := range old.NodeIds {
nodeReplicas := rep.nodeIndex[nodeID]
delete(nodeReplicas, old.ReplicaID)
}
}
for _, nodeID := range info.NodeIds {
replicas, ok := rep.nodeIndex[nodeID]
if !ok {
replicas = make([]*milvuspb.ReplicaInfo, 0)
rep.nodeIndex[nodeID] = replicas
replicas = make(map[UniqueID]*milvuspb.ReplicaInfo)
}
replicas = append(replicas, info)
replicas[info.ReplicaID] = info
rep.nodeIndex[nodeID] = replicas
}
}
@ -87,5 +91,24 @@ func (rep *ReplicaInfos) GetReplicasByNodeID(nodeID UniqueID) []*milvuspb.Replic
return nil
}
return replicas
clones := make([]*milvuspb.ReplicaInfo, 0, len(replicas))
for _, replica := range replicas {
clones = append(clones, proto.Clone(replica).(*milvuspb.ReplicaInfo))
}
return clones
}
func (rep *ReplicaInfos) Remove(replicaIds ...UniqueID) {
rep.globalGuard.Lock()
defer rep.globalGuard.Unlock()
for _, replicaID := range replicaIds {
delete(rep.replicas, replicaID)
}
for _, replicaIndex := range rep.nodeIndex {
for _, replicaID := range replicaIds {
delete(replicaIndex, replicaID)
}
}
}

View File

@ -34,12 +34,12 @@ func defaultSegAllocatePolicy() SegmentAllocatePolicy {
const shuffleWaitInterval = 1 * time.Second
// SegmentAllocatePolicy helper function definition to allocate Segment to queryNode
type SegmentAllocatePolicy func(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error
type SegmentAllocatePolicy func(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error
// shuffleSegmentsToQueryNode shuffle segments to online nodes
// returned are noded id for each segment, which satisfies:
// len(returnedNodeIds) == len(segmentIDs) && segmentIDs[i] is assigned to returnedNodeIds[i]
func shuffleSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
func shuffleSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error {
if len(reqs) == 0 {
return nil
}
@ -95,7 +95,7 @@ func shuffleSegmentsToQueryNode(ctx context.Context, reqs []*querypb.LoadSegment
}
}
func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) error {
func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegmentsRequest, cluster Cluster, metaCache Meta, wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) error {
// key = offset, value = segmentSize
if len(reqs) == 0 {
return nil
@ -115,7 +115,16 @@ func shuffleSegmentsToQueryNodeV2(ctx context.Context, reqs []*querypb.LoadSegme
totalMem := make(map[int64]uint64)
memUsage := make(map[int64]uint64)
memUsageRate := make(map[int64]float64)
onlineNodeIDs := cluster.onlineNodeIDs()
var onlineNodeIDs []int64
if replicaID == -1 {
onlineNodeIDs = cluster.onlineNodeIDs()
} else {
replica, err := metaCache.getReplicaByID(replicaID)
if err != nil {
return err
}
onlineNodeIDs = replica.GetNodeIds()
}
if len(onlineNodeIDs) == 0 && !wait {
err := errors.New("no online queryNode to allocate")
log.Error("shuffleSegmentsToQueryNode failed", zap.Error(err))

View File

@ -18,6 +18,8 @@ package querycoord
import (
"context"
"math/rand"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
@ -39,8 +41,14 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
clusterSession := sessionutil.NewSession(context.Background(), Params.EtcdCfg.MetaRootPath, etcdCli)
clusterSession.Init(typeutil.QueryCoordRole, Params.QueryCoordCfg.Address, true, false)
factory := dependency.NewDefaultFactory(true)
meta, err := newMeta(baseCtx, kv, factory, nil)
factory := dependency.NewDefaultFactory(true) //msgstream.NewPmsFactory()
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(baseCtx, kv, factory, idAllocator)
assert.Nil(t, err)
handler, err := newChannelUnsubscribeHandler(baseCtx, kv, factory)
assert.Nil(t, err)
@ -83,7 +91,7 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
reqs := []*querypb.LoadSegmentsRequest{firstReq, secondReq}
t.Run("Test shuffleSegmentsWithoutQueryNode", func(t *testing.T) {
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil)
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil, -1)
assert.NotNil(t, err)
})
@ -95,7 +103,7 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
waitQueryNodeOnline(cluster, node1ID)
t.Run("Test shuffleSegmentsToQueryNode", func(t *testing.T) {
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil)
err = shuffleSegmentsToQueryNode(baseCtx, reqs, cluster, meta, false, nil, nil, -1)
assert.Nil(t, err)
assert.Equal(t, node1ID, firstReq.DstNodeID)
@ -111,13 +119,13 @@ func TestShuffleSegmentsToQueryNode(t *testing.T) {
cluster.stopNode(node1ID)
t.Run("Test shuffleSegmentsToQueryNodeV2", func(t *testing.T) {
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, meta, false, nil, nil)
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, meta, false, nil, nil, -1)
assert.Nil(t, err)
assert.Equal(t, node2ID, firstReq.DstNodeID)
assert.Equal(t, node2ID, secondReq.DstNodeID)
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, meta, true, nil, nil)
err = shuffleSegmentsToQueryNodeV2(baseCtx, reqs, cluster, meta, true, nil, nil, -1)
assert.Nil(t, err)
assert.Equal(t, node2ID, firstReq.DstNodeID)

View File

@ -45,8 +45,6 @@ func newSegmentsInfo(kv kv.TxnKV) *segmentsInfo {
func (s *segmentsInfo) loadSegments() error {
var err error
s.loadOnce.Do(func() {
s.mu.Lock()
defer s.mu.Unlock()
var values []string
_, values, err = s.kv.LoadWithPrefix(util.SegmentMetaPrefix)
if err != nil {
@ -60,6 +58,16 @@ func (s *segmentsInfo) loadSegments() error {
}
s.segmentIDMap[segment.GetSegmentID()] = segment
numRowsCnt += float64(segment.NumRows)
// Compatibility for old meta format
if len(segment.NodeIds) == 0 {
segment.NodeIds = append(segment.NodeIds, segment.NodeID)
}
// rewrite segment info
err = s.saveSegment(segment)
if err != nil {
return
}
}
metrics.QueryCoordNumEntities.WithLabelValues().Add(numRowsCnt)

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/timerecord"
@ -353,13 +354,20 @@ func (lct *loadCollectionTask) updateTaskProcess() {
}
func (lct *loadCollectionTask) preExecute(ctx context.Context) error {
if lct.ReplicaNumber < 1 {
log.Warn("replicaNumber is less than 1 for load collection request, will set it to 1",
zap.Int32("replicaNumber", lct.ReplicaNumber))
lct.ReplicaNumber = 1
}
collectionID := lct.CollectionID
schema := lct.Schema
lct.setResultInfo(nil)
log.Debug("start do loadCollectionTask",
zap.Int64("msgID", lct.getTaskID()),
zap.Int64("collectionID", collectionID),
zap.Stringer("schema", schema))
zap.Stringer("schema", schema),
zap.Int32("replicaNumber", lct.ReplicaNumber))
return nil
}
@ -367,22 +375,24 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
defer lct.reduceRetryCount()
collectionID := lct.CollectionID
toLoadPartitionIDs, err := lct.broker.showPartitionIDs(ctx, collectionID)
partitionIds, err := lct.broker.showPartitionIDs(ctx, collectionID)
if err != nil {
log.Error("loadCollectionTask: showPartition failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
lct.setResultInfo(err)
return err
}
log.Debug("loadCollectionTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toLoadPartitionIDs), zap.Int64("msgID", lct.Base.MsgID))
log.Debug("loadCollectionTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIds), zap.Int64("msgID", lct.Base.MsgID))
var (
loadSegmentReqs = []*querypb.LoadSegmentsRequest{}
watchDmChannelReqs = []*querypb.WatchDmChannelsRequest{}
deltaChannelInfos = []*datapb.VchannelInfo{}
dmChannelInfos = []*datapb.VchannelInfo{}
replicas = make([]*milvuspb.ReplicaInfo, lct.ReplicaNumber)
replicaIds = make([]int64, lct.ReplicaNumber)
segmentLoadInfos = make([]*querypb.SegmentLoadInfo, 0)
deltaChannelInfos = make([]*datapb.VchannelInfo, 0)
dmChannelInfos = make([]*datapb.VchannelInfo, 0)
collectionSize uint64
)
for _, partitionID := range toLoadPartitionIDs {
for _, partitionID := range partitionIds {
vChannelInfos, binlogs, err := lct.broker.getRecoveryInfo(lct.ctx, collectionID, partitionID)
if err != nil {
log.Error("loadCollectionTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
@ -392,21 +402,8 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
for _, segmentBinlog := range binlogs {
segmentLoadInfo := lct.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBinlog, true, lct.Schema)
msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: lct.Schema,
CollectionID: collectionID,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadCollection,
CollectionID: collectionID,
PartitionIDs: toLoadPartitionIDs,
},
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
collectionSize += uint64(segmentLoadInfo.SegmentSize)
segmentLoadInfos = append(segmentLoadInfos, segmentLoadInfo)
}
for _, info := range vChannelInfos {
@ -420,6 +417,7 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
dmChannelInfos = append(dmChannelInfos, info)
}
}
mergedDeltaChannels := mergeWatchDeltaChannelInfo(deltaChannelInfos)
// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
err = lct.meta.setDeltaChannel(collectionID, mergedDeltaChannels)
@ -429,40 +427,108 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
return err
}
//TODO:: queryNode receive dm message according partitionID cache
//TODO:: queryNode add partitionID to cache if receive create partition message from dmChannel
mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
for _, info := range mergedDmChannel {
msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
CollectionID: collectionID,
//PartitionIDs: toLoadPartitionIDs,
Infos: []*datapb.VchannelInfo{info},
Schema: lct.Schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadCollection,
CollectionID: collectionID,
PartitionIDs: toLoadPartitionIDs,
},
for i := range replicas {
replica, err := lct.meta.generateReplica(lct.CollectionID, partitionIds)
if err != nil {
lct.setResultInfo(err)
return err
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
replicas[i] = replica
replicaIds[i] = replica.ReplicaID
}
internalTasks, err := assignInternalTask(ctx, lct, lct.meta, lct.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, nil)
err = lct.cluster.assignNodesToReplicas(ctx, replicas, collectionSize)
if err != nil {
log.Error("loadCollectionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
log.Error("failed to assign nodes to replicas",
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", partitionIds),
zap.Int64("msgID", lct.Base.MsgID),
zap.Int32("replicaNumber", lct.ReplicaNumber),
zap.Error(err))
lct.setResultInfo(err)
return err
}
for _, internalTask := range internalTasks {
lct.addChildTask(internalTask)
log.Debug("loadCollectionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())), zap.Int64("msgID", lct.Base.MsgID))
for _, replica := range replicas {
var (
loadSegmentReqs = []*querypb.LoadSegmentsRequest{}
watchDmChannelReqs = []*querypb.WatchDmChannelsRequest{}
)
for _, segmentLoadInfo := range segmentLoadInfos {
msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: lct.Schema,
CollectionID: collectionID,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadCollection,
CollectionID: collectionID,
PartitionIDs: partitionIds,
},
ReplicaID: replica.ReplicaID,
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
}
//TODO:: queryNode receive dm message according partitionID cache
//TODO:: queryNode add partitionID to cache if receive create partition message from dmChannel
for _, info := range mergedDmChannel {
msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
CollectionID: collectionID,
//PartitionIDs: toLoadPartitionIDs,
Infos: []*datapb.VchannelInfo{info},
Schema: lct.Schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadCollection,
CollectionID: collectionID,
PartitionIDs: partitionIds,
},
ReplicaID: replica.GetReplicaID(),
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
}
internalTasks, err := assignInternalTask(ctx, lct, lct.meta, lct.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, replica.GetNodeIds(), -1)
if err != nil {
log.Error("loadCollectionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
lct.setResultInfo(err)
return err
}
for _, internalTask := range internalTasks {
lct.addChildTask(internalTask)
if task, ok := internalTask.(*watchDmChannelTask); ok {
nodeInfo, err := lct.cluster.getNodeInfoByID(task.NodeID)
if err != nil {
log.Error("loadCollectionTask: get shard leader node info failed",
zap.Int64("collectionID", collectionID),
zap.Int64("msgID", lct.Base.MsgID),
zap.Int64("nodeID", task.NodeID),
zap.Error(err))
lct.setResultInfo(err)
return err
}
replica.ShardReplicas = append(replica.ShardReplicas, &milvuspb.ShardReplica{
LeaderID: task.NodeID,
LeaderAddr: nodeInfo.(*queryNode).address,
DmChannelName: task.WatchDmChannelsRequest.Infos[0].ChannelName,
})
}
log.Debug("loadCollectionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())), zap.Int64("msgID", lct.Base.MsgID))
}
metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
log.Debug("loadCollectionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID))
}
metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
log.Debug("loadCollectionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID))
err = lct.meta.addCollection(collectionID, querypb.LoadType_LoadCollection, lct.Schema)
if err != nil {
@ -470,13 +536,23 @@ func (lct *loadCollectionTask) execute(ctx context.Context) error {
lct.setResultInfo(err)
return err
}
err = lct.meta.addPartitions(collectionID, toLoadPartitionIDs)
err = lct.meta.addPartitions(collectionID, partitionIds)
if err != nil {
log.Error("loadCollectionTask: add partitions to meta failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toLoadPartitionIDs), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
log.Error("loadCollectionTask: add partitions to meta failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIds), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
lct.setResultInfo(err)
return err
}
for _, replica := range replicas {
err = lct.meta.addReplica(replica)
if err != nil {
log.Error("failed to add replica", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIds), zap.Int64("msgID", lct.Base.MsgID), zap.Int32("replicaNumber", lct.ReplicaNumber))
lct.setResultInfo(err)
return err
}
}
log.Debug("LoadCollection execute done",
zap.Int64("msgID", lct.getTaskID()),
zap.Int64("collectionID", collectionID))
@ -596,6 +672,7 @@ func (rct *releaseCollectionTask) execute(ctx context.Context) error {
return err
}
// TODO(yah01): broadcast to all nodes? Or only nodes serve the collection
onlineNodeIDs := rct.cluster.onlineNodeIDs()
for _, nodeID := range onlineNodeIDs {
req := proto.Clone(rct.ReleaseCollectionRequest).(*querypb.ReleaseCollectionRequest)
@ -720,6 +797,12 @@ func (lpt *loadPartitionTask) updateTaskProcess() {
}
func (lpt *loadPartitionTask) preExecute(context.Context) error {
if lpt.ReplicaNumber < 1 {
log.Warn("replicaNumber is less than 1 for load partitions request, will set it to 1",
zap.Int32("replicaNumber", lpt.ReplicaNumber))
lpt.ReplicaNumber = 1
}
collectionID := lpt.CollectionID
lpt.setResultInfo(nil)
log.Debug("start do loadPartitionTask",
@ -733,10 +816,15 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
collectionID := lpt.CollectionID
partitionIDs := lpt.PartitionIDs
var loadSegmentReqs []*querypb.LoadSegmentsRequest
var watchDmChannelReqs []*querypb.WatchDmChannelsRequest
var deltaChannelInfos []*datapb.VchannelInfo
var dmChannelInfos []*datapb.VchannelInfo
var (
replicas = make([]*milvuspb.ReplicaInfo, lpt.ReplicaNumber)
replicaIds = make([]int64, lpt.ReplicaNumber)
segmentLoadInfos = make([]*querypb.SegmentLoadInfo, 0)
deltaChannelInfos = make([]*datapb.VchannelInfo, 0)
dmChannelInfos = make([]*datapb.VchannelInfo, 0)
collectionSize uint64
)
for _, partitionID := range partitionIDs {
vChannelInfos, binlogs, err := lpt.broker.getRecoveryInfo(lpt.ctx, collectionID, partitionID)
if err != nil {
@ -747,20 +835,8 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
for _, segmentBingLog := range binlogs {
segmentLoadInfo := lpt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, lpt.Schema)
msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: lpt.Schema,
CollectionID: collectionID,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadPartition,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
},
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
segmentLoadInfos = append(segmentLoadInfos, segmentLoadInfo)
collectionSize += uint64(segmentLoadInfo.SegmentSize)
}
for _, info := range vChannelInfos {
@ -784,37 +860,105 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
}
mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
for _, info := range mergedDmChannel {
msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
Infos: []*datapb.VchannelInfo{info},
Schema: lpt.Schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadPartition,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
},
for i := range replicas {
replica, err := lpt.meta.generateReplica(lpt.CollectionID, partitionIDs)
if err != nil {
lpt.setResultInfo(err)
return err
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
replicas[i] = replica
replicaIds[i] = replica.ReplicaID
}
internalTasks, err := assignInternalTask(ctx, lpt, lpt.meta, lpt.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, nil)
err = lpt.cluster.assignNodesToReplicas(ctx, replicas, collectionSize)
if err != nil {
log.Error("loadPartitionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
log.Error("failed to assign nodes to replicas",
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", partitionIDs),
zap.Int64("msgID", lpt.Base.MsgID),
zap.Int32("replicaNumber", lpt.ReplicaNumber),
zap.Error(err))
lpt.setResultInfo(err)
return err
}
for _, internalTask := range internalTasks {
lpt.addChildTask(internalTask)
log.Debug("loadPartitionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())))
for _, replica := range replicas {
var (
loadSegmentReqs = []*querypb.LoadSegmentsRequest{}
watchDmChannelReqs = []*querypb.WatchDmChannelsRequest{}
)
for _, segmentLoadInfo := range segmentLoadInfos {
msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: lpt.Schema,
CollectionID: collectionID,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadPartition,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
},
ReplicaID: replica.ReplicaID,
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
}
for _, info := range mergedDmChannel {
msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
Infos: []*datapb.VchannelInfo{info},
Schema: lpt.Schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: querypb.LoadType_LoadPartition,
CollectionID: collectionID,
PartitionIDs: partitionIDs,
},
ReplicaID: replica.GetReplicaID(),
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
}
internalTasks, err := assignInternalTask(ctx, lpt, lpt.meta, lpt.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, replica.GetNodeIds(), -1)
if err != nil {
log.Error("loadPartitionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
lpt.setResultInfo(err)
return err
}
for _, internalTask := range internalTasks {
lpt.addChildTask(internalTask)
if task, ok := internalTask.(*watchDmChannelTask); ok {
nodeInfo, err := lpt.cluster.getNodeInfoByID(task.NodeID)
if err != nil {
log.Error("loadCollectionTask: get shard leader node info failed",
zap.Int64("collectionID", collectionID),
zap.Int64("msgID", lpt.Base.MsgID),
zap.Int64("nodeID", task.NodeID),
zap.Error(err))
lpt.setResultInfo(err)
return err
}
replica.ShardReplicas = append(replica.ShardReplicas, &milvuspb.ShardReplica{
LeaderID: task.NodeID,
LeaderAddr: nodeInfo.(*queryNode).address,
DmChannelName: task.WatchDmChannelsRequest.Infos[0].ChannelName,
})
}
log.Debug("loadPartitionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())))
}
metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
log.Debug("loadPartitionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID))
}
metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
log.Debug("loadPartitionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID))
err = lpt.meta.addCollection(collectionID, querypb.LoadType_LoadPartition, lpt.Schema)
if err != nil {
@ -830,6 +974,15 @@ func (lpt *loadPartitionTask) execute(ctx context.Context) error {
return err
}
for _, replica := range replicas {
err = lpt.meta.addReplica(replica)
if err != nil {
log.Error("failed to add replica", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID), zap.Int32("replicaNumber", lpt.ReplicaNumber))
lpt.setResultInfo(err)
return err
}
}
log.Debug("loadPartitionTask Execute done",
zap.Int64("msgID", lpt.getTaskID()),
zap.Int64("collectionID", collectionID),
@ -1098,6 +1251,7 @@ func (lst *loadSegmentTask) reschedule(ctx context.Context) ([]task, error) {
CollectionID: lst.GetCollectionID(),
PartitionIDs: lst.GetLoadMeta().GetPartitionIDs(),
},
ReplicaID: lst.ReplicaID,
}
loadSegmentReqs = append(loadSegmentReqs, req)
}
@ -1110,7 +1264,7 @@ func (lst *loadSegmentTask) reschedule(ctx context.Context) ([]task, error) {
if lst.getParentTask().getTriggerCondition() == querypb.TriggerCondition_NodeDown {
wait2AssignTaskSuccess = true
}
reScheduledTasks, err := assignInternalTask(ctx, lst.getParentTask(), lst.meta, lst.cluster, loadSegmentReqs, nil, wait2AssignTaskSuccess, lst.excludeNodeIDs, nil)
reScheduledTasks, err := assignInternalTask(ctx, lst.getParentTask(), lst.meta, lst.cluster, loadSegmentReqs, nil, wait2AssignTaskSuccess, lst.excludeNodeIDs, nil, lst.ReplicaID)
if err != nil {
log.Error("loadSegment reschedule failed", zap.Int64s("excludeNodes", lst.excludeNodeIDs), zap.Int64("taskID", lst.getTaskID()), zap.Error(err))
return nil, err
@ -1277,6 +1431,7 @@ func (wdt *watchDmChannelTask) reschedule(ctx context.Context) ([]task, error) {
CollectionID: collectionID,
PartitionIDs: wdt.GetLoadMeta().GetPartitionIDs(),
},
ReplicaID: wdt.GetReplicaID(),
}
watchDmChannelReqs = append(watchDmChannelReqs, req)
}
@ -1289,7 +1444,7 @@ func (wdt *watchDmChannelTask) reschedule(ctx context.Context) ([]task, error) {
if wdt.getParentTask().getTriggerCondition() == querypb.TriggerCondition_NodeDown {
wait2AssignTaskSuccess = true
}
reScheduledTasks, err := assignInternalTask(ctx, wdt.parentTask, wdt.meta, wdt.cluster, nil, watchDmChannelReqs, wait2AssignTaskSuccess, wdt.excludeNodeIDs, nil)
reScheduledTasks, err := assignInternalTask(ctx, wdt.parentTask, wdt.meta, wdt.cluster, nil, watchDmChannelReqs, wait2AssignTaskSuccess, wdt.excludeNodeIDs, nil, wdt.ReplicaID)
if err != nil {
log.Error("watchDmChannel reschedule failed", zap.Int64("taskID", wdt.getTaskID()), zap.Int64s("excludeNodes", wdt.excludeNodeIDs), zap.Error(err))
return nil, err
@ -1578,12 +1733,30 @@ func (ht *handoffTask) execute(ctx context.Context) error {
ht.setResultInfo(err)
return err
}
internalTasks, err := assignInternalTask(ctx, ht, ht.meta, ht.cluster, []*querypb.LoadSegmentsRequest{loadSegmentReq}, nil, true, nil, nil)
replicas, err := ht.meta.getReplicasByCollectionID(collectionID)
if err != nil {
log.Error("handoffTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID), zap.Error(err))
ht.setResultInfo(err)
return err
}
var internalTasks []task
for _, replica := range replicas {
if len(replica.NodeIds) == 0 {
log.Warn("handoffTask: find empty replica", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID), zap.Int64("replicaID", replica.GetReplicaID()))
err := fmt.Errorf("replica %d of collection %d is empty", replica.GetReplicaID(), collectionID)
ht.setResultInfo(err)
return err
}
// we should copy a request because assignInternalTask will change DstNodeID of LoadSegmentRequest
clonedReq := proto.Clone(loadSegmentReq).(*querypb.LoadSegmentsRequest)
clonedReq.ReplicaID = replica.ReplicaID
tasks, err := assignInternalTask(ctx, ht, ht.meta, ht.cluster, []*querypb.LoadSegmentsRequest{clonedReq}, nil, true, nil, nil, replica.GetReplicaID())
if err != nil {
log.Error("handoffTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID), zap.Error(err))
ht.setResultInfo(err)
return err
}
internalTasks = append(internalTasks, tasks...)
}
for _, internalTask := range internalTasks {
ht.addChildTask(internalTask)
log.Debug("handoffTask: add a childTask", zap.Int32("task type", int32(internalTask.msgType())), zap.Int64("segmentID", segmentID))
@ -1629,9 +1802,10 @@ func (ht *handoffTask) rollBack(ctx context.Context) []task {
type loadBalanceTask struct {
*baseTask
*querypb.LoadBalanceRequest
broker *globalMetaBroker
cluster Cluster
meta Meta
broker *globalMetaBroker
cluster Cluster
meta Meta
replicaID int64
}
func (lbt *loadBalanceTask) msgBase() *commonpb.MsgBase {
@ -1660,16 +1834,76 @@ func (lbt *loadBalanceTask) preExecute(context.Context) error {
return nil
}
func (lbt *loadBalanceTask) checkForManualLoadBalance() error {
// check segments belong to the same collection
collectionID := lbt.GetCollectionID()
for _, sid := range lbt.SealedSegmentIDs {
segment, err := lbt.meta.getSegmentInfoByID(sid)
if err != nil {
return err
}
if collectionID == 0 {
collectionID = segment.GetCollectionID()
} else if collectionID != segment.GetCollectionID() {
err := errors.New("segments of a load balance task do not belong to the same collection")
return err
}
}
if collectionID == 0 {
err := errors.New("a load balance task has to specify a collectionID or pass segments of a collection")
return err
}
// check source and dst nodes belong to the same replica
var replicaID int64 = -1
for _, nodeID := range lbt.SourceNodeIDs {
replica, err := lbt.getReplica(nodeID, collectionID)
if err != nil {
return err
}
if replicaID == -1 {
replicaID = replica.GetReplicaID()
} else if replicaID != replica.GetReplicaID() {
err := errors.New("source nodes and destination nodes must be in the same replica group")
return err
}
}
if replicaID == -1 {
return errors.New("source nodes is empty")
}
for _, nodeID := range lbt.DstNodeIDs {
replica, err := lbt.getReplica(nodeID, collectionID)
if err != nil {
return err
}
if replicaID != replica.GetReplicaID() {
err := errors.New("source nodes and destination nodes must be in the same replica group")
return err
}
}
lbt.replicaID = replicaID
log.Debug("start do loadBalanceTask",
zap.Int32("trigger type", int32(lbt.triggerCondition)),
zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs),
zap.Any("balanceReason", lbt.BalanceReason),
zap.Int64("taskID", lbt.getTaskID()))
return nil
}
func (lbt *loadBalanceTask) execute(ctx context.Context) error {
defer lbt.reduceRetryCount()
if lbt.triggerCondition == querypb.TriggerCondition_NodeDown {
segmentID2Info := make(map[UniqueID]*querypb.SegmentInfo)
dmChannel2WatchInfo := make(map[string]*querypb.DmChannelWatchInfo)
loadSegmentReqs := make([]*querypb.LoadSegmentsRequest, 0)
watchDmChannelReqs := make([]*querypb.WatchDmChannelsRequest, 0)
recoveredCollectionIDs := make(map[UniqueID]struct{})
var internalTasks []task
for _, nodeID := range lbt.SourceNodeIDs {
segmentID2Info := make(map[UniqueID]*querypb.SegmentInfo)
dmChannel2WatchInfo := make(map[string]*querypb.DmChannelWatchInfo)
recoveredCollectionIDs := make(map[UniqueID]struct{})
segmentInfos := lbt.meta.getSegmentInfosByNode(nodeID)
for _, segmentInfo := range segmentInfos {
segmentID2Info[segmentInfo.SegmentID] = segmentInfo
@ -1680,113 +1914,127 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
dmChannel2WatchInfo[watchInfo.DmChannel] = watchInfo
recoveredCollectionIDs[watchInfo.CollectionID] = struct{}{}
}
}
for collectionID := range recoveredCollectionIDs {
collectionInfo, err := lbt.meta.getCollectionInfoByID(collectionID)
if err != nil {
log.Error("loadBalanceTask: get collectionInfo from meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
lbt.setResultInfo(err)
return err
}
schema := collectionInfo.Schema
var deltaChannelInfos []*datapb.VchannelInfo
var dmChannelInfos []*datapb.VchannelInfo
var toRecoverPartitionIDs []UniqueID
if collectionInfo.LoadType == querypb.LoadType_LoadCollection {
toRecoverPartitionIDs, err = lbt.broker.showPartitionIDs(ctx, collectionID)
for collectionID := range recoveredCollectionIDs {
loadSegmentReqs := make([]*querypb.LoadSegmentsRequest, 0)
watchDmChannelReqs := make([]*querypb.WatchDmChannelsRequest, 0)
collectionInfo, err := lbt.meta.getCollectionInfoByID(collectionID)
if err != nil {
log.Error("loadBalanceTask: show collection's partitionIDs failed", zap.Int64("collectionID", collectionID), zap.Error(err))
log.Error("loadBalanceTask: get collectionInfo from meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
return err
}
} else {
toRecoverPartitionIDs = collectionInfo.PartitionIDs
}
log.Debug("loadBalanceTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toRecoverPartitionIDs))
schema := collectionInfo.Schema
var deltaChannelInfos []*datapb.VchannelInfo
var dmChannelInfos []*datapb.VchannelInfo
for _, partitionID := range toRecoverPartitionIDs {
vChannelInfos, binlogs, err := lbt.broker.getRecoveryInfo(lbt.ctx, collectionID, partitionID)
var toRecoverPartitionIDs []UniqueID
if collectionInfo.LoadType == querypb.LoadType_LoadCollection {
toRecoverPartitionIDs, err = lbt.broker.showPartitionIDs(ctx, collectionID)
if err != nil {
log.Error("loadBalanceTask: show collection's partitionIDs failed", zap.Int64("collectionID", collectionID), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
} else {
toRecoverPartitionIDs = collectionInfo.PartitionIDs
}
log.Debug("loadBalanceTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toRecoverPartitionIDs))
replica, err := lbt.getReplica(nodeID, collectionID)
if err != nil {
log.Error("loadBalanceTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Error(err))
lbt.setResultInfo(err)
return err
}
for _, partitionID := range toRecoverPartitionIDs {
vChannelInfos, binlogs, err := lbt.broker.getRecoveryInfo(lbt.ctx, collectionID, partitionID)
if err != nil {
log.Error("loadBalanceTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
for _, segmentBingLog := range binlogs {
segmentID := segmentBingLog.SegmentID
if info, ok := segmentID2Info[segmentID]; ok {
segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, schema)
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
for _, replica := range info.ReplicaIds {
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: schema,
CollectionID: collectionID,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: collectionInfo.LoadType,
CollectionID: collectionID,
PartitionIDs: toRecoverPartitionIDs,
},
ReplicaID: replica,
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
}
}
}
for _, info := range vChannelInfos {
deltaChannel, err := generateWatchDeltaChannelInfo(info)
if err != nil {
log.Error("loadBalanceTask: generateWatchDeltaChannelInfo failed", zap.Int64("collectionID", collectionID), zap.String("channelName", info.ChannelName), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
deltaChannelInfos = append(deltaChannelInfos, deltaChannel)
dmChannelInfos = append(dmChannelInfos, info)
}
}
mergedDeltaChannel := mergeWatchDeltaChannelInfo(deltaChannelInfos)
// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
err = lbt.meta.setDeltaChannel(collectionID, mergedDeltaChannel)
if err != nil {
log.Error("loadBalanceTask: set delta channel info meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
for _, segmentBingLog := range binlogs {
segmentID := segmentBingLog.SegmentID
if _, ok := segmentID2Info[segmentID]; ok {
segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, schema)
mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
for channelName, vChannelInfo := range mergedDmChannel {
if info, ok := dmChannel2WatchInfo[channelName]; ok {
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: schema,
CollectionID: collectionID,
Infos: []*datapb.VchannelInfo{vChannelInfo},
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: collectionInfo.LoadType,
CollectionID: collectionID,
PartitionIDs: toRecoverPartitionIDs,
},
ReplicaID: info.ReplicaID,
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
watchRequest.PartitionIDs = toRecoverPartitionIDs
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
}
}
for _, info := range vChannelInfos {
deltaChannel, err := generateWatchDeltaChannelInfo(info)
if err != nil {
log.Error("loadBalanceTask: generateWatchDeltaChannelInfo failed", zap.Int64("collectionID", collectionID), zap.String("channelName", info.ChannelName), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
deltaChannelInfos = append(deltaChannelInfos, deltaChannel)
dmChannelInfos = append(dmChannelInfos, info)
tasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, watchDmChannelReqs, true, lbt.SourceNodeIDs, lbt.DstNodeIDs, replica.GetReplicaID())
if err != nil {
log.Error("loadBalanceTask: assign child task failed", zap.Int64("sourceNodeID", nodeID))
lbt.setResultInfo(err)
panic(err)
}
internalTasks = append(internalTasks, tasks...)
}
mergedDeltaChannel := mergeWatchDeltaChannelInfo(deltaChannelInfos)
// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
err = lbt.meta.setDeltaChannel(collectionID, mergedDeltaChannel)
if err != nil {
log.Error("loadBalanceTask: set delta channel info meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
lbt.setResultInfo(err)
panic(err)
}
mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
for channelName, vChannelInfo := range mergedDmChannel {
if _, ok := dmChannel2WatchInfo[channelName]; ok {
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchDmChannels
watchRequest := &querypb.WatchDmChannelsRequest{
Base: msgBase,
CollectionID: collectionID,
Infos: []*datapb.VchannelInfo{vChannelInfo},
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
LoadType: collectionInfo.LoadType,
CollectionID: collectionID,
PartitionIDs: toRecoverPartitionIDs,
},
}
if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
watchRequest.PartitionIDs = toRecoverPartitionIDs
}
watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
}
}
}
internalTasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, watchDmChannelReqs, true, lbt.SourceNodeIDs, lbt.DstNodeIDs)
if err != nil {
log.Error("loadBalanceTask: assign child task failed", zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs))
lbt.setResultInfo(err)
panic(err)
}
for _, internalTask := range internalTasks {
lbt.addChildTask(internalTask)
@ -1796,6 +2044,10 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
}
if lbt.triggerCondition == querypb.TriggerCondition_LoadBalance {
if err := lbt.checkForManualLoadBalance(); err != nil {
lbt.setResultInfo(err)
return err
}
if len(lbt.SourceNodeIDs) == 0 {
err := errors.New("loadBalanceTask: empty source Node list to balance")
log.Error(err.Error())
@ -1879,17 +2131,21 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
log.Warn("loadBalanceTask: can't find binlog of segment to balance, may be has been compacted", zap.Int64("segmentID", segmentID))
continue
}
segmentBingLog := segmentID2Binlog[segmentID]
segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, collectionInfo.Schema)
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: collectionInfo.Schema,
CollectionID: collectionID,
for _, replica := range segmentInfo.ReplicaIds {
segmentBingLog := segmentID2Binlog[segmentID]
segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, collectionInfo.Schema)
msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_LoadSegments
loadSegmentReq := &querypb.LoadSegmentsRequest{
Base: msgBase,
Infos: []*querypb.SegmentLoadInfo{segmentLoadInfo},
Schema: collectionInfo.Schema,
CollectionID: collectionID,
ReplicaID: replica,
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
}
loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
}
for _, info := range dmChannelInfos {
@ -1909,7 +2165,7 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
return err
}
}
internalTasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, nil, false, lbt.SourceNodeIDs, lbt.DstNodeIDs)
internalTasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, nil, false, lbt.SourceNodeIDs, lbt.DstNodeIDs, lbt.replicaID)
if err != nil {
log.Error("loadBalanceTask: assign child task failed", zap.Any("balance request", lbt.LoadBalanceRequest))
lbt.setResultInfo(err)
@ -1930,6 +2186,19 @@ func (lbt *loadBalanceTask) execute(ctx context.Context) error {
return nil
}
func (lbt *loadBalanceTask) getReplica(nodeID, collectionID int64) (*milvuspb.ReplicaInfo, error) {
replicas, err := lbt.meta.getReplicasByNodeID(nodeID)
if err != nil {
return nil, err
}
for _, replica := range replicas {
if replica.GetCollectionID() == collectionID {
return replica, nil
}
}
return nil, fmt.Errorf("unable to find replicas of collection %d and node %d", collectionID, nodeID)
}
func (lbt *loadBalanceTask) postExecute(context.Context) error {
if lbt.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
lbt.clearChildTasks()
@ -1960,17 +2229,17 @@ func assignInternalTask(ctx context.Context,
parentTask task, meta Meta, cluster Cluster,
loadSegmentRequests []*querypb.LoadSegmentsRequest,
watchDmChannelRequests []*querypb.WatchDmChannelsRequest,
wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) ([]task, error) {
wait bool, excludeNodeIDs []int64, includeNodeIDs []int64, replicaID int64) ([]task, error) {
log.Debug("assignInternalTask: start assign task to query node")
internalTasks := make([]task, 0)
err := cluster.allocateSegmentsToQueryNode(ctx, loadSegmentRequests, wait, excludeNodeIDs, includeNodeIDs)
err := cluster.allocateSegmentsToQueryNode(ctx, loadSegmentRequests, wait, excludeNodeIDs, includeNodeIDs, replicaID)
if err != nil {
log.Error("assignInternalTask: assign segment to node failed", zap.Any("load segments requests", loadSegmentRequests))
return nil, err
}
log.Debug("assignInternalTask: assign segment to node success")
err = cluster.allocateChannelsToQueryNode(ctx, watchDmChannelRequests, wait, excludeNodeIDs)
err = cluster.allocateChannelsToQueryNode(ctx, watchDmChannelRequests, wait, excludeNodeIDs, includeNodeIDs, replicaID)
if err != nil {
log.Error("assignInternalTask: assign dmChannel to node failed", zap.Any("watch dmChannel requests", watchDmChannelRequests))
return nil, err

View File

@ -905,6 +905,8 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
sealedSegmentChangeInfos, err = meta.removeGlobalSealedSegInfos(collectionID, req.PartitionIDs)
default:
// save new segmentInfo when load segment
segments := make(map[UniqueID]*querypb.SegmentInfo)
for _, childTask := range triggerTask.getChildTask() {
if childTask.msgType() == commonpb.MsgType_LoadSegments {
req := childTask.(*loadSegmentTask).LoadSegmentsRequest
@ -912,18 +914,30 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
for _, loadInfo := range req.Infos {
collectionID := loadInfo.CollectionID
segmentID := loadInfo.SegmentID
segmentInfo := &querypb.SegmentInfo{
SegmentID: segmentID,
CollectionID: loadInfo.CollectionID,
PartitionID: loadInfo.PartitionID,
NodeID: dstNodeID,
SegmentState: commonpb.SegmentState_Sealed,
CompactionFrom: loadInfo.CompactionFrom,
segment, ok := segments[segmentID]
if !ok {
segment = &querypb.SegmentInfo{
SegmentID: segmentID,
CollectionID: loadInfo.CollectionID,
PartitionID: loadInfo.PartitionID,
NodeID: dstNodeID,
DmChannel: loadInfo.InsertChannel,
SegmentState: commonpb.SegmentState_Sealed,
CompactionFrom: loadInfo.CompactionFrom,
ReplicaIds: []UniqueID{req.ReplicaID},
NodeIds: []UniqueID{dstNodeID},
}
segments[segmentID] = segment
} else {
segment.ReplicaIds = append(segment.ReplicaIds, req.ReplicaID)
segment.NodeIds = append(segment.NodeIds, dstNodeID)
}
if _, ok := segmentInfosToSave[collectionID]; !ok {
segmentInfosToSave[collectionID] = make([]*querypb.SegmentInfo, 0)
}
segmentInfosToSave[collectionID] = append(segmentInfosToSave[collectionID], segmentInfo)
segmentInfosToSave[collectionID] = append(segmentInfosToSave[collectionID], segment)
}
}
}

View File

@ -226,6 +226,7 @@ func TestUnMarshalTask(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
ReplicaNumber: 1,
},
}
blobs, err := loadTask.marshal()
@ -478,6 +479,7 @@ func TestReloadTaskFromKV(t *testing.T) {
Timestamp: 1,
MsgType: commonpb.MsgType_LoadCollection,
},
ReplicaNumber: 1,
},
}
triggerBlobs, err := triggerTask.marshal()

View File

@ -35,8 +35,9 @@ func genLoadCollectionTask(ctx context.Context, queryCoord *QueryCoord) *loadCol
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
loadCollectionTask := &loadCollectionTask{
@ -55,9 +56,10 @@ func genLoadPartitionTask(ctx context.Context, queryCoord *QueryCoord) *loadPart
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadPartitions,
},
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
PartitionIDs: []UniqueID{defaultPartitionID},
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
loadPartitionTask := &loadPartitionTask{
@ -162,8 +164,9 @@ func genWatchDmChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID i
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
baseParentTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
baseParentTask.taskID = 10
@ -219,8 +222,9 @@ func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int6
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
},
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
CollectionID: defaultCollectionID,
Schema: genDefaultCollectionSchema(false),
ReplicaNumber: 1,
}
baseParentTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
baseParentTask.taskID = 10
@ -297,9 +301,15 @@ func TestTriggerTask(t *testing.T) {
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node, err := startQueryNodeServer(ctx)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node3, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node3.queryNodeID)
t.Run("Test LoadCollection", func(t *testing.T) {
loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
@ -328,10 +338,38 @@ func TestTriggerTask(t *testing.T) {
assert.Nil(t, err)
})
err = node.stop()
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
t.Run("Test LoadCollection With Replicas", func(t *testing.T) {
loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
loadCollectionTask.ReplicaNumber = 3
err = queryCoord.scheduler.processTask(loadCollectionTask)
assert.Nil(t, err)
})
t.Run("Test ReleaseCollection With Replicas", func(t *testing.T) {
releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
err = queryCoord.scheduler.processTask(releaseCollectionTask)
assert.Nil(t, err)
})
t.Run("Test LoadPartition With Replicas", func(t *testing.T) {
loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)
loadPartitionTask.ReplicaNumber = 3
err = queryCoord.scheduler.processTask(loadPartitionTask)
assert.Nil(t, err)
})
t.Run("Test ReleasePartition With Replicas", func(t *testing.T) {
releasePartitionTask := genReleaseCollectionTask(ctx, queryCoord)
err = queryCoord.scheduler.processTask(releasePartitionTask)
assert.Nil(t, err)
})
assert.NoError(t, node1.stop())
assert.NoError(t, node2.stop())
assert.NoError(t, node3.stop())
assert.NoError(t, queryCoord.Stop())
assert.NoError(t, removeAllSession())
}
func Test_LoadCollectionAfterLoadPartition(t *testing.T) {
@ -439,6 +477,30 @@ func Test_LoadCollectionExecuteFail(t *testing.T) {
assert.Nil(t, err)
}
func TestLoadCollectionNoEnoughNodeFail(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
loadCollectionTask.ReplicaNumber = 3
err = queryCoord.scheduler.processTask(loadCollectionTask)
assert.Error(t, err)
assert.NoError(t, node1.stop())
assert.NoError(t, node2.stop())
assert.NoError(t, queryCoord.Stop())
assert.NoError(t, removeAllSession())
}
func Test_LoadPartitionAssignTaskFail(t *testing.T) {
refreshParams()
ctx := context.Background()
@ -577,10 +639,11 @@ func Test_RescheduleDmChannel(t *testing.T) {
loadCollectionTask := watchDmChannelTask.parentTask
queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)
waitTaskFinalState(loadCollectionTask, taskExpired)
waitTaskFinalState(loadCollectionTask, taskFailed)
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
}
@ -604,7 +667,7 @@ func Test_RescheduleSegment(t *testing.T) {
loadCollectionTask := loadSegmentTask.parentTask
queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)
waitTaskFinalState(loadCollectionTask, taskExpired)
waitTaskFinalState(loadCollectionTask, taskFailed)
queryCoord.Stop()
err = removeAllSession()
@ -703,7 +766,7 @@ func Test_AssignInternalTask(t *testing.T) {
loadSegmentRequests = append(loadSegmentRequests, req)
}
internalTasks, err := assignInternalTask(queryCoord.loopCtx, loadCollectionTask, queryCoord.meta, queryCoord.cluster, loadSegmentRequests, nil, false, nil, nil)
internalTasks, err := assignInternalTask(queryCoord.loopCtx, loadCollectionTask, queryCoord.meta, queryCoord.cluster, loadSegmentRequests, nil, false, nil, nil, -1)
assert.Nil(t, err)
assert.NotEqual(t, 1, len(internalTasks))
@ -870,6 +933,7 @@ func TestLoadBalanceSegmentsTask(t *testing.T) {
MsgType: commonpb.MsgType_LoadBalanceSegments,
},
SourceNodeIDs: []int64{node1.queryNodeID},
CollectionID: defaultCollectionID,
},
broker: queryCoord.broker,
cluster: queryCoord.cluster,

View File

@ -95,6 +95,29 @@ func (h *historical) retrieve(collID UniqueID, partIDs []UniqueID, vcm storage.C
return retrieveResults, retrieveSegmentIDs, retrievePartIDs, nil
}
// retrieveBySegmentIDs retrieves records from segments specified by their IDs
func (h *historical) retrieveBySegmentIDs(collID UniqueID, segmentIDs []UniqueID, vcm storage.ChunkManager, plan *RetrievePlan) (
retrieveResults []*segcorepb.RetrieveResults, err error) {
for _, segID := range segmentIDs {
seg, err := h.replica.getSegmentByID(segID)
if err != nil {
return nil, err
}
result, err := seg.retrieve(plan)
if err != nil {
return nil, err
}
err = seg.fillIndexedFieldsData(collID, vcm, result)
if err != nil {
return nil, err
}
retrieveResults = append(retrieveResults, result)
}
return retrieveResults, nil
}
// search will search all the target segments in historical
func (h *historical) search(searchReqs []*searchRequest, collID UniqueID, partIDs []UniqueID, plan *SearchPlan,
searchTs Timestamp) (searchResults []*SearchResult, searchSegmentIDs []UniqueID, searchPartIDs []UniqueID, err error) {

View File

@ -18,7 +18,6 @@ package querynode
import (
"context"
"errors"
"fmt"
"go.uber.org/zap"
@ -227,7 +226,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmC
log.Error(err.Error())
return status, nil
}
log.Debug("watchDmChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
log.Debug("watchDmChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID), zap.Int64("replicaID", in.GetReplicaID()))
waitFunc := func() (*commonpb.Status, error) {
err = dct.WaitToFinish()
@ -557,12 +556,119 @@ func (node *QueryNode) isHealthy() bool {
// Search performs replica search tasks.
func (node *QueryNode) Search(ctx context.Context, req *queryPb.SearchRequest) (*internalpb.SearchResults, error) {
return nil, errors.New("not implemented")
if !node.isHealthy() {
return &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID),
},
}, nil
}
log.Debug("Received SearchRequest", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()))
if node.queryShardService == nil {
return &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "queryShardService is nil",
},
}, nil
}
if !node.queryShardService.hasQueryShard(req.GetDmlChannel()) {
err := node.queryShardService.addQueryShard(req.Req.CollectionID, req.GetDmlChannel(), 0) // TODO: add replicaID in request or remove it in query shard
if err != nil {
return &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
}
qs, err := node.queryShardService.getQueryShard(req.GetDmlChannel())
if err != nil {
return &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
results, err := qs.search(ctx, req)
if err != nil {
log.Warn("QueryService failed to search", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Error(err))
return &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
log.Debug("Search Shard Done", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()))
return results, err
}
// Query performs replica query tasks.
func (node *QueryNode) Query(ctx context.Context, req *queryPb.QueryRequest) (*internalpb.RetrieveResults, error) {
return nil, errors.New("not implemented")
if !node.isHealthy() {
return &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID),
},
}, nil
}
log.Debug("Received QueryRequest", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()))
if node.queryShardService == nil {
return &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "queryShardService is nil",
},
}, nil
}
if !node.queryShardService.hasQueryShard(req.GetDmlChannel()) {
err := node.queryShardService.addQueryShard(req.Req.CollectionID, req.GetDmlChannel(), 0) // TODO: add replicaID in request or remove it in query shard
if err != nil {
return &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
}
qs, err := node.queryShardService.getQueryShard(req.GetDmlChannel())
if err != nil {
return &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
results, err := qs.query(ctx, req)
if err != nil {
log.Warn("QueryService failed to query", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Error(err))
return &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
}, nil
}
log.Debug("Query Shard Done", zap.String("vchannel", req.GetDmlChannel()), zap.Int64s("segmentIDs", req.GetSegmentIDs()))
return results, nil
}
// GetMetrics return system infos of the query node, such as total memory, memory usage, cpu usage ...

View File

@ -584,8 +584,16 @@ func TestImpl_Search(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
require.NoError(t, err)
_, err = node.Search(ctx, nil)
assert.Error(t, err)
req, err := genSimpleSearchRequest(IndexFaissIDMap)
require.NoError(t, err)
node.queryShardService.addQueryShard(defaultCollectionID, defaultDMLChannel, defaultReplicaID)
_, err = node.Search(ctx, &queryPb.SearchRequest{
Req: req,
DmlChannel: defaultDMLChannel,
})
assert.NoError(t, err)
}
func TestImpl_Query(t *testing.T) {
@ -595,6 +603,14 @@ func TestImpl_Query(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
require.NoError(t, err)
_, err = node.Query(ctx, nil)
assert.Error(t, err)
req, err := genSimpleRetrieveRequest()
require.NoError(t, err)
node.queryShardService.addQueryShard(defaultCollectionID, defaultDMLChannel, defaultReplicaID)
_, err = node.Query(ctx, &queryPb.QueryRequest{
Req: req,
DmlChannel: defaultDMLChannel,
})
assert.NoError(t, err)
}

View File

@ -79,6 +79,7 @@ const (
defaultCollectionID = UniqueID(0)
defaultPartitionID = UniqueID(1)
defaultSegmentID = UniqueID(2)
defaultReplicaID = UniqueID(10)
defaultCollectionName = "query-node-unittest-default-collection"
defaultPartitionName = "query-node-unittest-default-partition"
@ -1748,18 +1749,27 @@ func genSimpleQueryNodeWithMQFactory(ctx context.Context, fac dependency.Factory
// start task scheduler
go node.scheduler.Start()
/*
vectorStorage, err := node.factory.NewVectorStorageChunkManager(ctx)
if err != nil {
return nil, err
}
cacheStorage, err := node.factory.NewCacheStorageChunkManager(ctx)
if err != nil {
return nil, err
}*/
/*
qs := newQueryService(ctx, node.historical, node.streaming, vectorStorage, cacheStorage, fac)
defer qs.close()
node.queryService = qs
qs := newQueryService(ctx, node.historical, node.streaming, node.msFactory)
defer qs.close()
node.queryService = qs*/
vectorStorage, err := node.factory.NewVectorStorageChunkManager(ctx)
if err != nil {
return nil, err
}
cacheStorage, err := node.factory.NewCacheStorageChunkManager(ctx)
if err != nil {
return nil, err
}
qs := newQueryService(ctx, node.historical, node.streaming, vectorStorage, cacheStorage, fac)
defer qs.close()
node.queryService = qs
// init shard cluster service
node.ShardClusterService = newShardClusterService(node.etcdCli, node.session, node)
node.queryShardService = newQueryShardService(node.queryNodeLoopCtx, node.historical, node.streaming, node.ShardClusterService, node.factory)
node.UpdateStateCode(internalpb.StateCode_Healthy)

View File

@ -0,0 +1,139 @@
package querynode
import (
"fmt"
"sync"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
// queryChannel simple query channel wrapper in query shard service
type queryChannel struct {
closeCh chan struct{}
collectionID int64
streaming *streaming
queryMsgStream msgstream.MsgStream
asConsumeOnce sync.Once
closeOnce sync.Once
}
// AsConsumer do AsConsumer for query msgstream and seek if position is not nil
func (qc *queryChannel) AsConsumer(channelName string, subName string, position *internalpb.MsgPosition) error {
var err error
qc.asConsumeOnce.Do(func() {
qc.queryMsgStream.AsConsumer([]string{channelName}, subName)
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
if position == nil || len(position.MsgID) == 0 {
log.Debug("QueryNode AsConsumer", zap.String("channel", channelName), zap.String("sub name", subName))
} else {
err = qc.queryMsgStream.Seek([]*internalpb.MsgPosition{position})
if err == nil {
log.Debug("querynode seek query channel: ", zap.Any("consumeChannel", channelName),
zap.String("seek position", string(position.MsgID)))
}
}
})
return err
}
// Start start a goroutine for consume msg
func (qc *queryChannel) Start() {
go qc.queryMsgStream.Start()
go qc.consumeQuery()
}
// Stop all workers and msgstream
func (qc *queryChannel) Stop() {
qc.closeOnce.Do(func() {
qc.queryMsgStream.Close()
close(qc.closeCh)
})
}
func (qc *queryChannel) consumeQuery() {
for {
select {
case <-qc.closeCh:
log.Info("query channel worker quit", zap.Int64("collection id", qc.collectionID))
return
case msgPack, ok := <-qc.queryMsgStream.Chan():
if !ok {
log.Warn("Receive Query Msg from chan failed", zap.Int64("collectionID", qc.collectionID))
return
}
if !ok || msgPack == nil || len(msgPack.Msgs) == 0 {
continue
}
for _, msg := range msgPack.Msgs {
switch sm := msg.(type) {
case *msgstream.SealedSegmentsChangeInfoMsg:
qc.adjustByChangeInfo(sm)
default:
log.Warn("ignore msgs other than SegmentChangeInfo", zap.Any("msgType", msg.Type().String()))
}
}
}
}
}
func (qc *queryChannel) adjustByChangeInfo(msg *msgstream.SealedSegmentsChangeInfoMsg) {
for _, info := range msg.Infos {
// precheck collection id, if not the same collection, skip
for _, segment := range info.OnlineSegments {
if segment.CollectionID != qc.collectionID {
return
}
}
for _, segment := range info.OfflineSegments {
if segment.CollectionID != qc.collectionID {
return
}
}
// should handle segment change in shardCluster
// for OnlineSegments:
for _, segment := range info.OnlineSegments {
/*
// 1. update global sealed segments
q.globalSegmentManager.addGlobalSegmentInfo(segment)
// 2. update excluded segment, cluster have been loaded sealed segments,
// so we need to avoid getting growing segment from flow graph.*/
qc.streaming.replica.addExcludedSegments(segment.CollectionID, []*datapb.SegmentInfo{
{
ID: segment.SegmentID,
CollectionID: segment.CollectionID,
PartitionID: segment.PartitionID,
InsertChannel: segment.DmChannel,
NumOfRows: segment.NumRows,
// TODO: add status, remove query pb segment status, use common pb segment status?
DmlPosition: &internalpb.MsgPosition{
// use max timestamp to filter out dm messages
Timestamp: typeutil.MaxTimestamp,
},
},
})
}
/*
// for OfflineSegments:
for _, segment := range info.OfflineSegments {
// 1. update global sealed segments
q.globalSegmentManager.removeGlobalSealedSegmentInfo(segment.SegmentID)
}*/
log.Info("Successfully changed global sealed segment info ",
zap.Int64("collection ", qc.collectionID),
zap.Any("online segments ", info.OnlineSegments),
zap.Any("offline segments ", info.OfflineSegments))
}
}

View File

@ -0,0 +1,174 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querynode
import (
"math/rand"
"testing"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/mq/msgstream/mqwrapper"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/stretchr/testify/mock"
)
type mockQueryMsgStream struct {
mock.Mock
}
func (m *mockQueryMsgStream) Start() {
m.Called()
}
func (m *mockQueryMsgStream) Close() {
m.Called()
}
func (m *mockQueryMsgStream) AsProducer(channels []string) {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) Produce(_ *msgstream.MsgPack) error {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) SetRepackFunc(repackFunc msgstream.RepackFunc) {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) ComputeProduceChannelIndexes(tsMsgs []msgstream.TsMsg) [][]int32 {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) GetProduceChannels() []string {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) ProduceMark(_ *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) Broadcast(_ *msgstream.MsgPack) error {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) BroadcastMark(_ *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) AsConsumer(channels []string, subName string) {
m.Called(channels, subName)
}
func (m *mockQueryMsgStream) AsConsumerWithPosition(channels []string, subName string, position mqwrapper.SubscriptionInitialPosition) {
panic("not implemented") // TODO: Implement
}
func (m *mockQueryMsgStream) Chan() <-chan *msgstream.MsgPack {
args := m.Called()
return args.Get(0).(<-chan *msgstream.MsgPack)
}
func (m *mockQueryMsgStream) Seek(offset []*msgstream.MsgPosition) error {
args := m.Called(offset)
return args.Error(0)
}
func (m *mockQueryMsgStream) GetLatestMsgID(channel string) (msgstream.MessageID, error) {
panic("not implemented") // TODO: Implement
}
func TestQueryChannel_AsConsumer(t *testing.T) {
t.Run("AsConsumer with no seek", func(t *testing.T) {
mqs := &mockQueryMsgStream{}
mqs.On("Close").Return()
qc := &queryChannel{
closeCh: make(chan struct{}),
collectionID: defaultCollectionID,
streaming: nil,
queryMsgStream: mqs,
}
mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
qc.AsConsumer(defaultDMLChannel, defaultSubName, nil)
qc.Stop()
mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
mqs.AssertNotCalled(t, "Seek")
mqs.AssertExpectations(t)
qc.Stop()
})
t.Run("AsConsumer with bad position", func(t *testing.T) {
mqs := &mockQueryMsgStream{}
mqs.On("Close").Return()
qc := &queryChannel{
closeCh: make(chan struct{}),
collectionID: defaultCollectionID,
streaming: nil,
queryMsgStream: mqs,
}
mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
qc.AsConsumer(defaultDMLChannel, defaultSubName, &internalpb.MsgPosition{})
qc.Stop()
mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
mqs.AssertNotCalled(t, "Seek")
mqs.AssertExpectations(t)
})
t.Run("AsConsumer with position", func(t *testing.T) {
mqs := &mockQueryMsgStream{}
mqs.On("Close").Return()
qc := &queryChannel{
closeCh: make(chan struct{}),
collectionID: defaultCollectionID,
streaming: nil,
queryMsgStream: mqs,
}
msgID := make([]byte, 8)
rand.Read(msgID)
pos := &internalpb.MsgPosition{MsgID: msgID}
mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
mqs.On("Seek", []*internalpb.MsgPosition{pos}).Return(nil)
qc.AsConsumer(defaultDMLChannel, defaultSubName, pos)
qc.Stop()
mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
mqs.AssertCalled(t, "Seek", []*internalpb.MsgPosition{pos})
mqs.AssertExpectations(t)
})
}

View File

@ -100,7 +100,7 @@ type QueryNode struct {
dataSyncService *dataSyncService
// internal services
queryService *queryService
//queryService *queryService
statsService *statsService
// segment loader
@ -119,6 +119,11 @@ type QueryNode struct {
vectorStorage storage.ChunkManager
cacheStorage storage.ChunkManager
etcdKV *etcdkv.EtcdKV
// shard cluster service, handle shard leader functions
ShardClusterService *ShardClusterService
//shard query service, handles shard-level query & search
queryShardService *queryShardService
}
// NewQueryNode will return a QueryNode with abnormal state.
@ -127,7 +132,6 @@ func NewQueryNode(ctx context.Context, factory dependency.Factory) *QueryNode {
node := &QueryNode{
queryNodeLoopCtx: ctx1,
queryNodeLoopCancel: cancel,
queryService: nil,
factory: factory,
}
@ -319,7 +323,7 @@ func (node *QueryNode) Init() error {
node.vectorStorage,
node.factory)
//node.statsService = newStatsService(node.queryNodeLoopCtx, node.historical.replica, node.factory)
// node.statsService = newStatsService(node.queryNodeLoopCtx, node.historical.replica, node.factory)
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, streamingReplica, historicalReplica, node.tSafeReplica, node.factory)
node.InitSegcore()
@ -329,13 +333,13 @@ func (node *QueryNode) Init() error {
// init services and manager
// TODO: pass node.streaming.replica to search service
node.queryService = newQueryService(node.queryNodeLoopCtx,
node.historical,
node.streaming,
node.vectorStorage,
node.cacheStorage,
node.factory,
qsOptWithSessionManager(node.sessionManager))
// node.queryService = newQueryService(node.queryNodeLoopCtx,
// node.historical,
// node.streaming,
// node.vectorStorage,
// node.cacheStorage,
// node.factory,
// qsOptWithSessionManager(node.sessionManager))
log.Debug("query node init successfully",
zap.Any("queryNodeID", Params.QueryNodeCfg.QueryNodeID),
@ -364,6 +368,11 @@ func (node *QueryNode) Start() error {
node.wg.Add(1)
go node.watchService(node.queryNodeLoopCtx)
// create shardClusterService for shardLeader functions.
node.ShardClusterService = newShardClusterService(node.etcdCli, node.session, node)
// create shard-level query service
node.queryShardService = newQueryShardService(node.queryNodeLoopCtx, node.historical, node.streaming, node.ShardClusterService, node.factory)
Params.QueryNodeCfg.CreatedTime = time.Now()
Params.QueryNodeCfg.UpdatedTime = time.Now()
@ -391,8 +400,13 @@ func (node *QueryNode) Stop() error {
if node.streaming != nil {
node.streaming.close()
}
if node.queryService != nil {
node.queryService.close()
/*
if node.queryService != nil {
node.queryService.close()
}*/
if node.queryShardService != nil {
node.queryShardService.close()
}
//if node.statsService != nil {
// node.statsService.close()
@ -454,43 +468,44 @@ func (node *QueryNode) watchChangeInfo() {
func (node *QueryNode) waitChangeInfo(segmentChangeInfos *querypb.SealedSegmentsChangeInfo) error {
fn := func() error {
for _, info := range segmentChangeInfos.Infos {
canDoLoadBalance := true
// make sure all query channel already received segment location changes
// Check online segments:
for _, segmentInfo := range info.OnlineSegments {
if node.queryService.hasQueryCollection(segmentInfo.CollectionID) {
qc, err := node.queryService.getQueryCollection(segmentInfo.CollectionID)
if err != nil {
canDoLoadBalance = false
break
}
if info.OnlineNodeID == Params.QueryNodeCfg.QueryNodeID && !qc.globalSegmentManager.hasGlobalSealedSegment(segmentInfo.SegmentID) {
canDoLoadBalance = false
break
/*
for _, info := range segmentChangeInfos.Infos {
canDoLoadBalance := true
// make sure all query channel already received segment location changes
// Check online segments:
for _, segmentInfo := range info.OnlineSegments {
if node.queryService.hasQueryCollection(segmentInfo.CollectionID) {
qc, err := node.queryService.getQueryCollection(segmentInfo.CollectionID)
if err != nil {
canDoLoadBalance = false
break
}
if info.OnlineNodeID == Params.QueryNodeCfg.QueryNodeID && !qc.globalSegmentManager.hasGlobalSealedSegment(segmentInfo.SegmentID) {
canDoLoadBalance = false
break
}
}
}
}
// Check offline segments:
for _, segmentInfo := range info.OfflineSegments {
if node.queryService.hasQueryCollection(segmentInfo.CollectionID) {
qc, err := node.queryService.getQueryCollection(segmentInfo.CollectionID)
if err != nil {
canDoLoadBalance = false
break
}
if info.OfflineNodeID == Params.QueryNodeCfg.QueryNodeID && qc.globalSegmentManager.hasGlobalSealedSegment(segmentInfo.SegmentID) {
canDoLoadBalance = false
break
// Check offline segments:
for _, segmentInfo := range info.OfflineSegments {
if node.queryService.hasQueryCollection(segmentInfo.CollectionID) {
qc, err := node.queryService.getQueryCollection(segmentInfo.CollectionID)
if err != nil {
canDoLoadBalance = false
break
}
if info.OfflineNodeID == Params.QueryNodeCfg.QueryNodeID && qc.globalSegmentManager.hasGlobalSealedSegment(segmentInfo.SegmentID) {
canDoLoadBalance = false
break
}
}
}
if canDoLoadBalance {
return nil
}
return errors.New(fmt.Sprintln("waitChangeInfo failed, infoID = ", segmentChangeInfos.Base.GetMsgID()))
}
if canDoLoadBalance {
return nil
}
return errors.New(fmt.Sprintln("waitChangeInfo failed, infoID = ", segmentChangeInfos.Base.GetMsgID()))
}
*/
return nil
}

View File

@ -18,7 +18,9 @@ package querynode
import (
"context"
"io/ioutil"
"math/rand"
"net/url"
"os"
"os/signal"
"strconv"
@ -29,6 +31,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/embed"
"github.com/milvus-io/milvus/internal/util/dependency"
@ -42,6 +45,8 @@ import (
"github.com/milvus-io/milvus/internal/util/sessionutil"
)
var embedetcdServer *embed.Etcd
// mock of query coordinator client
type queryCoordMock struct {
types.QueryCoord
@ -224,9 +229,41 @@ func newMessageStreamFactory() dependency.Factory {
return dependency.NewDefaultFactory(true)
}
func startEmbedEtcdServer() (*embed.Etcd, error) {
dir, err := ioutil.TempDir(os.TempDir(), "milvus_ut")
if err != nil {
return nil, err
}
defer os.RemoveAll(dir)
config := embed.NewConfig()
config.Dir = os.TempDir()
config.LogLevel = "warn"
config.LogOutputs = []string{"default"}
u, err := url.Parse("http://localhost:2389")
if err != nil {
return nil, err
}
config.LCUrls = []url.URL{*u}
u, err = url.Parse("http://localhost:2390")
if err != nil {
return nil, err
}
config.LPUrls = []url.URL{*u}
return embed.StartEtcd(config)
}
func TestMain(m *testing.M) {
setup()
Params.CommonCfg.QueryNodeStats = Params.CommonCfg.QueryNodeStats + strconv.Itoa(rand.Int())
// init embed etcd
var err error
embedetcdServer, err = startEmbedEtcdServer()
if err != nil {
os.Exit(1)
}
defer embedetcdServer.Close()
exitCode := m.Run()
os.Exit(exitCode)
}
@ -278,16 +315,17 @@ func genSimpleQueryNodeToTestWatchChangeInfo(ctx context.Context) (*QueryNode, e
return nil, err
}
err = node.queryService.addQueryCollection(defaultCollectionID)
if err != nil {
return nil, err
}
/*
err = node.queryService.addQueryCollection(defaultCollectionID)
if err != nil {
return nil, err
}
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
if err != nil {
return nil, err
}
qc.globalSegmentManager.addGlobalSegmentInfo(genSimpleSegmentInfo())
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
if err != nil {
return nil, err
}*/
//qc.globalSegmentManager.addGlobalSegmentInfo(genSimpleSegmentInfo())
return node, nil
}
@ -330,10 +368,11 @@ func TestQueryNode_adjustByChangeInfo(t *testing.T) {
segmentChangeInfos.Infos[0].OnlineSegments = nil
segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.QueryNodeID
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
assert.NoError(t, err)
qc.globalSegmentManager.removeGlobalSealedSegmentInfo(defaultSegmentID)
/*
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
assert.NoError(t, err)
qc.globalSegmentManager.removeGlobalSealedSegmentInfo(defaultSegmentID)
*/
err = node.removeSegments(segmentChangeInfos)
assert.Error(t, err)
})
@ -402,9 +441,10 @@ func TestQueryNode_watchChangeInfo(t *testing.T) {
segmentChangeInfos.Infos[0].OnlineSegments = nil
segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.QueryNodeID
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
assert.NoError(t, err)
qc.globalSegmentManager.removeGlobalSealedSegmentInfo(defaultSegmentID)
/*
qc, err := node.queryService.getQueryCollection(defaultCollectionID)
assert.NoError(t, err)
qc.globalSegmentManager.removeGlobalSealedSegmentInfo(defaultSegmentID)*/
go node.watchChangeInfo()

View File

@ -20,11 +20,8 @@ import (
"context"
"math"
"math/rand"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/mq/msgstream"
@ -129,6 +126,7 @@ func sendSearchRequest(ctx context.Context, DIM int) error {
return err
}
/*
func TestSearch_Search(t *testing.T) {
const N = 10000
const DIM = 16
@ -258,3 +256,4 @@ func TestQueryService_addQueryCollection(t *testing.T) {
qs.close()
assert.Len(t, qs.queryCollections, 0)
}
*/

View File

@ -19,9 +19,26 @@ package querynode
import (
"context"
"errors"
"fmt"
"math"
"sync"
"time"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/golang/protobuf/proto"
"go.uber.org/atomic"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/proto/segcorepb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
type queryShard struct {
@ -30,27 +47,878 @@ type queryShard struct {
collectionID UniqueID
channel Channel
deltaChannel Channel
replicaID int64
clusterService *ShardClusterService
historical *historical
streaming *streaming
dmTSafeWatcher *tSafeWatcher
deltaTSafeWatcher *tSafeWatcher
watcherCond *sync.Cond
serviceDmTs atomic.Uint64
serviceDeltaTs atomic.Uint64
startTickerOnce sync.Once
ticker *time.Ticker // timed ticker for trigger timeout check
localChunkManager storage.ChunkManager
remoteChunkManager storage.ChunkManager
vectorChunkManager *storage.VectorChunkManager
localCacheEnabled bool
localCacheSize int64
}
func newQueryShard(
ctx context.Context,
collectionID UniqueID,
channel Channel,
replicaID int64,
clusterService *ShardClusterService,
historical *historical,
streaming *streaming,
localChunkManager storage.ChunkManager,
remoteChunkManager storage.ChunkManager,
localCacheEnabled bool,
) *queryShard {
ctx, cancel := context.WithCancel(ctx)
qs := &queryShard{
ctx: ctx,
cancel: cancel,
collectionID: collectionID,
channel: channel,
ctx: ctx,
cancel: cancel,
collectionID: collectionID,
channel: channel,
replicaID: replicaID,
clusterService: clusterService,
historical: historical,
streaming: streaming,
localChunkManager: localChunkManager,
remoteChunkManager: remoteChunkManager,
localCacheEnabled: localCacheEnabled,
localCacheSize: Params.QueryNodeCfg.CacheMemoryLimit,
watcherCond: sync.NewCond(&sync.Mutex{}),
}
deltaChannel, err := funcutil.ConvertChannelName(channel, Params.CommonCfg.RootCoordDml, Params.CommonCfg.RootCoordDelta)
if err != nil {
log.Warn("failed to convert dm channel to delta", zap.String("channel", channel), zap.Error(err))
}
qs.deltaChannel = deltaChannel
return qs
}
func (q *queryShard) search(ctx context.Context, req *querypb.SearchRequest) (*milvuspb.SearchResults, error) {
return nil, errors.New("not implemented")
// Close cleans query shard
func (q *queryShard) Close() {
q.cancel()
}
func (q *queryShard) query(ctx context.Context, req *querypb.QueryRequest) (*milvuspb.QueryResults, error) {
return nil, errors.New("not implemented")
func (q *queryShard) watchDMLTSafe() error {
q.dmTSafeWatcher = newTSafeWatcher()
err := q.streaming.tSafeReplica.registerTSafeWatcher(q.channel, q.dmTSafeWatcher)
if err != nil {
log.Warn("failed to register dml tsafe watcher", zap.String("channel", q.channel), zap.Error(err))
return err
}
go q.watchTs(q.dmTSafeWatcher.watcherChan(), q.dmTSafeWatcher.closeCh, tsTypeDML)
q.startTsTicker()
return nil
}
func (q *queryShard) watchDeltaTSafe() error {
q.deltaTSafeWatcher = newTSafeWatcher()
err := q.streaming.tSafeReplica.registerTSafeWatcher(q.deltaChannel, q.deltaTSafeWatcher)
if err != nil {
log.Warn("failed to register delta tsafe watcher", zap.String("channel", q.deltaChannel), zap.Error(err))
return err
}
go q.watchTs(q.deltaTSafeWatcher.watcherChan(), q.deltaTSafeWatcher.closeCh, tsTypeDelta)
q.startTsTicker()
return nil
}
func (q *queryShard) startTsTicker() {
q.startTickerOnce.Do(func() {
go func() {
q.ticker = time.NewTicker(time.Millisecond * 50) // check timeout every 50 milliseconds, need not to be to frequent
defer q.ticker.Stop()
for {
select {
case <-q.ticker.C:
q.watcherCond.L.Lock()
q.watcherCond.Broadcast()
q.watcherCond.L.Unlock()
case <-q.ctx.Done():
return
}
}
}()
})
}
type tsType int32
const (
tsTypeDML tsType = 1
tsTypeDelta tsType = 2
)
func (tp tsType) String() string {
switch tp {
case tsTypeDML:
return "DML tSafe"
case tsTypeDelta:
return "Delta tSafe"
}
return ""
}
func (q *queryShard) watchTs(channel <-chan bool, closeCh <-chan struct{}, tp tsType) {
for {
select {
case <-q.ctx.Done():
log.Debug("stop queryShard watcher due to ctx done", zap.Int64("collectionID", q.collectionID), zap.String("vChannel", q.channel))
return
case <-closeCh:
log.Debug("stop queryShard watcher due to watcher closed", zap.Int64("collectionID", q.collectionID), zap.String("vChannel", q.channel))
return
case _, ok := <-channel:
if !ok {
log.Warn("tsafe watcher channel closed", zap.Int64("collectionID", q.collectionID), zap.String("vChannel", q.channel))
return
}
ts, err := q.getNewTSafe(tp)
if err == nil {
q.watcherCond.L.Lock()
q.setServiceableTime(ts, tp)
q.watcherCond.Broadcast()
q.watcherCond.L.Unlock()
}
}
}
}
func (q *queryShard) getNewTSafe(tp tsType) (Timestamp, error) {
var channel string
switch tp {
case tsTypeDML:
channel = q.channel
case tsTypeDelta:
channel = q.deltaChannel
default:
return 0, errors.New("invalid ts type")
}
t := Timestamp(math.MaxInt64)
ts, err := q.streaming.tSafeReplica.getTSafe(channel)
if err != nil {
return 0, err
}
if ts <= t {
t = ts
}
return t, nil
}
func (q *queryShard) waitUntilServiceable(ctx context.Context, guaranteeTs Timestamp, tp tsType) {
q.watcherCond.L.Lock()
defer q.watcherCond.L.Unlock()
st := q.getServiceableTime(tp)
for guaranteeTs > st {
log.Debug("serviceable ts before guarantee ts", zap.Uint64("serviceable ts", st), zap.Uint64("guarantee ts", guaranteeTs), zap.String("channel", q.channel))
q.watcherCond.Wait()
if err := ctx.Err(); err != nil {
log.Warn("waitUntialServiceable timeout", zap.Uint64("serviceable ts", st), zap.Uint64("guarantee ts", guaranteeTs), zap.String("channel", q.channel))
return
}
st = q.getServiceableTime(tp)
}
log.Debug("wait serviceable ts done", zap.String("tsType", tp.String()), zap.Uint64("guarantee ts", guaranteeTs), zap.Uint64("serviceable ts", st), zap.String("channel", q.channel))
}
func (q *queryShard) getServiceableTime(tp tsType) Timestamp {
gracefulTimeInMilliSecond := Params.QueryNodeCfg.GracefulTime
gracefulTime := typeutil.ZeroTimestamp
if gracefulTimeInMilliSecond > 0 {
gracefulTime = tsoutil.ComposeTS(gracefulTimeInMilliSecond, 0)
}
var serviceTs Timestamp
switch tp {
case tsTypeDML: // use min value of dml & delta
serviceTs = q.serviceDmTs.Load()
case tsTypeDelta: // check delta ts only
serviceTs = q.serviceDeltaTs.Load()
}
return serviceTs + gracefulTime
}
func (q *queryShard) setServiceableTime(t Timestamp, tp tsType) {
switch tp {
case tsTypeDML:
ts := q.serviceDmTs.Load()
if t < ts {
return
}
for !q.serviceDmTs.CAS(ts, t) {
ts = q.serviceDmTs.Load()
if t < ts {
return
}
}
case tsTypeDelta:
ts := q.serviceDeltaTs.Load()
if t < ts {
return
}
for !q.serviceDeltaTs.CAS(ts, t) {
ts = q.serviceDeltaTs.Load()
if t < ts {
return
}
}
}
}
func (q *queryShard) search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {
collectionID := req.Req.CollectionID
segmentIDs := req.SegmentIDs
timestamp := req.Req.TravelTimestamp
// check ctx timeout
if !funcutil.CheckCtxValid(ctx) {
return nil, errors.New("search context timeout")
}
// check if collection has been released
collection, err := q.historical.replica.getCollectionByID(collectionID)
if err != nil {
return nil, err
}
if req.GetReq().GetGuaranteeTimestamp() >= collection.getReleaseTime() {
log.Warn("collection release before search", zap.Int64("collectionID", collectionID))
return nil, fmt.Errorf("retrieve failed, collection has been released, collectionID = %d", collectionID)
}
// deserialize query plan
var plan *SearchPlan
if req.Req.GetDslType() == commonpb.DslType_BoolExprV1 {
expr := req.Req.SerializedExprPlan
plan, err = createSearchPlanByExpr(collection, expr)
if err != nil {
return nil, err
}
} else {
dsl := req.Req.Dsl
plan, err = createSearchPlan(collection, dsl)
if err != nil {
return nil, err
}
}
defer plan.delete()
schemaHelper, err := typeutil.CreateSchemaHelper(collection.schema)
if err != nil {
return nil, err
}
// validate top-k
topK := plan.getTopK()
if topK <= 0 || topK >= 16385 {
return nil, fmt.Errorf("limit should be in range [1, 16385], but got %d", topK)
}
// parse plan to search request
searchReq, err := parseSearchRequest(plan, req.Req.PlaceholderGroup)
if err != nil {
return nil, err
}
defer searchReq.delete()
queryNum := searchReq.getNumOfQuery()
searchRequests := []*searchRequest{searchReq}
if len(segmentIDs) == 0 {
// segmentIDs not specified, searching as shard leader
return q.searchLeader(ctx, req, searchRequests, collectionID, schemaHelper, plan, topK, queryNum, timestamp)
}
// segmentIDs specified search as shard follower
return q.searchFollower(ctx, req, searchRequests, collectionID, schemaHelper, plan, topK, queryNum, timestamp)
}
func (q *queryShard) searchLeader(ctx context.Context, req *querypb.SearchRequest, searchRequests []*searchRequest, collectionID UniqueID,
schemaHelper *typeutil.SchemaHelper, plan *SearchPlan, topK int64, queryNum int64, timestamp Timestamp) (*internalpb.SearchResults, error) {
q.streaming.replica.queryRLock()
defer q.streaming.replica.queryRUnlock()
cluster, ok := q.clusterService.getShardCluster(req.GetDmlChannel())
if !ok {
return nil, fmt.Errorf("channel %s leader is not here", req.GetDmlChannel())
}
searchCtx, cancel := context.WithCancel(ctx)
defer cancel()
var results []*internalpb.SearchResults
var streamingResults []*SearchResult
var err error
var mut sync.Mutex
var wg sync.WaitGroup
wg.Add(2) // search cluster and search streaming
go func() {
defer wg.Done()
// shard leader dispatches request to its shard cluster
cResults, cErr := cluster.Search(searchCtx, req)
mut.Lock()
defer mut.Unlock()
if cErr != nil {
log.Warn("search cluster failed", zap.Int64("collectionID", q.collectionID), zap.Error(cErr))
err = cErr
cancel()
return
}
results = cResults
}()
go func() {
defer wg.Done()
// hold request until guarantee timestamp >= service timestamp
guaranteeTs := req.GetReq().GetGuaranteeTimestamp()
q.waitUntilServiceable(ctx, guaranteeTs, tsTypeDML)
// shard leader queries its own streaming data
// TODO add context
sResults, _, _, sErr := q.streaming.search(searchRequests, collectionID, req.Req.PartitionIDs, req.DmlChannel, plan, timestamp)
mut.Lock()
defer mut.Unlock()
if sErr != nil {
log.Warn("failed to search streaming data", zap.Int64("collectionID", q.collectionID), zap.Error(sErr))
err = sErr
cancel()
return
}
streamingResults = sResults
}()
wg.Wait()
if err != nil {
return nil, err
}
defer deleteSearchResults(streamingResults)
results = append(results, &internalpb.SearchResults{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
MetricType: plan.getMetricType(),
NumQueries: queryNum,
TopK: topK,
SlicedBlob: nil,
SlicedOffset: 1,
SlicedNumCount: 1,
})
if len(streamingResults) > 0 {
// reduce search results
numSegment := int64(len(streamingResults))
err = reduceSearchResultsAndFillData(plan, streamingResults, numSegment)
if err != nil {
return nil, err
}
nq := searchRequests[0].getNumOfQuery()
nqOfReqs := []int64{nq}
nqPerSlice := nq
reqSlices, err := getReqSlices(nqOfReqs, nqPerSlice)
if err != nil {
log.Warn("getReqSlices for streaming results error", zap.Error(err))
return nil, err
}
blobs, err := marshal(collectionID, 0, streamingResults, int(numSegment), reqSlices)
defer deleteSearchResultDataBlobs(blobs)
if err != nil {
log.Warn("marshal for streaming results error", zap.Error(err))
return nil, err
}
// assume only one blob will be sent back
blob, err := getSearchResultDataBlob(blobs, 0)
if err != nil {
log.Warn("getSearchResultDataBlob for streaming results error", zap.Error(err))
}
results[len(results)-1].SlicedBlob = blob
}
// reduce shard search results: unmarshal -> reduce -> marshal
log.Debug("shard leader get search results", zap.Int("numbers", len(results)))
searchResultData, err := decodeSearchResults(results)
if err != nil {
log.Warn("shard leader decode search results errors", zap.Error(err))
return nil, err
}
log.Debug("shard leader get valid search results", zap.Int("numbers", len(searchResultData)))
for i, sData := range searchResultData {
log.Debug("reduceSearchResultData",
zap.Int("result No.", i),
zap.Int64("nq", sData.NumQueries),
zap.Int64("topk", sData.TopK),
zap.String("ids", sData.Ids.String()),
zap.Any("len(FieldsData)", len(sData.FieldsData)))
}
reducedResultData, err := reduceSearchResultData(searchResultData, queryNum, plan.getTopK(), plan.getMetricType())
if err != nil {
log.Warn("shard leader reduce errors", zap.Error(err))
return nil, err
}
searchResults, err := encodeSearchResultData(reducedResultData, queryNum, plan.getTopK(), plan.getMetricType())
if err != nil {
log.Warn("shard leader encode search result errors", zap.Error(err))
return nil, err
}
if searchResults.SlicedBlob == nil {
log.Debug("shard leader send nil results to proxy",
zap.String("shard", q.channel))
} else {
log.Debug("shard leader send non-nil results to proxy",
zap.String("shard", q.channel),
zap.String("ids", reducedResultData.Ids.String()))
// printSearchResultData(reducedResultData, q.channel)
}
return searchResults, nil
}
func (q *queryShard) searchFollower(ctx context.Context, req *querypb.SearchRequest, searchRequests []*searchRequest, collectionID UniqueID,
schemaHelper *typeutil.SchemaHelper, plan *SearchPlan, topK int64, queryNum int64, timestamp Timestamp) (*internalpb.SearchResults, error) {
q.historical.replica.queryRLock()
defer q.historical.replica.queryRUnlock()
segmentIDs := req.GetSegmentIDs()
// hold request until guarantee timestamp >= service timestamp
guaranteeTs := req.GetReq().GetGuaranteeTimestamp()
q.waitUntilServiceable(ctx, guaranteeTs, tsTypeDelta)
// search each segments by segment IDs in request
historicalResults, _, err := q.historical.searchSegments(segmentIDs, searchRequests, plan, timestamp)
if err != nil {
return nil, err
}
defer deleteSearchResults(historicalResults)
// reduce search results
numSegment := int64(len(historicalResults))
err = reduceSearchResultsAndFillData(plan, historicalResults, numSegment)
if err != nil {
return nil, err
}
nq := searchRequests[0].getNumOfQuery()
nqOfReqs := []int64{nq}
nqPerSlice := nq
reqSlices, err := getReqSlices(nqOfReqs, nqPerSlice)
if err != nil {
log.Warn("getReqSlices for historical results error", zap.Error(err))
return nil, err
}
blobs, err := marshal(collectionID, 0, historicalResults, int(numSegment), reqSlices)
defer deleteSearchResultDataBlobs(blobs)
if err != nil {
log.Warn("marshal for historical results error", zap.Error(err))
return nil, err
}
// assume only one blob will be sent back
blob, err := getSearchResultDataBlob(blobs, 0)
if err != nil {
log.Warn("getSearchResultDataBlob for historical results error", zap.Error(err))
}
bs := make([]byte, len(blob))
copy(bs, blob)
resp := &internalpb.SearchResults{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
MetricType: plan.getMetricType(),
NumQueries: queryNum,
TopK: topK,
SlicedBlob: bs,
SlicedOffset: 1,
SlicedNumCount: 1,
}
log.Debug("shard follower send search result to leader")
return resp, nil
}
func reduceSearchResultData(searchResultData []*schemapb.SearchResultData, nq int64, topk int64, metricType string) (*schemapb.SearchResultData, error) {
if len(searchResultData) == 0 {
return &schemapb.SearchResultData{
NumQueries: nq,
TopK: topk,
FieldsData: make([]*schemapb.FieldData, 0),
Scores: make([]float32, 0),
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: make([]int64, 0),
},
},
},
Topks: make([]int64, 0),
}, nil
}
ret := &schemapb.SearchResultData{
NumQueries: nq,
TopK: topk,
FieldsData: make([]*schemapb.FieldData, len(searchResultData[0].FieldsData)),
Scores: make([]float32, 0),
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: make([]int64, 0),
},
},
},
Topks: make([]int64, 0),
}
var skipDupCnt int64
var dummyCnt int64
// var realTopK int64 = -1
for i := int64(0); i < nq; i++ {
offsets := make([]int64, len(searchResultData))
var idSet = make(map[int64]struct{})
var j int64
for j = 0; j < topk; {
sel := selectSearchResultData(searchResultData, offsets, topk, i)
if sel == -1 {
break
}
idx := i*topk + offsets[sel]
id := searchResultData[sel].Ids.GetIntId().Data[idx]
score := searchResultData[sel].Scores[idx]
// ignore invalid search result
if id == -1 {
continue
}
// remove duplicates
if _, ok := idSet[id]; !ok {
typeutil.AppendFieldData(ret.FieldsData, searchResultData[sel].FieldsData, idx)
ret.Ids.GetIntId().Data = append(ret.Ids.GetIntId().Data, id)
ret.Scores = append(ret.Scores, score)
idSet[id] = struct{}{}
j++
} else {
// skip entity with same id
skipDupCnt++
}
offsets[sel]++
}
// add empty data
for j < topk {
typeutil.AppendFieldData(ret.FieldsData, searchResultData[0].FieldsData, 0)
ret.Ids.GetIntId().Data = append(ret.Ids.GetIntId().Data, -1)
ret.Scores = append(ret.Scores, -1*float32(math.MaxFloat32))
j++
dummyCnt++
}
// if realTopK != -1 && realTopK != j {
// log.Warn("Proxy Reduce Search Result", zap.Error(errors.New("the length (topk) between all result of query is different")))
// // return nil, errors.New("the length (topk) between all result of query is different")
// }
// realTopK = j
// ret.Topks = append(ret.Topks, realTopK)
}
log.Debug("skip duplicated search result", zap.Int64("count", skipDupCnt))
log.Debug("add dummy data in search result", zap.Int64("count", dummyCnt))
// ret.TopK = realTopK
// if !distance.PositivelyRelated(metricType) {
// for k := range ret.Scores {
// ret.Scores[k] *= -1
// }
// }
return ret, nil
}
func selectSearchResultData(dataArray []*schemapb.SearchResultData, offsets []int64, topk int64, qi int64) int {
sel := -1
maxDistance := -1 * float32(math.MaxFloat32)
for i, offset := range offsets { // query num, the number of ways to merge
if offset >= topk {
continue
}
idx := qi*topk + offset
id := dataArray[i].Ids.GetIntId().Data[idx]
if id != -1 {
distance := dataArray[i].Scores[idx]
if distance > maxDistance {
sel = i
maxDistance = distance
}
}
}
return sel
}
func decodeSearchResults(searchResults []*internalpb.SearchResults) ([]*schemapb.SearchResultData, error) {
results := make([]*schemapb.SearchResultData, 0)
for _, partialSearchResult := range searchResults {
if partialSearchResult.SlicedBlob == nil {
continue
}
var partialResultData schemapb.SearchResultData
err := proto.Unmarshal(partialSearchResult.SlicedBlob, &partialResultData)
if err != nil {
return nil, err
}
results = append(results, &partialResultData)
}
return results, nil
}
func encodeSearchResultData(searchResultData *schemapb.SearchResultData, nq int64, topk int64, metricType string) (searchResults *internalpb.SearchResults, err error) {
searchResults = &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
NumQueries: nq,
TopK: topk,
MetricType: metricType,
SlicedBlob: nil,
}
slicedBlob, err := proto.Marshal(searchResultData)
if err != nil {
return nil, err
}
if searchResultData != nil && searchResultData.Ids != nil && len(searchResultData.Ids.GetIntId().Data) != 0 {
searchResults.SlicedBlob = slicedBlob
}
return
}
func (q *queryShard) query(ctx context.Context, req *querypb.QueryRequest) (*internalpb.RetrieveResults, error) {
collectionID := req.Req.CollectionID
segmentIDs := req.SegmentIDs
partitionIDs := req.Req.PartitionIDs
expr := req.Req.SerializedExprPlan
timestamp := req.Req.TravelTimestamp
// check ctx timeout
if !funcutil.CheckCtxValid(ctx) {
return nil, errors.New("search context timeout")
}
// check if collection has been released
collection, err := q.streaming.replica.getCollectionByID(collectionID)
if err != nil {
return nil, err
}
if req.GetReq().GetGuaranteeTimestamp() >= collection.getReleaseTime() {
log.Warn("collection release before query", zap.Int64("collectionID", collectionID))
return nil, fmt.Errorf("retrieve failed, collection has been released, collectionID = %d", collectionID)
}
// deserialize query plan
plan, err := createRetrievePlanByExpr(collection, expr, timestamp)
if err != nil {
return nil, err
}
defer plan.delete()
// TODO: init vector chunk manager at most once
if q.vectorChunkManager == nil {
if q.localChunkManager == nil {
return nil, fmt.Errorf("can not create vector chunk manager for local chunk manager is nil")
}
if q.remoteChunkManager == nil {
return nil, fmt.Errorf("can not create vector chunk manager for remote chunk manager is nil")
}
q.vectorChunkManager, err = storage.NewVectorChunkManager(q.localChunkManager, q.remoteChunkManager,
&etcdpb.CollectionMeta{
ID: collection.id,
Schema: collection.schema,
}, q.localCacheSize, q.localCacheEnabled)
if err != nil {
return nil, err
}
}
// check if shard leader b.c only leader receives request with no segment specified
if len(req.GetSegmentIDs()) == 0 {
q.streaming.replica.queryRLock()
defer q.streaming.replica.queryRUnlock()
cluster, ok := q.clusterService.getShardCluster(req.GetDmlChannel())
if !ok {
return nil, fmt.Errorf("channel %s leader is not here", req.GetDmlChannel())
}
// add cancel when error occurs
queryCtx, cancel := context.WithCancel(ctx)
defer cancel()
var results []*internalpb.RetrieveResults
var streamingResults []*segcorepb.RetrieveResults
var err error
var mut sync.Mutex
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
// shard leader dispatches request to its shard cluster
cResults, cErr := cluster.Query(queryCtx, req)
mut.Lock()
defer mut.Unlock()
if cErr != nil {
err = cErr
log.Warn("failed to query cluster", zap.Int64("collectionID", q.collectionID), zap.Error(cErr))
cancel()
return
}
results = cResults
}()
go func() {
defer wg.Done()
// hold request until guarantee timestamp >= service timestamp
guaranteeTs := req.GetReq().GetGuaranteeTimestamp()
q.waitUntilServiceable(ctx, guaranteeTs, tsTypeDML)
// shard leader queries its own streaming data
// TODO add context
sResults, _, _, sErr := q.streaming.retrieve(collectionID, partitionIDs, plan, func(segment *Segment) bool { return segment.vChannelID == q.channel })
mut.Lock()
defer mut.Unlock()
if sErr != nil {
err = sErr
log.Warn("failed to query streaming", zap.Int64("collectionID", q.collectionID), zap.Error(err))
cancel()
return
}
streamingResults = sResults
}()
wg.Wait()
if err != nil {
return nil, err
}
streamingResult, err := mergeRetrieveResults(streamingResults)
if err != nil {
return nil, err
}
// complete results with merged streaming result
results = append(results, &internalpb.RetrieveResults{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Ids: streamingResult.Ids,
FieldsData: streamingResult.FieldsData,
})
// merge shard query results
mergedResults, err := mergeInternalRetrieveResults(results)
if err != nil {
return nil, err
}
log.Debug("leader retrieve result", zap.String("channel", req.DmlChannel), zap.String("ids", mergedResults.Ids.String()))
return mergedResults, nil
}
q.historical.replica.queryRLock()
defer q.historical.replica.queryRUnlock()
// hold request until guarantee timestamp >= service timestamp
guaranteeTs := req.GetReq().GetGuaranteeTimestamp()
q.waitUntilServiceable(ctx, guaranteeTs, tsTypeDelta)
// shard follower considers solely historical segments
retrieveResults, err := q.historical.retrieveBySegmentIDs(collectionID, segmentIDs, q.vectorChunkManager, plan)
if err != nil {
return nil, err
}
mergedResult, err := mergeRetrieveResults(retrieveResults)
if err != nil {
return nil, err
}
log.Debug("follower retrieve result", zap.String("ids", mergedResult.Ids.String()))
RetrieveResults := &internalpb.RetrieveResults{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Ids: mergedResult.Ids,
FieldsData: mergedResult.FieldsData,
}
return RetrieveResults, nil
}
// TODO: largely based on function mergeRetrieveResults, need rewriting
func mergeInternalRetrieveResults(retrieveResults []*internalpb.RetrieveResults) (*internalpb.RetrieveResults, error) {
var ret *internalpb.RetrieveResults
var skipDupCnt int64
var idSet = make(map[int64]struct{})
// merge results and remove duplicates
for _, rr := range retrieveResults {
// skip if fields data is empty
if len(rr.FieldsData) == 0 {
continue
}
if ret == nil {
ret = &internalpb.RetrieveResults{
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{},
},
},
},
FieldsData: make([]*schemapb.FieldData, len(rr.FieldsData)),
}
}
if len(ret.FieldsData) != len(rr.FieldsData) {
log.Warn("mismatch FieldData in RetrieveResults")
return nil, fmt.Errorf("mismatch FieldData in RetrieveResults")
}
dstIds := ret.Ids.GetIntId()
for i, id := range rr.Ids.GetIntId().GetData() {
if _, ok := idSet[id]; !ok {
dstIds.Data = append(dstIds.Data, id)
typeutil.AppendFieldData(ret.FieldsData, rr.FieldsData, int64(i))
idSet[id] = struct{}{}
} else {
// primary keys duplicate
skipDupCnt++
}
}
}
// not found, return default values indicating not result found
if ret == nil {
ret = &internalpb.RetrieveResults{
Ids: &schemapb.IDs{},
FieldsData: []*schemapb.FieldData{},
}
}
return ret, nil
}
// func printSearchResultData(data *schemapb.SearchResultData, header string) {
// size := len(data.Ids.GetIntId().Data)
// if size != len(data.Scores) {
// log.Error("SearchResultData length mis-match")
// }
// log.Debug("==== SearchResultData ====",
// zap.String("header", header), zap.Int64("nq", data.NumQueries), zap.Int64("topk", data.TopK))
// for i := 0; i < size; i++ {
// log.Debug("", zap.Int("i", i), zap.Int64("id", data.Ids.GetIntId().Data[i]), zap.Float32("score", data.Scores[i]))
// }
// }

View File

@ -20,7 +20,11 @@ import (
"context"
"errors"
"fmt"
"strconv"
"sync"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/dependency"
)
type queryShardService struct {
@ -29,19 +33,54 @@ type queryShardService struct {
queryShardsMu sync.Mutex // guards queryShards
queryShards map[Channel]*queryShard // Virtual Channel -> *queryShard
queryChannelMu sync.Mutex // guards queryChannels
queryChannels map[int64]*queryChannel // Collection ID -> query channel
factory dependency.Factory
historical *historical
streaming *streaming
shardClusterService *ShardClusterService
localChunkManager storage.ChunkManager
remoteChunkManager storage.ChunkManager
localCacheEnabled bool
}
func newQueryShardService(ctx context.Context) *queryShardService {
func newQueryShardService(ctx context.Context, historical *historical, streaming *streaming, clusterService *ShardClusterService, factory dependency.Factory) *queryShardService {
queryShardServiceCtx, queryShardServiceCancel := context.WithCancel(ctx)
path := Params.LoadWithDefault("localStorage.Path", "/tmp/milvus/data")
enabled, _ := Params.Load("localStorage.enabled")
localCacheEnabled, _ := strconv.ParseBool(enabled)
localChunkManager := storage.NewLocalChunkManager(storage.RootPath(path))
remoteChunkManager, _ := storage.NewMinioChunkManager(
ctx,
storage.Address(Params.MinioCfg.Address),
storage.AccessKeyID(Params.MinioCfg.AccessKeyID),
storage.SecretAccessKeyID(Params.MinioCfg.SecretAccessKey),
storage.UseSSL(Params.MinioCfg.UseSSL),
storage.BucketName(Params.MinioCfg.BucketName),
storage.CreateBucket(true))
qss := &queryShardService{
ctx: queryShardServiceCtx,
cancel: queryShardServiceCancel,
queryShards: make(map[Channel]*queryShard),
ctx: queryShardServiceCtx,
cancel: queryShardServiceCancel,
queryShards: make(map[Channel]*queryShard),
queryChannels: make(map[int64]*queryChannel),
historical: historical,
streaming: streaming,
shardClusterService: clusterService,
localChunkManager: localChunkManager,
remoteChunkManager: remoteChunkManager,
localCacheEnabled: localCacheEnabled,
factory: factory,
}
return qss
}
func (q *queryShardService) addQueryShard(collectionID UniqueID, channel Channel) error {
func (q *queryShardService) addQueryShard(collectionID UniqueID, channel Channel, replicaID int64) error {
q.queryShardsMu.Lock()
defer q.queryShardsMu.Unlock()
if _, ok := q.queryShards[channel]; ok {
@ -51,6 +90,13 @@ func (q *queryShardService) addQueryShard(collectionID UniqueID, channel Channel
q.ctx,
collectionID,
channel,
replicaID,
q.shardClusterService,
q.historical,
q.streaming,
q.localChunkManager,
q.remoteChunkManager,
q.localCacheEnabled,
)
q.queryShards[channel] = qs
return nil
@ -81,3 +127,50 @@ func (q *queryShardService) getQueryShard(channel Channel) (*queryShard, error)
}
return q.queryShards[channel], nil
}
func (q *queryShardService) close() {
q.cancel()
q.queryShardsMu.Lock()
defer q.queryShardsMu.Unlock()
for _, queryShard := range q.queryShards {
queryShard.Close()
}
}
func (q *queryShardService) getQueryChannel(collectionID int64) *queryChannel {
q.queryChannelMu.Lock()
defer q.queryChannelMu.Unlock()
qc, ok := q.queryChannels[collectionID]
if !ok {
queryStream, _ := q.factory.NewQueryMsgStream(q.ctx)
qc = &queryChannel{
closeCh: make(chan struct{}),
collectionID: collectionID,
queryMsgStream: queryStream,
streaming: q.streaming,
}
q.queryChannels[collectionID] = qc
}
return qc
}
func (q *queryShardService) releaseCollection(collectionID int64) {
q.queryChannelMu.Lock()
qc, ok := q.queryChannels[collectionID]
if ok && qc != nil {
qc.Stop()
}
q.queryChannelMu.Unlock()
q.queryShardsMu.Lock()
for _, queryShard := range q.queryShards {
if queryShard.collectionID == collectionID {
queryShard.Close()
delete(q.queryShards, queryShard.channel)
}
}
q.queryShardsMu.Unlock()
}

View File

@ -21,11 +21,15 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestQueryShardService(t *testing.T) {
qss := newQueryShardService(context.Background())
err := qss.addQueryShard(0, "vchan1")
qn, err := genSimpleQueryNode(context.Background())
require.NoError(t, err)
qss := newQueryShardService(context.Background(), qn.historical, qn.streaming, qn.ShardClusterService, qn.factory)
err = qss.addQueryShard(0, "vchan1", 0)
assert.NoError(t, err)
found1 := qss.hasQueryShard("vchan1")
assert.Equal(t, true, found1)

View File

@ -18,21 +18,263 @@ package querynode
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
func genSimpleQueryShard(ctx context.Context) (*queryShard, error) {
tSafe := newTSafeReplica()
historical, err := genSimpleHistorical(ctx, tSafe)
if err != nil {
return nil, err
}
streaming, err := genSimpleStreaming(ctx, tSafe)
if err != nil {
return nil, err
}
localCM, err := genLocalChunkManager()
if err != nil {
return nil, err
}
remoteCM, err := genRemoteChunkManager(ctx)
if err != nil {
return nil, err
}
shardCluster := NewShardCluster(defaultCollectionID, defaultReplicaID, defaultDMLChannel,
&mockNodeDetector{}, &mockSegmentDetector{}, buildMockQueryNode)
shardClusterService := &ShardClusterService{
clusters: sync.Map{},
}
shardClusterService.clusters.Store(defaultDMLChannel, shardCluster)
qs := newQueryShard(ctx, defaultCollectionID, defaultDMLChannel, defaultReplicaID, shardClusterService,
historical, streaming, localCM, remoteCM, false)
qs.deltaChannel = defaultDeltaChannel
err = qs.watchDMLTSafe()
if err != nil {
return nil, err
}
err = qs.watchDeltaTSafe()
if err != nil {
return nil, err
}
return qs, nil
}
func updateQueryShardTSafe(qs *queryShard, timestamp Timestamp) error {
err := qs.streaming.tSafeReplica.setTSafe(defaultDMLChannel, timestamp)
if err != nil {
return err
}
return qs.historical.tSafeReplica.setTSafe(defaultDeltaChannel, timestamp)
}
func TestQueryShard_Close(t *testing.T) {
qs, err := genSimpleQueryShard(context.Background())
assert.NoError(t, err)
qs.Close()
}
func TestQueryShard_Search(t *testing.T) {
qs := newQueryShard(context.Background(), 0, "vchan1")
_, err := qs.search(context.Background(), &querypb.SearchRequest{})
assert.Error(t, err)
qs, err := genSimpleQueryShard(context.Background())
assert.NoError(t, err)
req, err := genSimpleSearchRequest(IndexFaissIDMap)
assert.NoError(t, err)
t.Run("search follower", func(t *testing.T) {
request := &querypb.SearchRequest{
Req: req,
DmlChannel: "",
SegmentIDs: []int64{defaultSegmentID},
}
_, err = qs.search(context.Background(), request)
assert.NoError(t, err)
})
t.Run("search leader", func(t *testing.T) {
request := &querypb.SearchRequest{
Req: req,
DmlChannel: defaultDMLChannel,
SegmentIDs: []int64{},
}
_, err = qs.search(context.Background(), request)
assert.NoError(t, err)
})
}
func TestQueryShard_Query(t *testing.T) {
qs := newQueryShard(context.Background(), 0, "vchan1")
_, err := qs.query(context.Background(), &querypb.QueryRequest{})
assert.Error(t, err)
qs, err := genSimpleQueryShard(context.Background())
assert.NoError(t, err)
req, err := genSimpleRetrieveRequest()
assert.NoError(t, err)
t.Run("query follower", func(t *testing.T) {
request := &querypb.QueryRequest{
Req: req,
DmlChannel: "",
SegmentIDs: []int64{defaultSegmentID},
}
resp, err := qs.query(context.Background(), request)
assert.NoError(t, err)
assert.ElementsMatch(t, resp.Ids.GetIntId().Data, []int64{1, 2, 3})
})
t.Run("query leader", func(t *testing.T) {
request := &querypb.QueryRequest{
Req: req,
DmlChannel: defaultDMLChannel,
SegmentIDs: []int64{},
}
_, err := qs.query(context.Background(), request)
assert.NoError(t, err)
})
}
func TestQueryShard_waitNewTSafe(t *testing.T) {
qs, err := genSimpleQueryShard(context.Background())
assert.NoError(t, err)
timestamp := Timestamp(1000)
err = updateQueryShardTSafe(qs, timestamp)
assert.NoError(t, err)
dmlTimestamp, err := qs.getNewTSafe(tsTypeDML)
assert.NoError(t, err)
assert.Equal(t, timestamp, dmlTimestamp)
deltaTimestamp, err := qs.getNewTSafe(tsTypeDelta)
assert.NoError(t, err)
assert.Equal(t, timestamp, deltaTimestamp)
}
func TestQueryShard_WaitUntilServiceable(t *testing.T) {
qs, err := genSimpleQueryShard(context.Background())
assert.NoError(t, err)
err = updateQueryShardTSafe(qs, 1000)
assert.NoError(t, err)
qs.waitUntilServiceable(context.Background(), 1000, tsTypeDML)
}
func genSearchResultData(nq int64, topk int64, ids []int64, scores []float32) *schemapb.SearchResultData {
return &schemapb.SearchResultData{
NumQueries: nq,
TopK: topk,
FieldsData: nil,
Scores: scores,
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: ids,
},
},
},
Topks: make([]int64, nq),
}
}
func TestReduceSearchResultData(t *testing.T) {
const (
nq = 1
topk = 4
metricType = "L2"
)
t.Run("case1", func(t *testing.T) {
ids := []int64{1, 2, 3, 4}
scores := []float32{-1.0, -2.0, -3.0, -4.0}
data1 := genSearchResultData(nq, topk, ids, scores)
data2 := genSearchResultData(nq, topk, ids, scores)
dataArray := make([]*schemapb.SearchResultData, 0)
dataArray = append(dataArray, data1)
dataArray = append(dataArray, data2)
res, err := reduceSearchResultData(dataArray, nq, topk, metricType)
assert.Nil(t, err)
assert.Equal(t, ids, res.Ids.GetIntId().Data)
assert.Equal(t, scores, res.Scores)
})
t.Run("case2", func(t *testing.T) {
ids1 := []int64{1, 2, 3, 4}
scores1 := []float32{-1.0, -2.0, -3.0, -4.0}
ids2 := []int64{5, 1, 3, 4}
scores2 := []float32{-1.0, -1.0, -3.0, -4.0}
data1 := genSearchResultData(nq, topk, ids1, scores1)
data2 := genSearchResultData(nq, topk, ids2, scores2)
dataArray := make([]*schemapb.SearchResultData, 0)
dataArray = append(dataArray, data1)
dataArray = append(dataArray, data2)
res, err := reduceSearchResultData(dataArray, nq, topk, metricType)
assert.Nil(t, err)
assert.ElementsMatch(t, []int64{1, 5, 2, 3}, res.Ids.GetIntId().Data)
})
}
func TestMergeInternalRetrieveResults(t *testing.T) {
const (
Dim = 8
Int64FieldName = "Int64Field"
FloatVectorFieldName = "FloatVectorField"
Int64FieldID = common.StartOfUserFieldID + 1
FloatVectorFieldID = common.StartOfUserFieldID + 2
)
Int64Array := []int64{11, 22}
FloatVector := []float32{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 11.0, 22.0, 33.0, 44.0, 55.0, 66.0, 77.0, 88.0}
var fieldDataArray1 []*schemapb.FieldData
fieldDataArray1 = append(fieldDataArray1, genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64, Int64Array[0:2], 1))
fieldDataArray1 = append(fieldDataArray1, genFieldData(FloatVectorFieldName, FloatVectorFieldID, schemapb.DataType_FloatVector, FloatVector[0:16], Dim))
var fieldDataArray2 []*schemapb.FieldData
fieldDataArray2 = append(fieldDataArray2, genFieldData(Int64FieldName, Int64FieldID, schemapb.DataType_Int64, Int64Array[0:2], 1))
fieldDataArray2 = append(fieldDataArray2, genFieldData(FloatVectorFieldName, FloatVectorFieldID, schemapb.DataType_FloatVector, FloatVector[0:16], Dim))
result1 := &internalpb.RetrieveResults{
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{0, 1},
},
},
},
// Offset: []int64{0, 1},
FieldsData: fieldDataArray1,
}
result2 := &internalpb.RetrieveResults{
Ids: &schemapb.IDs{
IdField: &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{0, 1},
},
},
},
// Offset: []int64{0, 1},
FieldsData: fieldDataArray2,
}
result, err := mergeInternalRetrieveResults([]*internalpb.RetrieveResults{result1, result2})
assert.NoError(t, err)
assert.Equal(t, 2, len(result.FieldsData[0].GetScalars().GetLongData().Data))
assert.Equal(t, 2*Dim, len(result.FieldsData[1].GetVectors().GetFloatVector().Data))
_, err = mergeInternalRetrieveResults(nil)
assert.NoError(t, err)
}

View File

@ -68,16 +68,17 @@ type nodeEvent struct {
}
type segmentEvent struct {
eventType segmentEventType
segmentID int64
nodeID int64
state segmentState
eventType segmentEventType
segmentID int64
partitionID int64
nodeID int64
state segmentState
}
type shardQueryNode interface {
Search(context.Context, *querypb.SearchRequest) (*internalpb.SearchResults, error)
Query(context.Context, *querypb.QueryRequest) (*internalpb.RetrieveResults, error)
Stop()
Stop() error
}
type shardNode struct {
@ -87,9 +88,10 @@ type shardNode struct {
}
type shardSegmentInfo struct {
segmentID int64
nodeID int64
state segmentState
segmentID int64
partitionID int64
nodeID int64
state segmentState
}
// ShardNodeDetector provides method to detect node events
@ -159,6 +161,7 @@ func (sc *ShardCluster) Close() {
// addNode add a node into cluster
func (sc *ShardCluster) addNode(evt nodeEvent) {
log.Debug("ShardCluster add node", zap.Int64("nodeID", evt.nodeID))
sc.mut.Lock()
defer sc.mut.Unlock()
@ -202,15 +205,18 @@ func (sc *ShardCluster) removeNode(evt nodeEvent) {
// updateSegment apply segment change to shard cluster
func (sc *ShardCluster) updateSegment(evt segmentEvent) {
log.Debug("ShardCluster update segment", zap.Int64("nodeID", evt.nodeID), zap.Int64("segmentID", evt.segmentID), zap.Int32("state", int32(evt.state)))
sc.mut.Lock()
defer sc.mut.Unlock()
old, ok := sc.segments[evt.segmentID]
if !ok { // newly add
sc.segments[evt.segmentID] = &shardSegmentInfo{
nodeID: evt.nodeID,
segmentID: evt.segmentID,
state: evt.state,
nodeID: evt.nodeID,
partitionID: evt.partitionID,
segmentID: evt.segmentID,
state: evt.state,
}
return
}
@ -302,6 +308,7 @@ func (sc *ShardCluster) watchNodes(evtCh <-chan nodeEvent) {
for {
select {
case evt, ok := <-evtCh:
log.Debug("node event", zap.Any("evt", evt))
if !ok {
log.Warn("ShardCluster node channel closed", zap.Int64("collectionID", sc.collectionID), zap.Int64("replicaID", sc.replicaID))
return
@ -324,6 +331,7 @@ func (sc *ShardCluster) watchSegments(evtCh <-chan segmentEvent) {
for {
select {
case evt, ok := <-evtCh:
log.Debug("segment event", zap.Any("evt", evt))
if !ok {
log.Warn("ShardCluster segment channel closed", zap.Int64("collectionID", sc.collectionID), zap.Int64("replicaID", sc.replicaID))
return
@ -365,19 +373,23 @@ func (sc *ShardCluster) getSegment(segmentID int64) (*shardSegmentInfo, bool) {
return nil, false
}
return &shardSegmentInfo{
segmentID: segment.segmentID,
nodeID: segment.nodeID,
state: segment.state,
segmentID: segment.segmentID,
nodeID: segment.nodeID,
partitionID: segment.partitionID,
state: segment.state,
}, true
}
// segmentAllocations returns node to segments mappings.
func (sc *ShardCluster) segmentAllocations() map[int64][]int64 {
func (sc *ShardCluster) segmentAllocations(partitionIDs []int64) map[int64][]int64 {
result := make(map[int64][]int64) // nodeID => segmentIDs
sc.mut.RLock()
defer sc.mut.RUnlock()
for _, segment := range sc.segments {
if len(partitionIDs) > 0 && !inList(partitionIDs, segment.partitionID) {
continue
}
result[segment.nodeID] = append(result[segment.nodeID], segment.segmentID)
}
return result
@ -386,16 +398,22 @@ func (sc *ShardCluster) segmentAllocations() map[int64][]int64 {
// Search preforms search operation on shard cluster.
func (sc *ShardCluster) Search(ctx context.Context, req *querypb.SearchRequest) ([]*internalpb.SearchResults, error) {
if sc.state.Load() != int32(available) {
return nil, fmt.Errorf("SharcCluster for %s replicaID %d is no available", sc.vchannelName, sc.replicaID)
return nil, fmt.Errorf("ShardCluster for %s replicaID %d is no available", sc.vchannelName, sc.replicaID)
}
if sc.vchannelName != req.GetDmlChannel() {
return nil, fmt.Errorf("ShardCluster for %s does not match to request channel :%s", sc.vchannelName, req.GetDmlChannel())
}
// get node allocation
segAllocs := sc.segmentAllocations()
//req.GetReq().GetPartitionIDs()
// get node allocation
segAllocs := sc.segmentAllocations(req.GetReq().GetPartitionIDs())
log.Debug("cluster segment distribution", zap.Int("len", len(segAllocs)))
for nodeID, segmentIDs := range segAllocs {
log.Debug("segments distribution", zap.Int64("nodeID", nodeID), zap.Int64s("segments", segmentIDs))
}
// TODO dispatch to local queryShardService query dml channel growing segments
// concurrent visiting nodes
@ -409,7 +427,6 @@ func (sc *ShardCluster) Search(ctx context.Context, req *querypb.SearchRequest)
for nodeID, segments := range segAllocs {
nodeReq := proto.Clone(req).(*querypb.SearchRequest)
nodeReq.DmlChannel = ""
nodeReq.SegmentIDs = segments
node, ok := sc.getNode(nodeID)
if !ok { // meta dismatch, report error
@ -441,7 +458,7 @@ func (sc *ShardCluster) Search(ctx context.Context, req *querypb.SearchRequest)
// Query performs query operation on shard cluster.
func (sc *ShardCluster) Query(ctx context.Context, req *querypb.QueryRequest) ([]*internalpb.RetrieveResults, error) {
if sc.state.Load() != int32(available) {
return nil, fmt.Errorf("SharcCluster for %s replicaID %d is no available", sc.vchannelName, sc.replicaID)
return nil, fmt.Errorf("ShardCluster for %s replicaID %d is no available", sc.vchannelName, sc.replicaID)
}
// handles only the dml channel part, segment ids is dispatch by cluster itself
@ -450,7 +467,7 @@ func (sc *ShardCluster) Query(ctx context.Context, req *querypb.QueryRequest) ([
}
// get node allocation
segAllocs := sc.segmentAllocations()
segAllocs := sc.segmentAllocations(req.GetReq().GetPartitionIDs())
// TODO dispatch to local queryShardService query dml channel growing segments
@ -465,7 +482,6 @@ func (sc *ShardCluster) Query(ctx context.Context, req *querypb.QueryRequest) ([
for nodeID, segments := range segAllocs {
nodeReq := proto.Clone(req).(*querypb.QueryRequest)
nodeReq.DmlChannel = ""
nodeReq.SegmentIDs = segments
node, ok := sc.getNode(nodeID)
if !ok { // meta dismatch, report error

View File

@ -0,0 +1,109 @@
package querynode
import (
"context"
"fmt"
"path"
"strconv"
"sync"
grpcquerynodeclient "github.com/milvus-io/milvus/internal/distributed/querynode/client"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
clientv3 "go.etcd.io/etcd/client/v3"
)
const (
ReplicaMetaPrefix = "queryCoord-ReplicaMeta"
)
// shardQueryNodeWrapper wraps a querynode to shardQueryNode and preventing it been closed
type shardQueryNodeWrapper struct {
*QueryNode
}
// Stop overrides default close method
func (w *shardQueryNodeWrapper) Stop() error { return nil }
// ShardClusterService maintains the online ShardCluster(leader) in this querynode.
type ShardClusterService struct {
client *clientv3.Client // etcd client for detectors
session *sessionutil.Session
node *QueryNode
clusters sync.Map // channel name => *shardCluster
}
// newShardClusterService returns a new shardClusterService
func newShardClusterService(client *clientv3.Client, session *sessionutil.Session, node *QueryNode) *ShardClusterService {
return &ShardClusterService{
node: node,
session: session,
client: client,
clusters: sync.Map{},
}
}
// addShardCluster adds shardCluster into service.
func (s *ShardClusterService) addShardCluster(collectionID, replicaID int64, vchannelName string) {
nodeDetector := NewEtcdShardNodeDetector(s.client, path.Join(Params.EtcdCfg.MetaRootPath, ReplicaMetaPrefix),
func() (map[int64]string, error) {
result := make(map[int64]string)
sessions, _, err := s.session.GetSessions(typeutil.QueryNodeRole)
if err != nil {
return nil, err
}
for _, session := range sessions {
result[session.ServerID] = session.Address
}
return result, nil
})
segmentDetector := NewEtcdShardSegmentDetector(s.client, path.Join(Params.EtcdCfg.MetaRootPath, util.SegmentMetaPrefix, strconv.FormatInt(collectionID, 10)))
cs := NewShardCluster(collectionID, replicaID, vchannelName, nodeDetector, segmentDetector,
func(nodeID int64, addr string) shardQueryNode {
if nodeID == s.session.ServerID {
// wrap node itself
return &shardQueryNodeWrapper{QueryNode: s.node}
}
ctx := context.Background()
qn, _ := grpcquerynodeclient.NewClient(ctx, addr)
return qn
})
s.clusters.Store(vchannelName, cs)
}
// getShardCluster gets shardCluster of specified vchannel if exists.
func (s *ShardClusterService) getShardCluster(vchannelName string) (*ShardCluster, bool) {
raw, ok := s.clusters.Load(vchannelName)
if !ok {
return nil, false
}
return raw.(*ShardCluster), true
}
// releaseShardCluster removes shardCluster from service and stops it.
func (s *ShardClusterService) releaseShardCluster(vchannelName string) error {
raw, ok := s.clusters.LoadAndDelete(vchannelName)
if !ok {
return fmt.Errorf("ShardCluster of channel: %s does not exists", vchannelName)
}
cs := raw.(*ShardCluster)
cs.Close()
return nil
}
// releaseCollection removes all shardCluster matching specified collectionID
func (s *ShardClusterService) releaseCollection(collectionID int64) {
s.clusters.Range(func(k, v interface{}) bool {
cs := v.(*ShardCluster)
if cs.collectionID == collectionID {
s.releaseShardCluster(k.(string))
}
return true
})
}

View File

@ -0,0 +1,34 @@
package querynode
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
func TestShardClusterService(t *testing.T) {
client := v3client.New(embedetcdServer.Server)
defer client.Close()
session := sessionutil.NewSession(context.Background(), "/by-dev/sessions/unittest/querynode/", client)
clusterService := newShardClusterService(client, session, nil)
assert.NotPanics(t, func() {
clusterService.addShardCluster(defaultCollectionID, defaultReplicaID, defaultDMLChannel)
})
shardCluster, ok := clusterService.getShardCluster(defaultDMLChannel)
assert.True(t, ok)
assert.NotNil(t, shardCluster)
_, ok = clusterService.getShardCluster("non-exist-channel")
assert.False(t, ok)
err := clusterService.releaseShardCluster(defaultDMLChannel)
assert.NoError(t, err)
err = clusterService.releaseShardCluster("non-exist-channel")
assert.Error(t, err)
}

View File

@ -61,7 +61,8 @@ func (m *mockShardQueryNode) Query(_ context.Context, _ *querypb.QueryRequest) (
return m.queryResult, m.queryErr
}
func (m *mockShardQueryNode) Stop() {
func (m *mockShardQueryNode) Stop() error {
return nil
}
func buildMockQueryNode(nodeID int64, addr string) shardQueryNode {

View File

@ -65,6 +65,7 @@ func (nd *etcdShardNodeDetector) Close() {
// watchNodes lists current online nodes and returns a channel for incoming events.
func (nd *etcdShardNodeDetector) watchNodes(collectionID int64, replicaID int64, vchannelName string) ([]nodeEvent, <-chan nodeEvent) {
log.Debug("nodeDetector watch", zap.Int64("collectionID", collectionID), zap.Int64("replicaID", replicaID), zap.String("vchannelName", vchannelName))
resp, err := nd.client.Get(context.Background(), nd.path, clientv3.WithPrefix())
if err != nil {
log.Warn("Etcd NodeDetector get replica info failed", zap.Error(err))
@ -264,5 +265,8 @@ func (nd *etcdShardNodeDetector) handleDelEvent(e *clientv3.Event, collectionID,
func (nd *etcdShardNodeDetector) parseReplicaInfo(bs []byte) (*milvuspb.ReplicaInfo, error) {
info := &milvuspb.ReplicaInfo{}
err := proto.Unmarshal(bs, info)
if err == nil {
log.Debug("ReplicaInfo", zap.Any("info", info))
}
return info, err
}

View File

@ -19,9 +19,6 @@ package querynode
import (
"context"
"fmt"
"io/ioutil"
"net/url"
"os"
"path"
"strconv"
"testing"
@ -33,41 +30,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
func startEmbedEtcdServer() (*embed.Etcd, error) {
dir, err := ioutil.TempDir(os.TempDir(), "milvus_ut")
if err != nil {
return nil, err
}
defer os.RemoveAll(dir)
config := embed.NewConfig()
config.Dir = os.TempDir()
config.LogLevel = "warn"
config.LogOutputs = []string{"default"}
u, err := url.Parse("http://localhost:2389")
if err != nil {
return nil, err
}
config.LCUrls = []url.URL{*u}
u, err = url.Parse("http://localhost:2390")
if err != nil {
return nil, err
}
config.LPUrls = []url.URL{*u}
return embed.StartEtcd(config)
}
func TestEtcdShardNodeDetector_watch(t *testing.T) {
etcdServer, err := startEmbedEtcdServer()
require.NoError(t, err)
defer etcdServer.Close()
client := v3client.New(etcdServer.Server)
client := v3client.New(embedetcdServer.Server)
defer client.Close()
type testCase struct {

View File

@ -0,0 +1,209 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querynode
import (
"context"
"sync"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/querypb"
"go.etcd.io/etcd/api/v3/mvccpb"
v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
)
// etcdShardSegmentDetector watch etcd prefix for segment event.
type etcdShardSegmentDetector struct {
client *clientv3.Client
path string
evtCh chan segmentEvent
wg sync.WaitGroup
closeCh chan struct{}
closeOnce sync.Once
}
// NewEtcdShardSegmentDetector returns a segmentDetector with provided etcd client and root path.
func NewEtcdShardSegmentDetector(client *clientv3.Client, rootPath string) *etcdShardSegmentDetector {
return &etcdShardSegmentDetector{
client: client,
path: rootPath,
evtCh: make(chan segmentEvent, 32),
closeCh: make(chan struct{}),
}
}
// Close perform closing procedure and notify all watcher to quit.
func (sd *etcdShardSegmentDetector) Close() {
sd.closeOnce.Do(func() {
close(sd.closeCh)
sd.wg.Wait()
close(sd.evtCh)
})
}
func (sd *etcdShardSegmentDetector) afterClose(fn func()) {
<-sd.closeCh
fn()
}
func (sd *etcdShardSegmentDetector) getCtx() context.Context {
ctx, cancel := context.WithCancel(context.Background())
go sd.afterClose(cancel)
return ctx
}
func (sd *etcdShardSegmentDetector) watchSegments(collectionID int64, replicaID int64, vchannelName string) ([]segmentEvent, <-chan segmentEvent) {
log.Debug("segmentDetector start watch", zap.Int64("collectionID", collectionID),
zap.Int64("replicaID", replicaID),
zap.String("vchannelName", vchannelName),
zap.String("rootPath", sd.path))
resp, err := sd.client.Get(context.Background(), sd.path, clientv3.WithPrefix())
if err != nil {
log.Warn("Etcd SegmentDetector get replica info failed", zap.Error(err))
panic(err)
}
var events []segmentEvent
for _, kv := range resp.Kvs {
info, err := sd.parseSegmentInfo(kv.Value)
if err != nil {
log.Warn("SegmentDetector failed to parse segmentInfo", zap.Error(err))
continue
}
if info.CollectionID != collectionID || info.GetDmChannel() != vchannelName {
continue
}
if inList(info.GetReplicaIds(), replicaID) {
events = append(events, segmentEvent{
eventType: segmentAdd,
segmentID: info.GetSegmentID(),
partitionID: info.GetPartitionID(),
nodeID: info.GetNodeID(),
state: segmentStateLoaded,
})
}
}
sd.wg.Add(1)
watchCh := sd.client.Watch(sd.getCtx(), sd.path, clientv3.WithRev(resp.Header.GetRevision()+1), clientv3.WithPrefix(), clientv3.WithPrevKV())
go sd.watch(watchCh, collectionID, replicaID, vchannelName)
return events, sd.evtCh
}
func (sd *etcdShardSegmentDetector) watch(ch clientv3.WatchChan, collectionID int64, replicaID int64, vchannel string) {
defer sd.wg.Done()
log.Debug("etcdSegmentDetector start watch")
for {
select {
case <-sd.closeCh:
log.Warn("Closed SegmentDetector watch loop quit", zap.Int64("collectionID", collectionID), zap.Int64("replicaID", replicaID))
return
case evt, ok := <-ch:
if !ok {
log.Warn("SegmentDetector event channel closed")
return
}
if err := evt.Err(); err != nil {
if err == v3rpc.ErrCompacted {
sd.wg.Add(1)
watchCh := sd.client.Watch(sd.getCtx(), sd.path, clientv3.WithPrefix())
go sd.watch(watchCh, collectionID, replicaID, vchannel)
return
}
}
for _, e := range evt.Events {
log.Debug("segment evt", zap.Any("evt", evt))
switch e.Type {
case mvccpb.PUT:
sd.handlePutEvent(e, collectionID, replicaID, vchannel)
case mvccpb.DELETE:
sd.handleDelEvent(e, collectionID, replicaID, vchannel)
}
}
}
}
}
func (sd *etcdShardSegmentDetector) handlePutEvent(e *clientv3.Event, collectionID int64, replicaID int64, vchannel string) {
info, err := sd.parseSegmentInfo(e.Kv.Value)
if err != nil {
log.Warn("Segment detector failed to parse event", zap.Any("event", e), zap.Error(err))
return
}
if info.GetCollectionID() != collectionID || vchannel != info.GetDmChannel() || !inList(info.GetReplicaIds(), replicaID) {
// ignore not match events
return
}
sd.evtCh <- segmentEvent{
eventType: segmentAdd,
segmentID: info.GetSegmentID(),
partitionID: info.GetPartitionID(),
nodeID: info.GetNodeID(),
state: segmentStateLoaded,
}
}
func (sd *etcdShardSegmentDetector) handleDelEvent(e *clientv3.Event, collectionID int64, replicaID int64, vchannel string) {
if e.PrevKv == nil {
return
}
info, err := sd.parseSegmentInfo(e.PrevKv.Value)
if err != nil {
log.Warn("SegmentDetector failed to parse delete event", zap.Any("event", e), zap.Error(err))
return
}
if info.GetCollectionID() != collectionID || vchannel != info.GetDmChannel() || !inList(info.GetReplicaIds(), replicaID) {
// ignore not match events
return
}
sd.evtCh <- segmentEvent{
eventType: segmentDel,
segmentID: info.GetSegmentID(),
partitionID: info.GetPartitionID(),
nodeID: info.GetNodeID(),
state: segmentStateOffline,
}
}
// TODO maybe should use other proto
func (sd *etcdShardSegmentDetector) parseSegmentInfo(bs []byte) (*querypb.SegmentInfo, error) {
info := &querypb.SegmentInfo{}
err := proto.Unmarshal(bs, info)
if err == nil {
log.Debug("segment info", zap.Any("segmentInfo", info))
}
return info, err
}
func inList(list []int64, target int64) bool {
for _, i := range list {
if i == target {
return true
}
}
return false
}

View File

@ -0,0 +1,298 @@
package querynode
import (
"context"
"fmt"
"path"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
func TestEtcdShardSegmentDetector_watch(t *testing.T) {
client := v3client.New(embedetcdServer.Server)
defer client.Close()
type testCase struct {
name string
oldRecords map[string]*querypb.SegmentInfo
updateRecords map[string]*querypb.SegmentInfo
delRecords []string
expectInitEvents []segmentEvent
expectUpdateEvents []segmentEvent
oldGarbage map[string]string
newGarbage map[string]string
collectionID int64
replicaID int64
channel string
}
cases := []testCase{
{
name: "normal init",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
oldRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
},
oldGarbage: map[string]string{
"noice1": string([]byte{23, 21, 20}),
},
expectInitEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
},
{
name: "normal init with other segments",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
oldRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
"segment_2": {
CollectionID: 1,
SegmentID: 2,
NodeID: 1,
DmChannel: "dml_1_1_v1",
ReplicaIds: []int64{1, 2},
},
"segment_3": {
CollectionID: 2,
SegmentID: 3,
NodeID: 2,
DmChannel: "dml_3_2_v0",
ReplicaIds: []int64{1, 2},
},
"segment_4": { // may not happen
CollectionID: 1,
SegmentID: 4,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{2},
},
},
expectInitEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
},
{
name: "normal add segment",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
updateRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
},
oldGarbage: map[string]string{
"noice1": string([]byte{23, 21, 20}),
},
expectUpdateEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
},
{
name: "normal add segment with other replica",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
updateRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
"segment_2": {
CollectionID: 1,
SegmentID: 2,
NodeID: 2,
DmChannel: "dml_2_1_v1",
ReplicaIds: []int64{2},
},
},
newGarbage: map[string]string{
"noice1": string([]byte{23, 21, 20}),
},
expectUpdateEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
},
{
name: "normal remove segment",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
oldRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
},
oldGarbage: map[string]string{
"noice1": string([]byte{23, 21, 20}),
},
expectInitEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
delRecords: []string{
"segment_1", "noice1",
},
expectUpdateEvents: []segmentEvent{
{
eventType: segmentDel,
segmentID: 1,
nodeID: 1,
state: segmentStateOffline,
},
},
},
{
name: "normal remove segment with other replica",
collectionID: 1,
replicaID: 1,
channel: "dml_1_1_v0",
oldRecords: map[string]*querypb.SegmentInfo{
"segment_1": {
CollectionID: 1,
SegmentID: 1,
NodeID: 1,
DmChannel: "dml_1_1_v0",
ReplicaIds: []int64{1, 2},
},
"segment_2": {
CollectionID: 1,
SegmentID: 2,
NodeID: 2,
DmChannel: "dml_2_1_v1",
ReplicaIds: []int64{2},
},
},
expectInitEvents: []segmentEvent{
{
eventType: segmentAdd,
segmentID: 1,
nodeID: 1,
state: segmentStateLoaded,
},
},
delRecords: []string{
"segment_1", "segment_2",
},
expectUpdateEvents: []segmentEvent{
{
eventType: segmentDel,
segmentID: 1,
nodeID: 1,
state: segmentStateOffline,
},
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
suffix := funcutil.RandomString(6)
rootPath := fmt.Sprintf("qn_shard_segment_detector_watch_%s", suffix)
ctx := context.Background()
// put existing records
for key, info := range tc.oldRecords {
bs, err := proto.Marshal(info)
require.NoError(t, err)
_, err = client.Put(ctx, path.Join(rootPath, key), string(bs))
require.NoError(t, err)
}
// put garbage data
for key, value := range tc.oldGarbage {
_, err := client.Put(ctx, path.Join(rootPath, key), value)
require.NoError(t, err)
}
sd := NewEtcdShardSegmentDetector(client, rootPath)
segments, ch := sd.watchSegments(tc.collectionID, tc.replicaID, tc.channel)
assert.ElementsMatch(t, tc.expectInitEvents, segments)
// update etcd kvs to generate events
go func() {
for key, info := range tc.updateRecords {
bs, err := proto.Marshal(info)
require.NoError(t, err)
_, err = client.Put(ctx, path.Join(rootPath, key), string(bs))
require.NoError(t, err)
}
for _, k := range tc.delRecords {
_, err := client.Delete(ctx, path.Join(rootPath, k))
require.NoError(t, err)
}
for key, value := range tc.newGarbage {
_, err := client.Put(ctx, path.Join(rootPath, key), value)
require.NoError(t, err)
}
// need a way to detect event processed
time.Sleep(time.Millisecond * 100)
sd.Close()
}()
var newEvents []segmentEvent
for evt := range ch {
newEvents = append(newEvents, evt)
}
assert.ElementsMatch(t, tc.expectUpdateEvents, newEvents)
})
}
}

View File

@ -61,7 +61,7 @@ func (s *streaming) close() {
s.replica.freeAll()
}
func (s *streaming) retrieve(collID UniqueID, partIDs []UniqueID, plan *RetrievePlan) ([]*segcorepb.RetrieveResults, []UniqueID, []UniqueID, error) {
func (s *streaming) retrieve(collID UniqueID, partIDs []UniqueID, plan *RetrievePlan, filters ...func(segment *Segment) bool) ([]*segcorepb.RetrieveResults, []UniqueID, []UniqueID, error) {
retrieveResults := make([]*segcorepb.RetrieveResults, 0)
retrieveSegmentIDs := make([]UniqueID, 0)
@ -91,6 +91,15 @@ func (s *streaming) retrieve(collID UniqueID, partIDs []UniqueID, plan *Retrieve
if err != nil {
return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err
}
filtered := false
for _, filter := range filters {
if !filter(seg) {
filtered = true
}
}
if filtered {
continue
}
result, err := seg.retrieve(plan)
if err != nil {
return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err

View File

@ -27,7 +27,6 @@ import (
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
@ -133,61 +132,29 @@ func (r *addQueryChannelTask) Execute(ctx context.Context) error {
zap.Any("collectionID", r.req.CollectionID))
collectionID := r.req.CollectionID
if r.node.queryService == nil {
errMsg := "null query service, collectionID = " + fmt.Sprintln(collectionID)
return errors.New(errMsg)
if r.node.queryShardService == nil {
return fmt.Errorf("null query shard service, collectionID %d", collectionID)
}
if r.node.queryService.hasQueryCollection(collectionID) {
log.Debug("queryCollection has been existed when addQueryChannel",
zap.Any("collectionID", collectionID),
)
return nil
}
qc := r.node.queryShardService.getQueryChannel(collectionID)
log.Debug("add query channel for collection", zap.Int64("collectionID", collectionID))
// add search collection
err := r.node.queryService.addQueryCollection(collectionID)
if err != nil {
return err
}
log.Debug("add query collection", zap.Any("collectionID", collectionID))
// add request channel
sc, err := r.node.queryService.getQueryCollection(collectionID)
if err != nil {
return err
}
consumeChannels := []string{r.req.QueryChannel}
consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.QueryNodeID)
sc.queryMsgStream.AsConsumer(consumeChannels, consumeSubName)
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
if r.req.SeekPosition == nil || len(r.req.SeekPosition.MsgID) == 0 {
// as consumer
log.Debug("QueryNode AsConsumer", zap.Strings("channels", consumeChannels), zap.String("sub name", consumeSubName))
} else {
// seek query channel
err = sc.queryMsgStream.Seek([]*internalpb.MsgPosition{r.req.SeekPosition})
if err != nil {
return err
}
log.Debug("QueryNode seek query channel: ", zap.Any("consumeChannels", consumeChannels),
zap.String("seek position", string(r.req.SeekPosition.MsgID)))
err := qc.AsConsumer(r.req.QueryChannel, consumeSubName, r.req.SeekPosition)
if err != nil {
log.Warn("query channel as consumer failed", zap.Int64("collectionID", collectionID), zap.String("channel", r.req.QueryChannel), zap.Error(err))
return err
}
// add result channel
// producerChannels := []string{r.req.QueryResultChannel}
// sc.queryResultMsgStream.AsProducer(producerChannels)
// log.Debug("QueryNode AsProducer", zap.Strings("channels", producerChannels))
// init global sealed segments
for _, segment := range r.req.GlobalSealedSegments {
sc.globalSegmentManager.addGlobalSegmentInfo(segment)
}
/*
for _, segment := range r.req.GlobalSealedSegments {
sc.globalSegmentManager.addGlobalSegmentInfo(segment)
}*/
// start queryCollection, message stream need to asConsumer before start
sc.start()
log.Debug("start query collection", zap.Any("collectionID", collectionID))
qc.Start()
log.Debug("start query channel", zap.Int64("collectionID", collectionID))
log.Debug("addQueryChannelTask done",
zap.Any("collectionID", r.req.CollectionID),
@ -254,6 +221,7 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
log.Debug("Starting WatchDmChannels ...",
zap.String("collectionName", w.req.Schema.Name),
zap.Int64("collectionID", collectionID),
zap.Int64("replicaID", w.req.GetReplicaID()),
zap.Any("load type", lType),
zap.Strings("vChannels", vChannels),
zap.Strings("pChannels", pChannels),
@ -263,6 +231,11 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
sCol := w.node.streaming.replica.addCollection(collectionID, w.req.Schema)
hCol := w.node.historical.replica.addCollection(collectionID, w.req.Schema)
//add shard cluster
for _, vchannel := range vChannels {
w.node.ShardClusterService.addShardCluster(w.req.GetCollectionID(), w.req.GetReplicaID(), vchannel)
}
// load growing segments
unFlushedSegments := make([]*queryPb.SegmentLoadInfo, 0)
unFlushedSegmentIDs := make([]UniqueID, 0)
@ -453,15 +426,21 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
w.node.tSafeReplica.addTSafe(channel)
}
// add tSafe watcher if queryCollection exists
qc, err := w.node.queryService.getQueryCollection(collectionID)
if err == nil {
for _, channel := range vChannels {
err = qc.addTSafeWatcher(channel)
if err != nil {
// tSafe have been exist, not error
log.Warn(err.Error())
}
// add tsafe watch in query shard if exists
for _, dmlChannel := range vChannels {
if !w.node.queryShardService.hasQueryShard(dmlChannel) {
//TODO add replica id in req
w.node.queryShardService.addQueryShard(collectionID, dmlChannel, 0)
}
qs, err := w.node.queryShardService.getQueryShard(dmlChannel)
if err != nil {
log.Warn("failed to get query shard", zap.String("dmlChannel", dmlChannel), zap.Error(err))
continue
}
err = qs.watchDMLTSafe()
if err != nil {
log.Warn("failed to start query shard watch dml tsafe", zap.Error(err))
}
}
@ -591,15 +570,26 @@ func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
w.node.tSafeReplica.addTSafe(channel)
}
// add tSafe watcher if queryCollection exists
qc, err := w.node.queryService.getQueryCollection(collectionID)
if err == nil {
for _, channel := range vDeltaChannels {
err = qc.addTSafeWatcher(channel)
if err != nil {
// tSafe have been existed, not error
log.Warn(err.Error())
}
// add tsafe watch in query shard if exists
for _, channel := range vDeltaChannels {
dmlChannel, err := funcutil.ConvertChannelName(channel, Params.CommonCfg.RootCoordDelta, Params.CommonCfg.RootCoordDml)
if err != nil {
log.Warn("failed to convert delta channel to dml", zap.String("channel", channel), zap.Error(err))
continue
}
if !w.node.queryShardService.hasQueryShard(dmlChannel) {
//TODO add replica id in req
w.node.queryShardService.addQueryShard(collectionID, dmlChannel, 0)
}
qs, err := w.node.queryShardService.getQueryShard(dmlChannel)
if err != nil {
log.Warn("failed to get query shard", zap.String("dmlChannel", dmlChannel), zap.Error(err))
continue
}
err = qs.watchDeltaTSafe()
if err != nil {
log.Warn("failed to start query shard watch delta tsafe", zap.Error(err))
}
}
@ -712,10 +702,7 @@ func (r *releaseCollectionTask) Execute(ctx context.Context) error {
zap.Any("collectionID", r.req.CollectionID),
)
// remove query collection
// queryCollection and Collection would be deleted in releaseCollection,
// so we don't need to remove the tSafeWatcher or channel manually.
r.node.queryService.stopQueryCollection(r.req.CollectionID)
r.node.queryShardService.releaseCollection(r.req.CollectionID)
err := r.releaseReplica(r.node.streaming.replica, replicaStreaming)
if err != nil {

View File

@ -82,14 +82,10 @@ func TestTask_AddQueryChannel(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
})
t.Run("test execute has queryCollection", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
@ -98,12 +94,11 @@ func TestTask_AddQueryChannel(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
})
t.Run("test execute nil query service", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
node.queryService = nil
node.queryShardService = nil
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
@ -114,23 +109,24 @@ func TestTask_AddQueryChannel(t *testing.T) {
assert.Error(t, err)
})
t.Run("test execute add query collection failed", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
/*
t.Run("test execute add query collection failed", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
err = node.streaming.replica.removeCollection(defaultCollectionID)
assert.NoError(t, err)
err = node.historical.replica.removeCollection(defaultCollectionID)
assert.NoError(t, err)
err = node.streaming.replica.removeCollection(defaultCollectionID)
assert.NoError(t, err)
err = node.historical.replica.removeCollection(defaultCollectionID)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
err = task.Execute(ctx)
assert.Error(t, err)
})
err = task.Execute(ctx)
assert.Error(t, err)
})*/
t.Run("test execute init global sealed segments", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
@ -169,28 +165,31 @@ func TestTask_AddQueryChannel(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
})
/*
t.Run("test execute seek error", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
t.Run("test execute seek error", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
t.Run("test execute seek error", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
position := &internalpb.MsgPosition{
ChannelName: genQueryChannel(),
MsgID: []byte{1, 2, 3, 4, 5, 6, 7, 8},
MsgGroup: defaultSubName,
Timestamp: 0,
}
position := &internalpb.MsgPosition{
ChannelName: genQueryChannel(),
MsgID: []byte{1, 2, 3, 4, 5, 6, 7, 8},
MsgGroup: defaultSubName,
Timestamp: 0,
}
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task.req.SeekPosition = position
task.req.SeekPosition = position
err = task.Execute(ctx)
assert.Error(t, err)
})
err = task.Execute(ctx)
assert.Error(t, err)
})*/
}
func TestTask_watchDmChannelsTask(t *testing.T) {
@ -687,8 +686,9 @@ func TestTask_releaseCollectionTask(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)
/*
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)*/
task := releaseCollectionTask{
req: genReleaseCollectionRequest(),
@ -719,8 +719,9 @@ func TestTask_releaseCollectionTask(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)
/*
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)*/
col, err := node.historical.replica.getCollectionByID(defaultCollectionID)
assert.NoError(t, err)
@ -776,8 +777,9 @@ func TestTask_releasePartitionTask(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)
/*
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)*/
task := releasePartitionsTask{
req: genReleasePartitionsRequest(),
@ -820,8 +822,9 @@ func TestTask_releasePartitionTask(t *testing.T) {
col.addVDeltaChannels([]Channel{defaultDeltaChannel})
col.setLoadType(loadTypePartition)
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)
/*
err = node.queryService.addQueryCollection(defaultCollectionID)
assert.NoError(t, err)*/
task := releasePartitionsTask{
req: genReleasePartitionsRequest(),