mirror of https://github.com/milvus-io/milvus.git
Refactor query node and query service (#5751)
Signed-off-by: xige-16 <xi.ge@zilliz.com> Signed-off-by: bigsheeper <yihao.dai@zilliz.com> Co-authored-by: xige-16 <xi.ge@zilliz.com> Co-authored-by: yudong.cai <yudong.cai@zilliz.com>pull/5794/head
parent
4165b761c9
commit
cdbc6d2c94
1
go.mod
1
go.mod
|
@ -42,6 +42,7 @@ require (
|
|||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
|
||||
golang.org/x/text v0.3.3
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
|
||||
google.golang.org/grpc v1.31.0
|
||||
google.golang.org/protobuf v1.25.0 // indirect
|
||||
|
|
|
@ -225,7 +225,7 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
|
|||
"StateNone\020\000\022\014\n\010Unissued\020\001\022\016\n\nInProgress\020"
|
||||
"\002\022\014\n\010Finished\020\003\022\n\n\006Failed\020\004*X\n\014SegmentSt"
|
||||
"ate\022\024\n\020SegmentStateNone\020\000\022\014\n\010NotExist\020\001\022"
|
||||
"\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004*\272\006"
|
||||
"\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004*\363\007"
|
||||
"\n\007MsgType\022\r\n\tUndefined\020\000\022\024\n\020CreateCollec"
|
||||
"tion\020d\022\022\n\016DropCollection\020e\022\021\n\rHasCollect"
|
||||
"ion\020f\022\026\n\022DescribeCollection\020g\022\023\n\017ShowCol"
|
||||
|
@ -235,21 +235,25 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
|
|||
"\014HasPartition\020\312\001\022\026\n\021DescribePartition\020\313\001"
|
||||
"\022\023\n\016ShowPartitions\020\314\001\022\023\n\016LoadPartitions\020"
|
||||
"\315\001\022\026\n\021ReleasePartitions\020\316\001\022\021\n\014ShowSegmen"
|
||||
"ts\020\372\001\022\024\n\017DescribeSegment\020\373\001\022\020\n\013CreateInd"
|
||||
"ex\020\254\002\022\022\n\rDescribeIndex\020\255\002\022\016\n\tDropIndex\020\256"
|
||||
"\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005Flush\020\222\003\022"
|
||||
"\013\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003\022\022\n\rGetIn"
|
||||
"dexState\020\366\003\022\032\n\025GetIndexBuildProgress\020\367\003\022"
|
||||
"\034\n\027GetCollectionStatistics\020\370\003\022\033\n\026GetPart"
|
||||
"itionStatistics\020\371\003\022\r\n\010Retrieve\020\372\003\022\023\n\016Ret"
|
||||
"rieveResult\020\373\003\022\020\n\013SegmentInfo\020\330\004\022\r\n\010Time"
|
||||
"Tick\020\260\t\022\023\n\016QueryNodeStats\020\261\t\022\016\n\tLoadInde"
|
||||
"x\020\262\t\022\016\n\tRequestID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n"
|
||||
"\017AllocateSegment\020\265\t\022\026\n\021SegmentStatistics"
|
||||
"\020\266\t\022\025\n\020SegmentFlushDone\020\267\t*\"\n\007DslType\022\007\n"
|
||||
"\003Dsl\020\000\022\016\n\nBoolExprV1\020\001B5Z3github.com/mil"
|
||||
"vus-io/milvus/internal/proto/commonpbb\006p"
|
||||
"roto3"
|
||||
"ts\020\372\001\022\024\n\017DescribeSegment\020\373\001\022\021\n\014LoadSegme"
|
||||
"nts\020\374\001\022\024\n\017ReleaseSegments\020\375\001\022\024\n\017HandoffS"
|
||||
"egments\020\376\001\022\030\n\023LoadBalanceSegments\020\377\001\022\020\n\013"
|
||||
"CreateIndex\020\254\002\022\022\n\rDescribeIndex\020\255\002\022\016\n\tDr"
|
||||
"opIndex\020\256\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005"
|
||||
"Flush\020\222\003\022\013\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003"
|
||||
"\022\022\n\rGetIndexState\020\366\003\022\032\n\025GetIndexBuildPro"
|
||||
"gress\020\367\003\022\034\n\027GetCollectionStatistics\020\370\003\022\033"
|
||||
"\n\026GetPartitionStatistics\020\371\003\022\r\n\010Retrieve\020"
|
||||
"\372\003\022\023\n\016RetrieveResult\020\373\003\022\024\n\017WatchDmChanne"
|
||||
"ls\020\374\003\022\025\n\020RemoveDmChannels\020\375\003\022\027\n\022WatchQue"
|
||||
"ryChannels\020\376\003\022\030\n\023RemoveQueryChannels\020\377\003\022"
|
||||
"\020\n\013SegmentInfo\020\330\004\022\r\n\010TimeTick\020\260\t\022\023\n\016Quer"
|
||||
"yNodeStats\020\261\t\022\016\n\tLoadIndex\020\262\t\022\016\n\tRequest"
|
||||
"ID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n\017AllocateSegmen"
|
||||
"t\020\265\t\022\026\n\021SegmentStatistics\020\266\t\022\025\n\020SegmentF"
|
||||
"lushDone\020\267\t*\"\n\007DslType\022\007\n\003Dsl\020\000\022\016\n\nBoolE"
|
||||
"xprV1\020\001B5Z3github.com/milvus-io/milvus/i"
|
||||
"nternal/proto/commonpbb\006proto3"
|
||||
;
|
||||
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_common_2eproto_deps[1] = {
|
||||
};
|
||||
|
@ -264,7 +268,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_com
|
|||
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_common_2eproto_once;
|
||||
static bool descriptor_table_common_2eproto_initialized = false;
|
||||
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_common_2eproto = {
|
||||
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 2045,
|
||||
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 2230,
|
||||
&descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_sccs, descriptor_table_common_2eproto_deps, 6, 0,
|
||||
schemas, file_default_instances, TableStruct_common_2eproto::offsets,
|
||||
file_level_metadata_common_2eproto, 6, file_level_enum_descriptors_common_2eproto, file_level_service_descriptors_common_2eproto,
|
||||
|
@ -371,6 +375,10 @@ bool MsgType_IsValid(int value) {
|
|||
case 206:
|
||||
case 250:
|
||||
case 251:
|
||||
case 252:
|
||||
case 253:
|
||||
case 254:
|
||||
case 255:
|
||||
case 300:
|
||||
case 301:
|
||||
case 302:
|
||||
|
@ -385,6 +393,10 @@ bool MsgType_IsValid(int value) {
|
|||
case 505:
|
||||
case 506:
|
||||
case 507:
|
||||
case 508:
|
||||
case 509:
|
||||
case 510:
|
||||
case 511:
|
||||
case 600:
|
||||
case 1200:
|
||||
case 1201:
|
||||
|
|
|
@ -215,6 +215,10 @@ enum MsgType : int {
|
|||
ReleasePartitions = 206,
|
||||
ShowSegments = 250,
|
||||
DescribeSegment = 251,
|
||||
LoadSegments = 252,
|
||||
ReleaseSegments = 253,
|
||||
HandoffSegments = 254,
|
||||
LoadBalanceSegments = 255,
|
||||
CreateIndex = 300,
|
||||
DescribeIndex = 301,
|
||||
DropIndex = 302,
|
||||
|
@ -229,6 +233,10 @@ enum MsgType : int {
|
|||
GetPartitionStatistics = 505,
|
||||
Retrieve = 506,
|
||||
RetrieveResult = 507,
|
||||
WatchDmChannels = 508,
|
||||
RemoveDmChannels = 509,
|
||||
WatchQueryChannels = 510,
|
||||
RemoveQueryChannels = 511,
|
||||
SegmentInfo = 600,
|
||||
TimeTick = 1200,
|
||||
QueryNodeStats = 1201,
|
||||
|
|
|
@ -246,9 +246,9 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
|
|||
return ret.(*commonpb.Status), err
|
||||
}
|
||||
|
||||
func (c *Client) CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error) {
|
||||
func (c *Client) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
|
||||
ret, err := c.recall(func() (interface{}, error) {
|
||||
return c.grpcClient.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{})
|
||||
return c.grpcClient.CreateQueryChannel(ctx, req)
|
||||
})
|
||||
return ret.(*querypb.CreateQueryChannelResponse), err
|
||||
}
|
||||
|
|
|
@ -272,7 +272,7 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
|
|||
}
|
||||
|
||||
func (s *Server) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
|
||||
return s.queryservice.CreateQueryChannel(ctx)
|
||||
return s.queryservice.CreateQueryChannel(ctx, req)
|
||||
}
|
||||
|
||||
func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
|
|
|
@ -81,6 +81,10 @@ func NewMqMsgStream(ctx context.Context,
|
|||
|
||||
func (ms *mqMsgStream) AsProducer(channels []string) {
|
||||
for _, channel := range channels {
|
||||
if len(channel) == 0 {
|
||||
log.Error("MsgStream asProducer's channel is a empty string")
|
||||
break
|
||||
}
|
||||
fn := func() error {
|
||||
pp, err := ms.client.CreateProducer(mqclient.ProducerOptions{Topic: channel})
|
||||
if err != nil {
|
||||
|
|
|
@ -16,9 +16,11 @@ import (
|
|||
"errors"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
)
|
||||
|
||||
type MsgType = commonpb.MsgType
|
||||
|
@ -882,3 +884,52 @@ func (sim *SegmentInfoMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
|||
SegmentMsg: segMsg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/////////////////////////////////////////LoadBalanceSegments//////////////////////////////////////////
|
||||
type LoadBalanceSegmentsMsg struct {
|
||||
BaseMsg
|
||||
querypb.LoadBalanceSegments
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) TraceCtx() context.Context {
|
||||
return l.BaseMsg.Ctx
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) SetTraceCtx(ctx context.Context) {
|
||||
l.BaseMsg.Ctx = ctx
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) ID() UniqueID {
|
||||
return l.Base.MsgID
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) Type() MsgType {
|
||||
return l.Base.MsgType
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
load := input.(*LoadBalanceSegmentsMsg)
|
||||
loadReq := &load.LoadBalanceSegments
|
||||
mb, err := proto.Marshal(loadReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mb, nil
|
||||
}
|
||||
|
||||
func (l *LoadBalanceSegmentsMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
loadReq := querypb.LoadBalanceSegments{}
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &loadReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
loadMsg := &LoadBalanceSegmentsMsg{LoadBalanceSegments: loadReq}
|
||||
loadMsg.BeginTimestamp = loadReq.Base.Timestamp
|
||||
loadMsg.EndTimestamp = loadReq.Base.Timestamp
|
||||
|
||||
return loadMsg, nil
|
||||
}
|
||||
|
|
|
@ -63,6 +63,7 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
|
|||
flushCompletedMsg := FlushCompletedMsg{}
|
||||
queryNodeSegStatsMsg := QueryNodeStatsMsg{}
|
||||
segmentStatisticsMsg := SegmentStatisticsMsg{}
|
||||
loadBalanceSegmentsMsg := LoadBalanceSegmentsMsg{}
|
||||
|
||||
p := &ProtoUnmarshalDispatcher{}
|
||||
p.TempMap = make(map[commonpb.MsgType]UnmarshalFunc)
|
||||
|
@ -80,6 +81,7 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
|
|||
p.TempMap[commonpb.MsgType_SegmentInfo] = segmentInfoMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_SegmentFlushDone] = flushCompletedMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_SegmentStatistics] = segmentStatisticsMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_LoadBalanceSegments] = loadBalanceSegmentsMsg.Unmarshal
|
||||
|
||||
return p
|
||||
}
|
||||
|
|
|
@ -94,6 +94,10 @@ enum MsgType {
|
|||
/* DEFINE REQUESTS: SEGMENT */
|
||||
ShowSegments = 250;
|
||||
DescribeSegment = 251;
|
||||
LoadSegments = 252;
|
||||
ReleaseSegments = 253;
|
||||
HandoffSegments = 254;
|
||||
LoadBalanceSegments = 255;
|
||||
|
||||
/* DEFINITION REQUESTS: INDEX */
|
||||
CreateIndex = 300;
|
||||
|
@ -114,6 +118,10 @@ enum MsgType {
|
|||
GetPartitionStatistics = 505;
|
||||
Retrieve = 506;
|
||||
RetrieveResult = 507;
|
||||
WatchDmChannels = 508;
|
||||
RemoveDmChannels = 509;
|
||||
WatchQueryChannels = 510;
|
||||
RemoveQueryChannels = 511;
|
||||
|
||||
/* DATA SERVICE */
|
||||
SegmentInfo = 600;
|
||||
|
|
|
@ -208,8 +208,12 @@ const (
|
|||
MsgType_LoadPartitions MsgType = 205
|
||||
MsgType_ReleasePartitions MsgType = 206
|
||||
// DEFINE REQUESTS: SEGMENT
|
||||
MsgType_ShowSegments MsgType = 250
|
||||
MsgType_DescribeSegment MsgType = 251
|
||||
MsgType_ShowSegments MsgType = 250
|
||||
MsgType_DescribeSegment MsgType = 251
|
||||
MsgType_LoadSegments MsgType = 252
|
||||
MsgType_ReleaseSegments MsgType = 253
|
||||
MsgType_HandoffSegments MsgType = 254
|
||||
MsgType_LoadBalanceSegments MsgType = 255
|
||||
// DEFINITION REQUESTS: INDEX
|
||||
MsgType_CreateIndex MsgType = 300
|
||||
MsgType_DescribeIndex MsgType = 301
|
||||
|
@ -227,6 +231,10 @@ const (
|
|||
MsgType_GetPartitionStatistics MsgType = 505
|
||||
MsgType_Retrieve MsgType = 506
|
||||
MsgType_RetrieveResult MsgType = 507
|
||||
MsgType_WatchDmChannels MsgType = 508
|
||||
MsgType_RemoveDmChannels MsgType = 509
|
||||
MsgType_WatchQueryChannels MsgType = 510
|
||||
MsgType_RemoveQueryChannels MsgType = 511
|
||||
// DATA SERVICE
|
||||
MsgType_SegmentInfo MsgType = 600
|
||||
// SYSTEM CONTROL
|
||||
|
@ -259,6 +267,10 @@ var MsgType_name = map[int32]string{
|
|||
206: "ReleasePartitions",
|
||||
250: "ShowSegments",
|
||||
251: "DescribeSegment",
|
||||
252: "LoadSegments",
|
||||
253: "ReleaseSegments",
|
||||
254: "HandoffSegments",
|
||||
255: "LoadBalanceSegments",
|
||||
300: "CreateIndex",
|
||||
301: "DescribeIndex",
|
||||
302: "DropIndex",
|
||||
|
@ -273,6 +285,10 @@ var MsgType_name = map[int32]string{
|
|||
505: "GetPartitionStatistics",
|
||||
506: "Retrieve",
|
||||
507: "RetrieveResult",
|
||||
508: "WatchDmChannels",
|
||||
509: "RemoveDmChannels",
|
||||
510: "WatchQueryChannels",
|
||||
511: "RemoveQueryChannels",
|
||||
600: "SegmentInfo",
|
||||
1200: "TimeTick",
|
||||
1201: "QueryNodeStats",
|
||||
|
@ -303,6 +319,10 @@ var MsgType_value = map[string]int32{
|
|||
"ReleasePartitions": 206,
|
||||
"ShowSegments": 250,
|
||||
"DescribeSegment": 251,
|
||||
"LoadSegments": 252,
|
||||
"ReleaseSegments": 253,
|
||||
"HandoffSegments": 254,
|
||||
"LoadBalanceSegments": 255,
|
||||
"CreateIndex": 300,
|
||||
"DescribeIndex": 301,
|
||||
"DropIndex": 302,
|
||||
|
@ -317,6 +337,10 @@ var MsgType_value = map[string]int32{
|
|||
"GetPartitionStatistics": 505,
|
||||
"Retrieve": 506,
|
||||
"RetrieveResult": 507,
|
||||
"WatchDmChannels": 508,
|
||||
"RemoveDmChannels": 509,
|
||||
"WatchQueryChannels": 510,
|
||||
"RemoveQueryChannels": 511,
|
||||
"SegmentInfo": 600,
|
||||
"TimeTick": 1200,
|
||||
"QueryNodeStats": 1201,
|
||||
|
@ -661,79 +685,84 @@ func init() {
|
|||
func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
|
||||
|
||||
var fileDescriptor_555bd8c177793206 = []byte{
|
||||
// 1183 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xcb, 0x6e, 0x1b, 0xc7,
|
||||
0x12, 0xd5, 0x90, 0x34, 0xa9, 0x29, 0x52, 0x54, 0xab, 0xf5, 0xb0, 0xec, 0x2b, 0x5c, 0x18, 0x5a,
|
||||
0x19, 0x02, 0x2c, 0xdd, 0x7b, 0x8d, 0x9b, 0xac, 0xbc, 0xb0, 0x38, 0x96, 0x4c, 0xd8, 0x7a, 0x64,
|
||||
0x28, 0x1b, 0x46, 0x36, 0xc6, 0x68, 0xa6, 0x44, 0x75, 0x3c, 0xd3, 0xcd, 0x74, 0xf7, 0xd8, 0xe2,
|
||||
0x5f, 0x24, 0xfe, 0x87, 0xec, 0x92, 0x20, 0xef, 0x00, 0xf9, 0x82, 0xbc, 0xd7, 0xf9, 0x84, 0x7c,
|
||||
0x40, 0x9e, 0xf6, 0x26, 0xa8, 0x9e, 0x21, 0x39, 0x01, 0x9c, 0x5d, 0xd7, 0xe9, 0xaa, 0xd3, 0xa7,
|
||||
0xab, 0xfa, 0xcc, 0x40, 0x27, 0x56, 0x59, 0xa6, 0xe4, 0xf6, 0x48, 0x2b, 0xab, 0xf8, 0x72, 0x26,
|
||||
0xd2, 0xa7, 0xb9, 0x29, 0xa2, 0xed, 0x62, 0x6b, 0xf3, 0x31, 0x34, 0x07, 0x36, 0xb2, 0xb9, 0xe1,
|
||||
0xb7, 0x00, 0x50, 0x6b, 0xa5, 0x1f, 0xc7, 0x2a, 0xc1, 0x75, 0xef, 0x9a, 0x77, 0xbd, 0xfb, 0xbf,
|
||||
0x7f, 0x6f, 0xbf, 0xa2, 0x66, 0xfb, 0x0e, 0xa5, 0xf5, 0x54, 0x82, 0xa1, 0x8f, 0x93, 0x25, 0x5f,
|
||||
0x83, 0xa6, 0xc6, 0xc8, 0x28, 0xb9, 0x5e, 0xbb, 0xe6, 0x5d, 0xf7, 0xc3, 0x32, 0xda, 0x7c, 0x0d,
|
||||
0x3a, 0xf7, 0x70, 0xfc, 0x30, 0x4a, 0x73, 0x3c, 0x8e, 0x84, 0xe6, 0x0c, 0xea, 0x4f, 0x70, 0xec,
|
||||
0xf8, 0xfd, 0x90, 0x96, 0x7c, 0x05, 0x2e, 0x3d, 0xa5, 0xed, 0xb2, 0xb0, 0x08, 0x36, 0x37, 0xa0,
|
||||
0xb1, 0x9b, 0xaa, 0xd3, 0xd9, 0x2e, 0x55, 0x74, 0x26, 0xbb, 0x37, 0xa0, 0x75, 0x3b, 0x49, 0x34,
|
||||
0x1a, 0xc3, 0xbb, 0x50, 0x13, 0xa3, 0x92, 0xaf, 0x26, 0x46, 0x9c, 0x43, 0x63, 0xa4, 0xb4, 0x75,
|
||||
0x6c, 0xf5, 0xd0, 0xad, 0x37, 0x9f, 0x7b, 0xd0, 0x3a, 0x30, 0xc3, 0xdd, 0xc8, 0x20, 0x7f, 0x1d,
|
||||
0xe6, 0x33, 0x33, 0x7c, 0x6c, 0xc7, 0xa3, 0xc9, 0x2d, 0x37, 0x5e, 0x79, 0xcb, 0x03, 0x33, 0x3c,
|
||||
0x19, 0x8f, 0x30, 0x6c, 0x65, 0xc5, 0x82, 0x94, 0x64, 0x66, 0xd8, 0x0f, 0x4a, 0xe6, 0x22, 0xe0,
|
||||
0x1b, 0xe0, 0x5b, 0x91, 0xa1, 0xb1, 0x51, 0x36, 0x5a, 0xaf, 0x5f, 0xf3, 0xae, 0x37, 0xc2, 0x19,
|
||||
0xc0, 0xaf, 0xc2, 0xbc, 0x51, 0xb9, 0x8e, 0xb1, 0x1f, 0xac, 0x37, 0x5c, 0xd9, 0x34, 0xde, 0xbc,
|
||||
0x05, 0xfe, 0x81, 0x19, 0xde, 0xc5, 0x28, 0x41, 0xcd, 0xff, 0x03, 0x8d, 0xd3, 0xc8, 0x14, 0x8a,
|
||||
0xda, 0xff, 0xac, 0x88, 0x6e, 0x10, 0xba, 0xcc, 0xad, 0xf7, 0x1a, 0xe0, 0x4f, 0x27, 0xc1, 0xdb,
|
||||
0xd0, 0x1a, 0xe4, 0x71, 0x8c, 0xc6, 0xb0, 0x39, 0xbe, 0x0c, 0x8b, 0x0f, 0x24, 0x5e, 0x8c, 0x30,
|
||||
0xb6, 0x98, 0xb8, 0x1c, 0xe6, 0xf1, 0x25, 0x58, 0xe8, 0x29, 0x29, 0x31, 0xb6, 0x7b, 0x91, 0x48,
|
||||
0x31, 0x61, 0x35, 0xbe, 0x02, 0xec, 0x18, 0x75, 0x26, 0x8c, 0x11, 0x4a, 0x06, 0x28, 0x05, 0x26,
|
||||
0xac, 0xce, 0x2f, 0xc3, 0x72, 0x4f, 0xa5, 0x29, 0xc6, 0x56, 0x28, 0x79, 0xa8, 0xec, 0x9d, 0x0b,
|
||||
0x61, 0xac, 0x61, 0x0d, 0xa2, 0xed, 0xa7, 0x29, 0x0e, 0xa3, 0xf4, 0xb6, 0x1e, 0xe6, 0x19, 0x4a,
|
||||
0xcb, 0x2e, 0x11, 0x47, 0x09, 0x06, 0x22, 0x43, 0x49, 0x4c, 0xac, 0x55, 0x41, 0xfb, 0x32, 0xc1,
|
||||
0x0b, 0xea, 0x1f, 0x9b, 0xe7, 0x57, 0x60, 0xb5, 0x44, 0x2b, 0x07, 0x44, 0x19, 0x32, 0x9f, 0x2f,
|
||||
0x42, 0xbb, 0xdc, 0x3a, 0x39, 0x3a, 0xbe, 0xc7, 0xa0, 0xc2, 0x10, 0xaa, 0x67, 0x21, 0xc6, 0x4a,
|
||||
0x27, 0xac, 0x5d, 0x91, 0xf0, 0x10, 0x63, 0xab, 0x74, 0x3f, 0x60, 0x1d, 0x12, 0x5c, 0x82, 0x03,
|
||||
0x8c, 0x74, 0x7c, 0x1e, 0xa2, 0xc9, 0x53, 0xcb, 0x16, 0x38, 0x83, 0xce, 0x9e, 0x48, 0xf1, 0x50,
|
||||
0xd9, 0x3d, 0x95, 0xcb, 0x84, 0x75, 0x79, 0x17, 0xe0, 0x00, 0x6d, 0x54, 0x76, 0x60, 0x91, 0x8e,
|
||||
0xed, 0x45, 0xf1, 0x39, 0x96, 0x00, 0xe3, 0x6b, 0xc0, 0x7b, 0x91, 0x94, 0xca, 0xf6, 0x34, 0x46,
|
||||
0x16, 0xf7, 0x54, 0x9a, 0xa0, 0x66, 0x4b, 0x24, 0xe7, 0x6f, 0xb8, 0x48, 0x91, 0xf1, 0x59, 0x76,
|
||||
0x80, 0x29, 0x4e, 0xb3, 0x97, 0x67, 0xd9, 0x25, 0x4e, 0xd9, 0x2b, 0x24, 0x7e, 0x37, 0x17, 0x69,
|
||||
0xe2, 0x5a, 0x52, 0x8c, 0x65, 0x95, 0x34, 0x96, 0xe2, 0x0f, 0xef, 0xf7, 0x07, 0x27, 0x6c, 0x8d,
|
||||
0xaf, 0xc2, 0x52, 0x89, 0x1c, 0xa0, 0xd5, 0x22, 0x76, 0xcd, 0xbb, 0x4c, 0x52, 0x8f, 0x72, 0x7b,
|
||||
0x74, 0x76, 0x80, 0x99, 0xd2, 0x63, 0xb6, 0x4e, 0x03, 0x75, 0x4c, 0x93, 0x11, 0xb1, 0x2b, 0x9c,
|
||||
0xc3, 0x42, 0x10, 0x84, 0xf8, 0x76, 0x8e, 0xc6, 0x86, 0x51, 0x8c, 0xec, 0xe7, 0xd6, 0xd6, 0x23,
|
||||
0x00, 0x97, 0x46, 0x36, 0x47, 0xce, 0xa1, 0x3b, 0x8b, 0x0e, 0x95, 0x44, 0x36, 0xc7, 0x3b, 0x30,
|
||||
0xff, 0x40, 0x0a, 0x63, 0x72, 0x4c, 0x98, 0x47, 0x2d, 0xea, 0xcb, 0x63, 0xad, 0x86, 0xe4, 0x2e,
|
||||
0x56, 0xa3, 0xdd, 0x3d, 0x21, 0x85, 0x39, 0x77, 0x8f, 0x03, 0xa0, 0x59, 0xf6, 0xaa, 0xb1, 0xf5,
|
||||
0x08, 0x3a, 0x03, 0x1c, 0xd2, 0x3b, 0x28, 0xb8, 0x57, 0x80, 0x55, 0xe3, 0x19, 0xfb, 0x54, 0xa1,
|
||||
0x47, 0xef, 0x74, 0x5f, 0xab, 0x67, 0x42, 0x0e, 0x59, 0x8d, 0xc8, 0x06, 0x18, 0xa5, 0x8e, 0xb8,
|
||||
0x0d, 0xad, 0xbd, 0x34, 0x77, 0xa7, 0x34, 0xb6, 0xbe, 0x6a, 0x3a, 0xbf, 0x3a, 0xdb, 0x2d, 0x80,
|
||||
0xff, 0x40, 0x26, 0x78, 0x26, 0x24, 0x26, 0x6c, 0xce, 0xb5, 0xd6, 0x8d, 0x60, 0xf6, 0x84, 0x58,
|
||||
0x42, 0xd7, 0x0a, 0xb4, 0x1a, 0x55, 0x30, 0xa4, 0xfe, 0xdc, 0x8d, 0x4c, 0x05, 0x3a, 0xa3, 0x79,
|
||||
0x05, 0x68, 0x62, 0x2d, 0x4e, 0xab, 0xe5, 0x43, 0x9a, 0xcc, 0xe0, 0x5c, 0x3d, 0x9b, 0x61, 0x86,
|
||||
0x9d, 0xd3, 0x49, 0xfb, 0x68, 0x07, 0x63, 0x63, 0x31, 0xeb, 0x29, 0x79, 0x26, 0x86, 0x86, 0x09,
|
||||
0x3a, 0xe9, 0xbe, 0x8a, 0x92, 0x4a, 0xf9, 0x5b, 0x34, 0xb1, 0x10, 0x53, 0x8c, 0x4c, 0x95, 0xf5,
|
||||
0x09, 0x5f, 0x81, 0xc5, 0x42, 0xea, 0x71, 0xa4, 0xad, 0x70, 0xe0, 0xd7, 0x9e, 0x9b, 0x91, 0x56,
|
||||
0xa3, 0x19, 0xf6, 0x0d, 0x79, 0xb3, 0x73, 0x37, 0x32, 0x33, 0xe8, 0x5b, 0x8f, 0xaf, 0xc1, 0xd2,
|
||||
0x44, 0xea, 0x0c, 0xff, 0xce, 0xe3, 0xcb, 0xd0, 0x25, 0xa9, 0x53, 0xcc, 0xb0, 0xef, 0x1d, 0x48,
|
||||
0xa2, 0x2a, 0xe0, 0x0f, 0x8e, 0xa1, 0x54, 0x55, 0xc1, 0x7f, 0x74, 0x87, 0x11, 0x43, 0x39, 0x2a,
|
||||
0xc3, 0x5e, 0x78, 0xa4, 0x74, 0x72, 0x58, 0x09, 0xb3, 0x97, 0x1e, 0x67, 0xd0, 0x2e, 0xf4, 0xbb,
|
||||
0x17, 0xc3, 0xde, 0xaf, 0x39, 0xed, 0x65, 0x5e, 0x81, 0x7d, 0x50, 0xe3, 0x5d, 0xf0, 0xe9, 0x3e,
|
||||
0x45, 0xfc, 0x61, 0x8d, 0xb7, 0xa1, 0xd9, 0x97, 0x06, 0xb5, 0x65, 0xef, 0xd0, 0x54, 0x9b, 0x85,
|
||||
0x05, 0xd8, 0xbb, 0xf4, 0x76, 0x2e, 0xb9, 0x11, 0xb3, 0xe7, 0x6e, 0xa3, 0x30, 0x2b, 0xfb, 0xa5,
|
||||
0xee, 0x14, 0x55, 0x9d, 0xfb, 0x6b, 0x9d, 0x4e, 0xda, 0x47, 0x3b, 0x7b, 0xaa, 0xec, 0xb7, 0x3a,
|
||||
0xbf, 0x0a, 0xab, 0x13, 0xcc, 0xf9, 0x68, 0xfa, 0x48, 0x7f, 0xaf, 0xf3, 0x0d, 0xb8, 0xbc, 0x8f,
|
||||
0x76, 0xd6, 0x7e, 0x2a, 0x12, 0xc6, 0x8a, 0xd8, 0xb0, 0x3f, 0xea, 0xfc, 0x5f, 0xb0, 0xb6, 0x8f,
|
||||
0x76, 0xda, 0x86, 0xca, 0xe6, 0x9f, 0x75, 0xbe, 0x00, 0xf3, 0x21, 0x19, 0x0d, 0x9f, 0x22, 0x7b,
|
||||
0x51, 0xa7, 0x5e, 0x4e, 0xc2, 0x52, 0xce, 0xcb, 0x3a, 0xb5, 0xa2, 0x6c, 0x4c, 0x5f, 0x9e, 0x29,
|
||||
0xf6, 0x53, 0x83, 0xaa, 0x4e, 0x44, 0x86, 0x27, 0x22, 0x7e, 0xc2, 0x3e, 0xf2, 0xa9, 0xea, 0x8d,
|
||||
0x1c, 0xf5, 0xf8, 0x50, 0x25, 0x48, 0xf4, 0x86, 0x7d, 0xec, 0x53, 0x6b, 0x68, 0x2c, 0x45, 0x6b,
|
||||
0x3e, 0x71, 0x71, 0x69, 0xce, 0x7e, 0xc0, 0x3e, 0xa5, 0x8f, 0x1e, 0x94, 0xf1, 0xc9, 0xe0, 0x88,
|
||||
0x7d, 0xe6, 0xd3, 0x1c, 0x6e, 0xa7, 0xa9, 0x8a, 0x23, 0x3b, 0x9d, 0xc3, 0xe7, 0x3e, 0x0d, 0xb2,
|
||||
0xe2, 0xab, 0x52, 0xf8, 0x17, 0x3e, 0x5f, 0x9d, 0xfa, 0xcd, 0xb5, 0x35, 0x20, 0xbf, 0x7d, 0xe9,
|
||||
0x6f, 0x6d, 0x42, 0x2b, 0x30, 0xa9, 0xf3, 0x4e, 0x0b, 0xea, 0x81, 0x49, 0xd9, 0x1c, 0x99, 0x7a,
|
||||
0x57, 0xa9, 0xf4, 0xce, 0xc5, 0x48, 0x3f, 0xfc, 0x2f, 0xf3, 0x76, 0xff, 0xff, 0xe6, 0xcd, 0xa1,
|
||||
0xb0, 0xe7, 0xf9, 0x29, 0xfd, 0x5b, 0x76, 0x8a, 0x9f, 0xcd, 0x0d, 0xa1, 0xca, 0xd5, 0x8e, 0x90,
|
||||
0x16, 0xb5, 0x8c, 0xd2, 0x1d, 0xf7, 0xff, 0xd9, 0x29, 0xfe, 0x3f, 0xa3, 0xd3, 0xd3, 0xa6, 0x8b,
|
||||
0x6f, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x77, 0x76, 0x64, 0xb2, 0x59, 0x08, 0x00, 0x00,
|
||||
// 1263 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xcb, 0x72, 0xdb, 0x36,
|
||||
0x17, 0x36, 0x45, 0xc5, 0x32, 0x61, 0xd9, 0x86, 0xe1, 0x4b, 0x9c, 0xfc, 0x9e, 0x7f, 0x32, 0x5e,
|
||||
0x65, 0x3c, 0x13, 0xbb, 0x6d, 0xa6, 0xed, 0x2a, 0x8b, 0x58, 0x8c, 0x6d, 0x4d, 0xe2, 0x4b, 0x29,
|
||||
0x27, 0xcd, 0x74, 0x93, 0x81, 0xc9, 0x63, 0x09, 0x0d, 0x09, 0xa8, 0x00, 0xe8, 0x58, 0x6f, 0xd1,
|
||||
0xe6, 0x1d, 0xba, 0x6b, 0x3b, 0xbd, 0xf7, 0x15, 0x7a, 0x5f, 0xf7, 0x11, 0xba, 0xef, 0x3d, 0xd7,
|
||||
0xce, 0x01, 0x29, 0x89, 0x99, 0x49, 0x77, 0x3c, 0xdf, 0xb9, 0xe0, 0xc3, 0x77, 0xce, 0x01, 0x49,
|
||||
0x33, 0x56, 0x59, 0xa6, 0xe4, 0x46, 0x5f, 0x2b, 0xab, 0xd8, 0x42, 0x26, 0xd2, 0xd3, 0xdc, 0x14,
|
||||
0xd6, 0x46, 0xe1, 0x5a, 0xbb, 0x47, 0x26, 0x3b, 0x96, 0xdb, 0xdc, 0xb0, 0x6b, 0x84, 0x80, 0xd6,
|
||||
0x4a, 0xdf, 0x8b, 0x55, 0x02, 0x2b, 0xde, 0x25, 0xef, 0xf2, 0xec, 0x6b, 0xff, 0xdf, 0x78, 0x49,
|
||||
0xce, 0xc6, 0x0d, 0x0c, 0x6b, 0xa9, 0x04, 0xa2, 0x00, 0x86, 0x9f, 0x6c, 0x99, 0x4c, 0x6a, 0xe0,
|
||||
0x46, 0xc9, 0x95, 0xda, 0x25, 0xef, 0x72, 0x10, 0x95, 0xd6, 0xda, 0x1b, 0xa4, 0x79, 0x13, 0x06,
|
||||
0x77, 0x78, 0x9a, 0xc3, 0x21, 0x17, 0x9a, 0x51, 0xe2, 0xdf, 0x87, 0x81, 0xab, 0x1f, 0x44, 0xf8,
|
||||
0xc9, 0x16, 0xc9, 0xb9, 0x53, 0x74, 0x97, 0x89, 0x85, 0xb1, 0xb6, 0x4a, 0xea, 0x5b, 0xa9, 0x3a,
|
||||
0x1e, 0x7b, 0x31, 0xa3, 0x39, 0xf4, 0x5e, 0x21, 0x8d, 0xeb, 0x49, 0xa2, 0xc1, 0x18, 0x36, 0x4b,
|
||||
0x6a, 0xa2, 0x5f, 0xd6, 0xab, 0x89, 0x3e, 0x63, 0xa4, 0xde, 0x57, 0xda, 0xba, 0x6a, 0x7e, 0xe4,
|
||||
0xbe, 0xd7, 0x1e, 0x7a, 0xa4, 0xb1, 0x67, 0xba, 0x5b, 0xdc, 0x00, 0x7b, 0x93, 0x4c, 0x65, 0xa6,
|
||||
0x7b, 0xcf, 0x0e, 0xfa, 0xc3, 0x5b, 0xae, 0xbe, 0xf4, 0x96, 0x7b, 0xa6, 0x7b, 0x34, 0xe8, 0x43,
|
||||
0xd4, 0xc8, 0x8a, 0x0f, 0x64, 0x92, 0x99, 0x6e, 0x3b, 0x2c, 0x2b, 0x17, 0x06, 0x5b, 0x25, 0x81,
|
||||
0x15, 0x19, 0x18, 0xcb, 0xb3, 0xfe, 0x8a, 0x7f, 0xc9, 0xbb, 0x5c, 0x8f, 0xc6, 0x00, 0xbb, 0x48,
|
||||
0xa6, 0x8c, 0xca, 0x75, 0x0c, 0xed, 0x70, 0xa5, 0xee, 0xd2, 0x46, 0xf6, 0xda, 0x35, 0x12, 0xec,
|
||||
0x99, 0xee, 0x2e, 0xf0, 0x04, 0x34, 0x7b, 0x85, 0xd4, 0x8f, 0xb9, 0x29, 0x18, 0x4d, 0xff, 0x37,
|
||||
0x23, 0xbc, 0x41, 0xe4, 0x22, 0xd7, 0x3f, 0xac, 0x93, 0x60, 0xd4, 0x09, 0x36, 0x4d, 0x1a, 0x9d,
|
||||
0x3c, 0x8e, 0xc1, 0x18, 0x3a, 0xc1, 0x16, 0xc8, 0xdc, 0x6d, 0x09, 0x67, 0x7d, 0x88, 0x2d, 0x24,
|
||||
0x2e, 0x86, 0x7a, 0x6c, 0x9e, 0xcc, 0xb4, 0x94, 0x94, 0x10, 0xdb, 0x6d, 0x2e, 0x52, 0x48, 0x68,
|
||||
0x8d, 0x2d, 0x12, 0x7a, 0x08, 0x3a, 0x13, 0xc6, 0x08, 0x25, 0x43, 0x90, 0x02, 0x12, 0xea, 0xb3,
|
||||
0xf3, 0x64, 0xa1, 0xa5, 0xd2, 0x14, 0x62, 0x2b, 0x94, 0xdc, 0x57, 0xf6, 0xc6, 0x99, 0x30, 0xd6,
|
||||
0xd0, 0x3a, 0x96, 0x6d, 0xa7, 0x29, 0x74, 0x79, 0x7a, 0x5d, 0x77, 0xf3, 0x0c, 0xa4, 0xa5, 0xe7,
|
||||
0xb0, 0x46, 0x09, 0x86, 0x22, 0x03, 0x89, 0x95, 0x68, 0xa3, 0x82, 0xb6, 0x65, 0x02, 0x67, 0xa8,
|
||||
0x1f, 0x9d, 0x62, 0x17, 0xc8, 0x52, 0x89, 0x56, 0x0e, 0xe0, 0x19, 0xd0, 0x80, 0xcd, 0x91, 0xe9,
|
||||
0xd2, 0x75, 0x74, 0x70, 0x78, 0x93, 0x92, 0x4a, 0x85, 0x48, 0x3d, 0x88, 0x20, 0x56, 0x3a, 0xa1,
|
||||
0xd3, 0x15, 0x0a, 0x77, 0x20, 0xb6, 0x4a, 0xb7, 0x43, 0xda, 0x44, 0xc2, 0x25, 0xd8, 0x01, 0xae,
|
||||
0xe3, 0x5e, 0x04, 0x26, 0x4f, 0x2d, 0x9d, 0x61, 0x94, 0x34, 0xb7, 0x45, 0x0a, 0xfb, 0xca, 0x6e,
|
||||
0xab, 0x5c, 0x26, 0x74, 0x96, 0xcd, 0x12, 0xb2, 0x07, 0x96, 0x97, 0x0a, 0xcc, 0xe1, 0xb1, 0x2d,
|
||||
0x1e, 0xf7, 0xa0, 0x04, 0x28, 0x5b, 0x26, 0xac, 0xc5, 0xa5, 0x54, 0xb6, 0xa5, 0x81, 0x5b, 0xd8,
|
||||
0x56, 0x69, 0x02, 0x9a, 0xce, 0x23, 0x9d, 0x17, 0x70, 0x91, 0x02, 0x65, 0xe3, 0xe8, 0x10, 0x52,
|
||||
0x18, 0x45, 0x2f, 0x8c, 0xa3, 0x4b, 0x1c, 0xa3, 0x17, 0x91, 0xfc, 0x56, 0x2e, 0xd2, 0xc4, 0x49,
|
||||
0x52, 0xb4, 0x65, 0x09, 0x39, 0x96, 0xe4, 0xf7, 0x6f, 0xb5, 0x3b, 0x47, 0x74, 0x99, 0x2d, 0x91,
|
||||
0xf9, 0x12, 0xd9, 0x03, 0xab, 0x45, 0xec, 0xc4, 0x3b, 0x8f, 0x54, 0x0f, 0x72, 0x7b, 0x70, 0xb2,
|
||||
0x07, 0x99, 0xd2, 0x03, 0xba, 0x82, 0x0d, 0x75, 0x95, 0x86, 0x2d, 0xa2, 0x17, 0x18, 0x23, 0x33,
|
||||
0x61, 0x18, 0xc1, 0x7b, 0x39, 0x18, 0x1b, 0xf1, 0x18, 0xe8, 0xaf, 0x8d, 0xf5, 0xbb, 0x84, 0xb8,
|
||||
0x30, 0x5c, 0x73, 0x60, 0x8c, 0xcc, 0x8e, 0xad, 0x7d, 0x25, 0x81, 0x4e, 0xb0, 0x26, 0x99, 0xba,
|
||||
0x2d, 0x85, 0x31, 0x39, 0x24, 0xd4, 0x43, 0x89, 0xda, 0xf2, 0x50, 0xab, 0x2e, 0x6e, 0x17, 0xad,
|
||||
0xa1, 0x77, 0x5b, 0x48, 0x61, 0x7a, 0x6e, 0x38, 0x08, 0x99, 0x2c, 0xb5, 0xaa, 0xaf, 0xdf, 0x25,
|
||||
0xcd, 0x0e, 0x74, 0x71, 0x0e, 0x8a, 0xda, 0x8b, 0x84, 0x56, 0xed, 0x71, 0xf5, 0x11, 0x43, 0x0f,
|
||||
0xe7, 0x74, 0x47, 0xab, 0x07, 0x42, 0x76, 0x69, 0x0d, 0x8b, 0x75, 0x80, 0xa7, 0xae, 0xf0, 0x34,
|
||||
0x69, 0x6c, 0xa7, 0xb9, 0x3b, 0xa5, 0xbe, 0xfe, 0x5b, 0xc3, 0xed, 0xab, 0x5b, 0xbb, 0x19, 0x12,
|
||||
0xdc, 0x96, 0x09, 0x9c, 0x08, 0x09, 0x09, 0x9d, 0x70, 0xd2, 0xba, 0x16, 0x8c, 0x47, 0x88, 0x26,
|
||||
0x78, 0xad, 0x50, 0xab, 0x7e, 0x05, 0x03, 0xd4, 0x67, 0x97, 0x9b, 0x0a, 0x74, 0x82, 0xfd, 0x0a,
|
||||
0xc1, 0xc4, 0x5a, 0x1c, 0x57, 0xd3, 0xbb, 0xd8, 0x99, 0x4e, 0x4f, 0x3d, 0x18, 0x63, 0x86, 0xf6,
|
||||
0xf0, 0xa4, 0x1d, 0xb0, 0x9d, 0x81, 0xb1, 0x90, 0xb5, 0x94, 0x3c, 0x11, 0x5d, 0x43, 0x05, 0x9e,
|
||||
0x74, 0x4b, 0xf1, 0xa4, 0x92, 0xfe, 0x2e, 0x76, 0x2c, 0x82, 0x14, 0xb8, 0xa9, 0x56, 0xbd, 0xcf,
|
||||
0x16, 0xc9, 0x5c, 0x41, 0xf5, 0x90, 0x6b, 0x2b, 0x1c, 0xf8, 0xad, 0xe7, 0x7a, 0xa4, 0x55, 0x7f,
|
||||
0x8c, 0x7d, 0x87, 0xbb, 0xd9, 0xdc, 0xe5, 0x66, 0x0c, 0x7d, 0xef, 0xb1, 0x65, 0x32, 0x3f, 0xa4,
|
||||
0x3a, 0xc6, 0x7f, 0xf0, 0xd8, 0x02, 0x99, 0x45, 0xaa, 0x23, 0xcc, 0xd0, 0x1f, 0x1d, 0x88, 0xa4,
|
||||
0x2a, 0xe0, 0x4f, 0xae, 0x42, 0xc9, 0xaa, 0x82, 0xff, 0xec, 0x0e, 0xc3, 0x0a, 0x65, 0xab, 0x0c,
|
||||
0x7d, 0xe4, 0x21, 0xd3, 0xe1, 0x61, 0x25, 0x4c, 0x1f, 0xbb, 0x40, 0xac, 0x3a, 0x0a, 0x7c, 0xe2,
|
||||
0x02, 0xcb, 0x9a, 0x23, 0xf4, 0xa9, 0x43, 0x77, 0xb9, 0x4c, 0xd4, 0xc9, 0xc9, 0x08, 0x7d, 0xe6,
|
||||
0xb1, 0x15, 0xb2, 0x80, 0xe9, 0x5b, 0x3c, 0xe5, 0x32, 0x1e, 0xc7, 0x3f, 0xf7, 0x18, 0x25, 0xd3,
|
||||
0x85, 0x30, 0x6e, 0x14, 0xe9, 0x47, 0x35, 0x27, 0x4a, 0x49, 0xa0, 0xc0, 0x3e, 0xae, 0xb1, 0x59,
|
||||
0x12, 0xa0, 0x50, 0x85, 0xfd, 0x49, 0x8d, 0x4d, 0x93, 0xc9, 0xb6, 0x34, 0xa0, 0x2d, 0x7d, 0x1f,
|
||||
0xc7, 0x65, 0xb2, 0xd8, 0x2d, 0xfa, 0x01, 0x0e, 0xe5, 0x39, 0x37, 0x3b, 0xf4, 0xa1, 0x73, 0x14,
|
||||
0xaf, 0x00, 0xfd, 0xdd, 0x77, 0x57, 0xad, 0x3e, 0x09, 0x7f, 0xf8, 0x78, 0xd2, 0x0e, 0xd8, 0xf1,
|
||||
0x0e, 0xd0, 0x3f, 0x7d, 0x76, 0x91, 0x2c, 0x0d, 0x31, 0xb7, 0xa0, 0xa3, 0xe9, 0xff, 0xcb, 0x67,
|
||||
0xab, 0xe4, 0xfc, 0x0e, 0xd8, 0x71, 0x5f, 0x31, 0x49, 0x18, 0x2b, 0x62, 0x43, 0xff, 0xf6, 0xd9,
|
||||
0xff, 0xc8, 0xf2, 0x0e, 0xd8, 0x91, 0xbe, 0x15, 0xe7, 0x3f, 0x3e, 0x9b, 0x21, 0x53, 0x11, 0x6e,
|
||||
0x30, 0x9c, 0x02, 0x7d, 0xe4, 0x63, 0x93, 0x86, 0x66, 0x49, 0xe7, 0xb1, 0x8f, 0xd2, 0xbd, 0xcd,
|
||||
0x6d, 0xdc, 0x0b, 0xb3, 0x56, 0x8f, 0x4b, 0x09, 0xa9, 0xa1, 0x4f, 0x7c, 0xb6, 0x44, 0x68, 0x04,
|
||||
0x99, 0x3a, 0x85, 0x0a, 0xfc, 0x14, 0x5f, 0x66, 0xe6, 0x82, 0xdf, 0xca, 0x41, 0x0f, 0x46, 0x8e,
|
||||
0x67, 0x3e, 0x4a, 0x5d, 0xc4, 0xbf, 0xe8, 0x79, 0xee, 0xa3, 0xd4, 0xa5, 0xf2, 0x6d, 0x79, 0xa2,
|
||||
0xe8, 0x2f, 0x75, 0x64, 0x75, 0x24, 0x32, 0x38, 0x12, 0xf1, 0x7d, 0xfa, 0x69, 0x80, 0xac, 0x5c,
|
||||
0xd2, 0xbe, 0x4a, 0x00, 0xe9, 0x1b, 0xfa, 0x59, 0x80, 0xd2, 0x63, 0xeb, 0x0a, 0xe9, 0x3f, 0x77,
|
||||
0x76, 0xf9, 0xaa, 0xb4, 0x43, 0xfa, 0x05, 0xbe, 0xd6, 0xa4, 0xb4, 0x8f, 0x3a, 0x07, 0xf4, 0xcb,
|
||||
0x00, 0xaf, 0x71, 0x3d, 0x4d, 0x55, 0xcc, 0xed, 0x68, 0x80, 0xbe, 0x0a, 0x70, 0x02, 0x2b, 0x0f,
|
||||
0x42, 0x29, 0xcc, 0xd7, 0x01, 0x5e, 0xaf, 0xc4, 0x5d, 0xdb, 0x42, 0x7c, 0x28, 0xbe, 0x09, 0xd6,
|
||||
0xd7, 0x48, 0x23, 0x34, 0xa9, 0x5b, 0xfa, 0x06, 0xf1, 0x43, 0x93, 0xd2, 0x09, 0x7c, 0x8d, 0xb6,
|
||||
0x94, 0x4a, 0x6f, 0x9c, 0xf5, 0xf5, 0x9d, 0x57, 0xa9, 0xb7, 0xf5, 0xfa, 0x3b, 0x57, 0xbb, 0xc2,
|
||||
0xf6, 0xf2, 0x63, 0xfc, 0x29, 0x6e, 0x16, 0x7f, 0xc9, 0x2b, 0x42, 0x95, 0x5f, 0x9b, 0x42, 0x5a,
|
||||
0xd0, 0x92, 0xa7, 0x9b, 0xee, 0xc7, 0xb9, 0x59, 0xfc, 0x38, 0xfb, 0xc7, 0xc7, 0x93, 0xce, 0xbe,
|
||||
0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x16, 0x2d, 0x7a, 0x6b, 0x12, 0x09, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -7,7 +7,6 @@ option go_package = "github.com/milvus-io/milvus/internal/proto/querypb";
|
|||
import "common.proto";
|
||||
import "milvus.proto";
|
||||
import "internal.proto";
|
||||
import "data_service.proto";
|
||||
import "schema.proto";
|
||||
|
||||
service QueryService {
|
||||
|
@ -45,6 +44,7 @@ service QueryNode {
|
|||
rpc GetSegmentInfo(GetSegmentInfoRequest) returns (GetSegmentInfoResponse) {}
|
||||
}
|
||||
|
||||
//--------------------query service proto------------------
|
||||
message RegisterNodeRequest {
|
||||
common.MsgBase base = 1;
|
||||
common.Address address = 2;
|
||||
|
@ -65,19 +65,6 @@ message ShowCollectionsResponse {
|
|||
repeated int64 collectionIDs = 2;
|
||||
}
|
||||
|
||||
message LoadCollectionRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
schema.CollectionSchema schema = 4;
|
||||
}
|
||||
|
||||
message ReleaseCollectionRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
}
|
||||
|
||||
message ShowPartitionsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
|
@ -89,31 +76,17 @@ message ShowPartitionsResponse {
|
|||
repeated int64 partitionIDs = 2;
|
||||
}
|
||||
|
||||
enum PartitionState {
|
||||
NotExist = 0;
|
||||
NotPresent = 1;
|
||||
OnDisk = 2;
|
||||
PartialInMemory = 3;
|
||||
InMemory = 4;
|
||||
PartialInGPU = 5;
|
||||
InGPU = 6;
|
||||
}
|
||||
|
||||
message PartitionStates {
|
||||
int64 partitionID = 1;
|
||||
PartitionState state = 2;
|
||||
}
|
||||
|
||||
message GetPartitionStatesRequest {
|
||||
message LoadCollectionRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
repeated int64 partitionIDs = 4;
|
||||
schema.CollectionSchema schema = 4;
|
||||
}
|
||||
|
||||
message GetPartitionStatesResponse {
|
||||
common.Status status = 1;
|
||||
repeated PartitionStates partition_descriptions = 2;
|
||||
message ReleaseCollectionRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
}
|
||||
|
||||
message LoadPartitionsRequest {
|
||||
|
@ -131,65 +104,41 @@ message ReleasePartitionsRequest {
|
|||
repeated int64 partitionIDs = 4;
|
||||
}
|
||||
|
||||
message CreateQueryChannelRequest {
|
||||
int64 collectionID = 1;
|
||||
}
|
||||
|
||||
message CreateQueryChannelResponse {
|
||||
common.Status status = 1;
|
||||
string request_channel = 2;
|
||||
string result_channel = 3;
|
||||
}
|
||||
|
||||
message AddQueryChannelRequest {
|
||||
common.MsgBase base = 1;
|
||||
string request_channelID = 2;
|
||||
string result_channelID = 3;
|
||||
}
|
||||
|
||||
message RemoveQueryChannelRequest {
|
||||
common.Status status = 1;
|
||||
common.MsgBase base = 2;
|
||||
string request_channelID = 3;
|
||||
string result_channelID = 4;
|
||||
}
|
||||
|
||||
message WatchDmChannelInfo {
|
||||
string channelID = 1;
|
||||
internal.MsgPosition pos = 2;
|
||||
repeated int64 excluded_segments = 3;
|
||||
}
|
||||
|
||||
message WatchDmChannelsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 collectionID = 2;
|
||||
repeated string channelIDs = 3;
|
||||
repeated WatchDmChannelInfo infos = 4;
|
||||
}
|
||||
|
||||
message LoadSegmentsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
int64 partitionID = 4;
|
||||
repeated int64 segmentIDs = 5;
|
||||
repeated int64 fieldIDs = 6;
|
||||
repeated data.SegmentStateInfo segment_states = 7;
|
||||
schema.CollectionSchema schema = 8;
|
||||
}
|
||||
|
||||
message ReleaseSegmentsRequest {
|
||||
message GetPartitionStatesRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 dbID = 2;
|
||||
int64 collectionID = 3;
|
||||
repeated int64 partitionIDs = 4;
|
||||
repeated int64 segmentIDs = 5;
|
||||
}
|
||||
|
||||
message SegmentInfo {
|
||||
int64 segmentID = 1;
|
||||
int64 collectionID = 2;
|
||||
int64 partitionID = 3;
|
||||
int64 mem_size = 4;
|
||||
int64 num_rows = 5;
|
||||
string index_name = 6;
|
||||
int64 indexID = 7;
|
||||
enum PartitionState {
|
||||
NotExist = 0;
|
||||
NotPresent = 1;
|
||||
OnDisk = 2;
|
||||
PartialInMemory = 3;
|
||||
InMemory = 4;
|
||||
PartialInGPU = 5;
|
||||
InGPU = 6;
|
||||
}
|
||||
|
||||
message PartitionStates {
|
||||
int64 partitionID = 1;
|
||||
PartitionState state = 2;
|
||||
}
|
||||
|
||||
message GetPartitionStatesResponse {
|
||||
common.Status status = 1;
|
||||
repeated PartitionStates partition_descriptions = 2;
|
||||
}
|
||||
|
||||
message GetSegmentInfoRequest {
|
||||
|
@ -197,10 +146,183 @@ message GetSegmentInfoRequest {
|
|||
repeated int64 segmentIDs = 2;
|
||||
}
|
||||
|
||||
message SegmentInfo {
|
||||
int64 segmentID = 1;
|
||||
int64 collectionID = 2;
|
||||
int64 partitionID = 3;
|
||||
int64 nodeID = 4;
|
||||
int64 mem_size = 5;
|
||||
int64 num_rows = 6;
|
||||
string index_name = 7;
|
||||
int64 indexID = 8;
|
||||
string channelID = 9;
|
||||
SegmentState segment_state = 10;
|
||||
}
|
||||
|
||||
message GetSegmentInfoResponse {
|
||||
common.Status status = 1;
|
||||
repeated SegmentInfo infos = 2;
|
||||
}
|
||||
message CreateQueryChannelRequest {
|
||||
|
||||
//-----------------query node proto----------------
|
||||
message AddQueryChannelRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 nodeID = 2;
|
||||
int64 collectionID = 3;
|
||||
string request_channelID = 4;
|
||||
string result_channelID = 5;
|
||||
}
|
||||
|
||||
message RemoveQueryChannelRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 nodeID = 2;
|
||||
int64 collectionID = 3;
|
||||
string request_channelID = 4;
|
||||
string result_channelID = 5;
|
||||
}
|
||||
|
||||
//message excludedSegmentInfo {
|
||||
// int64 segmentID = 1;
|
||||
// internal.MsgPosition pos = 2;
|
||||
//}
|
||||
|
||||
//message WatchDmChannelInfo {
|
||||
// string channelID = 1;
|
||||
// internal.MsgPosition pos = 2;
|
||||
//}
|
||||
|
||||
message WatchDmChannelsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 nodeID = 2;
|
||||
int64 collectionID = 3;
|
||||
int64 partitionID = 4;
|
||||
repeated VchannelInfo infos = 5;
|
||||
schema.CollectionSchema schema = 6;
|
||||
repeated CheckPoint exclude_infos = 7;
|
||||
}
|
||||
|
||||
enum TriggerCondition {
|
||||
handoff = 0;
|
||||
loadBalance = 1;
|
||||
grpcRequest = 2;
|
||||
nodeDown = 3;
|
||||
}
|
||||
|
||||
//message FieldBinlogPath {
|
||||
// int64 filedID = 1;
|
||||
// repeated string binlog_path = 2;
|
||||
//}
|
||||
|
||||
//used for handoff task
|
||||
message SegmentLoadInfo {
|
||||
int64 segmentID = 1;
|
||||
int64 partitionID = 2;
|
||||
int64 collectionID = 3;
|
||||
int64 dbID = 4;
|
||||
int64 flush_time = 5;
|
||||
repeated FieldBinlog binlog_paths = 6;
|
||||
}
|
||||
|
||||
message LoadSegmentsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 nodeID = 2;
|
||||
repeated SegmentLoadInfo infos = 3;
|
||||
schema.CollectionSchema schema = 4;
|
||||
TriggerCondition load_condition = 5;
|
||||
}
|
||||
|
||||
message ReleaseSegmentsRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 nodeID = 2;
|
||||
int64 dbID = 3;
|
||||
int64 collectionID = 4;
|
||||
repeated int64 partitionIDs = 5;
|
||||
repeated int64 segmentIDs = 6;
|
||||
}
|
||||
|
||||
//----------------etcd-----------------
|
||||
enum SegmentState {
|
||||
None = 0;
|
||||
Growing = 1;
|
||||
Frozen = 2;
|
||||
sealing = 3;
|
||||
sealed = 4;
|
||||
}
|
||||
|
||||
message DmChannelInfo {
|
||||
int64 nodeID_loaded = 1;
|
||||
repeated string channelIDs = 2;
|
||||
}
|
||||
|
||||
message QueryChannelInfo {
|
||||
int64 collectionID = 1;
|
||||
string query_channelID = 2;
|
||||
string query_result_channelID = 3;
|
||||
}
|
||||
|
||||
message CollectionInfo {
|
||||
int64 collectionID = 1;
|
||||
repeated int64 partitionIDs = 2;
|
||||
repeated DmChannelInfo channel_infos = 3;
|
||||
schema.CollectionSchema schema = 6;
|
||||
}
|
||||
|
||||
message HandoffSegments {
|
||||
common.MsgBase base = 1;
|
||||
repeated SegmentLoadInfo infos = 2;
|
||||
}
|
||||
|
||||
message LoadBalanceSegmentInfo {
|
||||
int64 segmentID = 1;
|
||||
int64 partitionID = 2;
|
||||
int64 collectionID = 3;
|
||||
int64 source_nodeID = 4;
|
||||
int64 dst_nodeID = 5;
|
||||
bool source_done = 6;
|
||||
bool dst_done = 7;
|
||||
bool valid_info = 8;
|
||||
}
|
||||
|
||||
message LoadBalanceSegments {
|
||||
common.MsgBase base = 1;
|
||||
repeated LoadBalanceSegmentInfo infos = 2;
|
||||
}
|
||||
|
||||
//--------------temp used, delete after data_service.proto update-----
|
||||
message CheckPoint {
|
||||
int64 segmentID = 1;
|
||||
internal.MsgPosition position = 2;
|
||||
int64 num_of_rows = 3;
|
||||
}
|
||||
|
||||
message VchannelInfo {
|
||||
int64 collectionID = 1;
|
||||
string channelName = 2;
|
||||
internal.MsgPosition seek_position = 3;
|
||||
repeated CheckPoint checkPoints = 4;
|
||||
repeated int64 flushedSegments = 5;
|
||||
}
|
||||
|
||||
message SegmentBinlogs {
|
||||
int64 segmentID = 1;
|
||||
repeated FieldBinlog fieldBinlogs = 2;
|
||||
}
|
||||
|
||||
message FieldBinlog{
|
||||
int64 fieldID = 1;
|
||||
repeated string binlogs = 2;
|
||||
}
|
||||
|
||||
message GetRecoveryInfoResponse {
|
||||
common.MsgBase base = 1;
|
||||
repeated VchannelInfo channels = 2;
|
||||
repeated SegmentBinlogs binlogs = 3;
|
||||
}
|
||||
|
||||
message GetRecoveryInfoRequest {
|
||||
common.MsgBase base = 1;
|
||||
int64 collectionID = 2;
|
||||
int64 partitionID = 3;
|
||||
}
|
||||
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -27,6 +27,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/proxypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/sessionutil"
|
||||
|
@ -159,7 +160,7 @@ func (node *ProxyNode) Init() error {
|
|||
}
|
||||
|
||||
if node.queryService != nil {
|
||||
resp, err := node.queryService.CreateQueryChannel(ctx)
|
||||
resp, err := node.queryService.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{})
|
||||
if err != nil {
|
||||
log.Debug("ProxyNode CreateQueryChannel failed", zap.Error(err))
|
||||
return err
|
||||
|
|
|
@ -1142,8 +1142,9 @@ func (st *SearchTask) Execute(ctx context.Context) error {
|
|||
err := st.queryMsgStream.Produce(&msgPack)
|
||||
log.Debug("proxynode", zap.Int("length of searchMsg", len(msgPack.Msgs)))
|
||||
log.Debug("proxy node sent one searchMsg",
|
||||
zap.Any("collectionID", st.CollectionID),
|
||||
zap.Any("msgID", tsMsg.ID()),
|
||||
zap.Any("collectionID", st.CollectionID))
|
||||
)
|
||||
if err != nil {
|
||||
log.Debug("proxynode", zap.String("send search request failed", err.Error()))
|
||||
}
|
||||
|
|
|
@ -23,6 +23,8 @@ package querynode
|
|||
*/
|
||||
import "C"
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
@ -38,6 +40,9 @@ type Collection struct {
|
|||
partitionIDs []UniqueID
|
||||
schema *schemapb.CollectionSchema
|
||||
watchedChannels []VChannel
|
||||
|
||||
releaseMu sync.RWMutex // guards releaseTime
|
||||
releaseTime Timestamp
|
||||
}
|
||||
|
||||
func (c *Collection) ID() UniqueID {
|
||||
|
@ -73,6 +78,18 @@ func (c *Collection) getWatchedDmChannels() []VChannel {
|
|||
return c.watchedChannels
|
||||
}
|
||||
|
||||
func (c *Collection) setReleaseTime(t Timestamp) {
|
||||
c.releaseMu.Lock()
|
||||
defer c.releaseMu.Unlock()
|
||||
c.releaseTime = t
|
||||
}
|
||||
|
||||
func (c *Collection) getReleaseTime() Timestamp {
|
||||
c.releaseMu.RLock()
|
||||
defer c.releaseMu.RUnlock()
|
||||
return c.releaseTime
|
||||
}
|
||||
|
||||
func newCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) *Collection {
|
||||
/*
|
||||
CCollection
|
||||
|
@ -93,6 +110,7 @@ func newCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) *Co
|
|||
|
||||
log.Debug("create collection", zap.Int64("collectionID", collectionID))
|
||||
|
||||
newCollection.setReleaseTime(Timestamp(math.MaxUint64))
|
||||
return newCollection
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import "C"
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
|
@ -52,9 +53,7 @@ type ReplicaInterface interface {
|
|||
hasCollection(collectionID UniqueID) bool
|
||||
getCollectionNum() int
|
||||
getPartitionIDs(collectionID UniqueID) ([]UniqueID, error)
|
||||
|
||||
getVecFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error)
|
||||
getFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error)
|
||||
|
||||
// partition
|
||||
addPartition(collectionID UniqueID, partitionID UniqueID) error
|
||||
|
@ -63,29 +62,23 @@ type ReplicaInterface interface {
|
|||
hasPartition(partitionID UniqueID) bool
|
||||
getPartitionNum() int
|
||||
getSegmentIDs(partitionID UniqueID) ([]UniqueID, error)
|
||||
|
||||
enablePartition(partitionID UniqueID) error
|
||||
disablePartition(partitionID UniqueID) error
|
||||
getSegmentIDsByVChannel(partitionID UniqueID, vChannel VChannel) ([]UniqueID, error)
|
||||
|
||||
// segment
|
||||
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error
|
||||
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID VChannel, segType segmentType, onService bool) error
|
||||
setSegment(segment *Segment) error
|
||||
removeSegment(segmentID UniqueID) error
|
||||
getSegmentByID(segmentID UniqueID) (*Segment, error)
|
||||
hasSegment(segmentID UniqueID) bool
|
||||
getSegmentNum() int
|
||||
setSegmentEnableIndex(segmentID UniqueID, enable bool) error
|
||||
setSegmentEnableLoadBinLog(segmentID UniqueID, enable bool) error
|
||||
getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
|
||||
getSegmentStatistics() []*internalpb.SegmentStats
|
||||
|
||||
// excluded segments
|
||||
initExcludedSegments(collectionID UniqueID)
|
||||
removeExcludedSegments(collectionID UniqueID)
|
||||
addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error
|
||||
getExcludedSegments(collectionID UniqueID) ([]UniqueID, error)
|
||||
addExcludedSegments(collectionID UniqueID, segmentInfos []*querypb.CheckPoint) error
|
||||
getExcludedSegments(collectionID UniqueID) ([]*querypb.CheckPoint, error)
|
||||
|
||||
getEnabledSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
|
||||
getSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
|
||||
replaceGrowingSegmentBySealedSegment(segment *Segment) error
|
||||
|
||||
|
@ -98,7 +91,7 @@ type collectionReplica struct {
|
|||
partitions map[UniqueID]*Partition
|
||||
segments map[UniqueID]*Segment
|
||||
|
||||
excludedSegments map[UniqueID][]UniqueID // map[collectionID]segmentIDs
|
||||
excludedSegments map[UniqueID][]*querypb.CheckPoint // map[collectionID]segmentIDs
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------- collection
|
||||
|
@ -217,26 +210,6 @@ func (colReplica *collectionReplica) getVecFieldIDsByCollectionID(collectionID U
|
|||
return vecFields, nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
fields, err := colReplica.getFieldsByCollectionIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
targetFields := make([]int64, 0)
|
||||
for _, field := range fields {
|
||||
targetFields = append(targetFields, field.FieldID)
|
||||
}
|
||||
|
||||
// add row id field
|
||||
targetFields = append(targetFields, rowIDFieldID)
|
||||
|
||||
return targetFields, nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getFieldsByCollectionIDPrivate(collectionID UniqueID) ([]*schemapb.FieldSchema, error) {
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
|
@ -336,6 +309,27 @@ func (colReplica *collectionReplica) getSegmentIDs(partitionID UniqueID) ([]Uniq
|
|||
return colReplica.getSegmentIDsPrivate(partitionID)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getSegmentIDsByVChannel(partitionID UniqueID, vChannel VChannel) ([]UniqueID, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
segmentIDs, err := colReplica.getSegmentIDsPrivate(partitionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segmentIDsTmp := make([]UniqueID, 0)
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := colReplica.getSegmentByIDPrivate(segmentID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if segment.vChannelID == vChannel {
|
||||
segmentIDsTmp = append(segmentIDsTmp, segment.ID())
|
||||
}
|
||||
}
|
||||
|
||||
return segmentIDsTmp, nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getSegmentIDsPrivate(partitionID UniqueID) ([]UniqueID, error) {
|
||||
partition, err2 := colReplica.getPartitionByIDPrivate(partitionID)
|
||||
if err2 != nil {
|
||||
|
@ -344,52 +338,16 @@ func (colReplica *collectionReplica) getSegmentIDsPrivate(partitionID UniqueID)
|
|||
return partition.segmentIDs, nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) enablePartition(partitionID UniqueID) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
partition, err := colReplica.getPartitionByIDPrivate(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partition.enable = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) disablePartition(partitionID UniqueID) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
partition, err := colReplica.getPartitionByIDPrivate(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partition.enable = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getEnabledPartitionIDsPrivate() []UniqueID {
|
||||
partitionIDs := make([]UniqueID, 0)
|
||||
for _, partition := range colReplica.partitions {
|
||||
if partition.enable {
|
||||
partitionIDs = append(partitionIDs, partition.partitionID)
|
||||
}
|
||||
}
|
||||
return partitionIDs
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------- segment
|
||||
func (colReplica *collectionReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error {
|
||||
func (colReplica *collectionReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID VChannel, segType segmentType, onService bool) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var newSegment = newSegment(collection, segmentID, partitionID, collectionID, segType)
|
||||
return colReplica.addSegmentPrivate(segmentID, partitionID, newSegment)
|
||||
seg := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, onService)
|
||||
return colReplica.addSegmentPrivate(segmentID, partitionID, seg)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) addSegmentPrivate(segmentID UniqueID, partitionID UniqueID, segment *Segment) error {
|
||||
|
@ -499,35 +457,6 @@ func (colReplica *collectionReplica) getSegmentStatistics() []*internalpb.Segmen
|
|||
return statisticData
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getEnabledSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
targetCollectionIDs := make([]UniqueID, 0)
|
||||
targetPartitionIDs := make([]UniqueID, 0)
|
||||
targetSegmentIDs := make([]UniqueID, 0)
|
||||
|
||||
for _, partitionID := range colReplica.getEnabledPartitionIDsPrivate() {
|
||||
segmentIDs, err := colReplica.getSegmentIDsPrivate(partitionID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := colReplica.getSegmentByIDPrivate(segmentID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if segment.getType() == segType {
|
||||
targetCollectionIDs = append(targetCollectionIDs, segment.collectionID)
|
||||
targetPartitionIDs = append(targetPartitionIDs, segment.partitionID)
|
||||
targetSegmentIDs = append(targetSegmentIDs, segment.segmentID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return targetCollectionIDs, targetPartitionIDs, targetSegmentIDs
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
@ -572,39 +501,11 @@ func (colReplica *collectionReplica) replaceGrowingSegmentBySealedSegment(segmen
|
|||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) setSegmentEnableIndex(segmentID UniqueID, enable bool) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
targetSegment, err := colReplica.getSegmentByIDPrivate(segmentID)
|
||||
if targetSegment.segmentType != segmentTypeSealed {
|
||||
return errors.New("unexpected segment type")
|
||||
}
|
||||
if err == nil && targetSegment != nil {
|
||||
targetSegment.setEnableIndex(enable)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) setSegmentEnableLoadBinLog(segmentID UniqueID, enable bool) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
targetSegment, err := colReplica.getSegmentByIDPrivate(segmentID)
|
||||
if targetSegment.segmentType != segmentTypeGrowing {
|
||||
return errors.New("unexpected segment type")
|
||||
}
|
||||
if err == nil && targetSegment != nil {
|
||||
targetSegment.setLoadBinLogEnable(enable)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) initExcludedSegments(collectionID UniqueID) {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
colReplica.excludedSegments[collectionID] = make([]UniqueID, 0)
|
||||
colReplica.excludedSegments[collectionID] = make([]*querypb.CheckPoint, 0)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) removeExcludedSegments(collectionID UniqueID) {
|
||||
|
@ -614,7 +515,7 @@ func (colReplica *collectionReplica) removeExcludedSegments(collectionID UniqueI
|
|||
delete(colReplica.excludedSegments, collectionID)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error {
|
||||
func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID, segmentInfos []*querypb.CheckPoint) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
|
@ -622,11 +523,11 @@ func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID,
|
|||
return errors.New("addExcludedSegments failed, cannot found collection, id =" + fmt.Sprintln(collectionID))
|
||||
}
|
||||
|
||||
colReplica.excludedSegments[collectionID] = append(colReplica.excludedSegments[collectionID], segmentIDs...)
|
||||
colReplica.excludedSegments[collectionID] = append(colReplica.excludedSegments[collectionID], segmentInfos...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getExcludedSegments(collectionID UniqueID) ([]UniqueID, error) {
|
||||
func (colReplica *collectionReplica) getExcludedSegments(collectionID UniqueID) ([]*querypb.CheckPoint, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
|
@ -650,37 +551,11 @@ func (colReplica *collectionReplica) freeAll() {
|
|||
colReplica.segments = make(map[UniqueID]*Segment)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplica) getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
targetCollectionIDs := make([]UniqueID, 0)
|
||||
targetPartitionIDs := make([]UniqueID, 0)
|
||||
targetSegmentIDs := make([]UniqueID, 0)
|
||||
|
||||
for _, segment := range colReplica.segments {
|
||||
if !segment.enableLoadBinLog {
|
||||
continue
|
||||
}
|
||||
if segment.getType() == segType {
|
||||
if segType == segmentTypeSealed && !segment.getEnableIndex() {
|
||||
continue
|
||||
}
|
||||
|
||||
targetCollectionIDs = append(targetCollectionIDs, segment.collectionID)
|
||||
targetPartitionIDs = append(targetPartitionIDs, segment.partitionID)
|
||||
targetSegmentIDs = append(targetSegmentIDs, segment.segmentID)
|
||||
}
|
||||
}
|
||||
|
||||
return targetCollectionIDs, targetPartitionIDs, targetSegmentIDs
|
||||
}
|
||||
|
||||
func newCollectionReplica() ReplicaInterface {
|
||||
collections := make(map[UniqueID]*Collection)
|
||||
partitions := make(map[UniqueID]*Partition)
|
||||
segments := make(map[UniqueID]*Segment)
|
||||
excludedSegments := make(map[UniqueID][]UniqueID)
|
||||
excludedSegments := make(map[UniqueID][]*querypb.CheckPoint)
|
||||
|
||||
var replica ReplicaInterface = &collectionReplica{
|
||||
collections: collections,
|
||||
|
|
|
@ -172,7 +172,7 @@ func TestCollectionReplica_addSegment(t *testing.T) {
|
|||
|
||||
const segmentNum = 3
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -191,7 +191,7 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
|
|||
const segmentNum = 3
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -212,7 +212,7 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
|
|||
const segmentNum = 3
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -231,7 +231,7 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
|
|||
const segmentNum = 3
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -255,36 +255,34 @@ func TestCollectionReplica_freeAll(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestReplaceGrowingSegmentBySealedSegment(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
collectionID := UniqueID(0)
|
||||
segmentID := UniqueID(520)
|
||||
initTestMeta(t, node, collectionID, segmentID)
|
||||
|
||||
_, _, segIDs := node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
|
||||
assert.Equal(t, len(segIDs), 1)
|
||||
|
||||
collection, err := node.historical.replica.getCollectionByID(collectionID)
|
||||
assert.NoError(t, err)
|
||||
ns := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeSealed)
|
||||
err = node.historical.replica.replaceGrowingSegmentBySealedSegment(ns)
|
||||
assert.NoError(t, err)
|
||||
err = node.historical.replica.setSegmentEnableIndex(segmentID, true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
segmentNums := node.historical.replica.getSegmentNum()
|
||||
assert.Equal(t, segmentNums, 1)
|
||||
|
||||
segment, err := node.historical.replica.getSegmentByID(segmentID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, segment.getType(), segmentTypeSealed)
|
||||
|
||||
_, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
|
||||
assert.Equal(t, len(segIDs), 0)
|
||||
_, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeSealed)
|
||||
assert.Equal(t, len(segIDs), 1)
|
||||
|
||||
err = node.Stop()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
//func TestReplaceGrowingSegmentBySealedSegment(t *testing.T) {
|
||||
// node := newQueryNodeMock()
|
||||
// collectionID := UniqueID(0)
|
||||
// segmentID := UniqueID(520)
|
||||
// initTestMeta(t, node, collectionID, segmentID)
|
||||
//
|
||||
// _, _, segIDs := node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
|
||||
// assert.Equal(t, len(segIDs), 1)
|
||||
//
|
||||
// collection, err := node.historical.replica.getCollectionByID(collectionID)
|
||||
// assert.NoError(t, err)
|
||||
// ns := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeSealed, true)
|
||||
// err = node.historical.replica.replaceGrowingSegmentBySealedSegment(ns)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// segmentNums := node.historical.replica.getSegmentNum()
|
||||
// assert.Equal(t, segmentNums, 1)
|
||||
//
|
||||
// segment, err := node.historical.replica.getSegmentByID(segmentID)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// assert.Equal(t, segment.getType(), segmentTypeSealed)
|
||||
//
|
||||
// _, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
|
||||
// assert.Equal(t, len(segIDs), 0)
|
||||
// _, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeSealed)
|
||||
// assert.Equal(t, len(segIDs), 1)
|
||||
//
|
||||
// err = node.Stop()
|
||||
// assert.NoError(t, err)
|
||||
//}
|
||||
|
|
|
@ -33,9 +33,9 @@ const (
|
|||
type dataSyncService struct {
|
||||
ctx context.Context
|
||||
|
||||
mu sync.Mutex // guards FlowGraphs
|
||||
collectionFlowGraphs map[UniqueID][]*queryNodeFlowGraph // map[collectionID]flowGraphs
|
||||
partitionFlowGraphs map[UniqueID][]*queryNodeFlowGraph // map[partitionID]flowGraphs
|
||||
mu sync.Mutex // guards FlowGraphs
|
||||
collectionFlowGraphs map[UniqueID]map[VChannel]*queryNodeFlowGraph // map[collectionID]flowGraphs
|
||||
partitionFlowGraphs map[UniqueID]map[VChannel]*queryNodeFlowGraph // map[partitionID]flowGraphs
|
||||
|
||||
streamingReplica ReplicaInterface
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
|
@ -47,10 +47,9 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
|
|||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID]; ok {
|
||||
return errors.New("collection flow graph has been existed, collectionID = " + fmt.Sprintln(collectionID))
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
|
||||
dsService.collectionFlowGraphs[collectionID] = make(map[VChannel]*queryNodeFlowGraph)
|
||||
}
|
||||
dsService.collectionFlowGraphs[collectionID] = make([]*queryNodeFlowGraph, 0)
|
||||
for _, vChannel := range vChannels {
|
||||
// collection flow graph doesn't need partition id
|
||||
partitionID := UniqueID(0)
|
||||
|
@ -62,7 +61,7 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
|
|||
dsService.tSafeReplica,
|
||||
vChannel,
|
||||
dsService.msFactory)
|
||||
dsService.collectionFlowGraphs[collectionID] = append(dsService.collectionFlowGraphs[collectionID], newFlowGraph)
|
||||
dsService.collectionFlowGraphs[collectionID][vChannel] = newFlowGraph
|
||||
log.Debug("add collection flow graph",
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.Any("channel", vChannel))
|
||||
|
@ -70,27 +69,37 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) getCollectionFlowGraphs(collectionID UniqueID) ([]*queryNodeFlowGraph, error) {
|
||||
func (dsService *dataSyncService) getCollectionFlowGraphs(collectionID UniqueID, vChannels []string) (map[VChannel]*queryNodeFlowGraph, error) {
|
||||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
|
||||
return nil, errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
|
||||
}
|
||||
return dsService.collectionFlowGraphs[collectionID], nil
|
||||
|
||||
tmpFGs := make(map[VChannel]*queryNodeFlowGraph)
|
||||
for _, channel := range vChannels {
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
|
||||
tmpFGs[channel] = dsService.collectionFlowGraphs[collectionID][channel]
|
||||
}
|
||||
}
|
||||
|
||||
return tmpFGs, nil
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) startCollectionFlowGraph(collectionID UniqueID) error {
|
||||
func (dsService *dataSyncService) startCollectionFlowGraph(collectionID UniqueID, vChannels []string) error {
|
||||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
|
||||
return errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
|
||||
}
|
||||
for _, fg := range dsService.collectionFlowGraphs[collectionID] {
|
||||
// start flow graph
|
||||
log.Debug("start flow graph", zap.Any("channel", fg.channel))
|
||||
go fg.flowGraph.Start()
|
||||
for _, channel := range vChannels {
|
||||
if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
|
||||
// start flow graph
|
||||
log.Debug("start collection flow graph", zap.Any("channel", channel))
|
||||
go dsService.collectionFlowGraphs[collectionID][channel].flowGraph.Start()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -114,10 +123,9 @@ func (dsService *dataSyncService) addPartitionFlowGraph(collectionID UniqueID, p
|
|||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID]; ok {
|
||||
return errors.New("partition flow graph has been existed, partitionID = " + fmt.Sprintln(partitionID))
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
|
||||
dsService.partitionFlowGraphs[partitionID] = make(map[VChannel]*queryNodeFlowGraph)
|
||||
}
|
||||
dsService.partitionFlowGraphs[partitionID] = make([]*queryNodeFlowGraph, 0)
|
||||
for _, vChannel := range vChannels {
|
||||
newFlowGraph := newQueryNodeFlowGraph(dsService.ctx,
|
||||
flowGraphTypePartition,
|
||||
|
@ -127,31 +135,42 @@ func (dsService *dataSyncService) addPartitionFlowGraph(collectionID UniqueID, p
|
|||
dsService.tSafeReplica,
|
||||
vChannel,
|
||||
dsService.msFactory)
|
||||
dsService.partitionFlowGraphs[partitionID] = append(dsService.partitionFlowGraphs[partitionID], newFlowGraph)
|
||||
dsService.partitionFlowGraphs[partitionID][vChannel] = newFlowGraph
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) getPartitionFlowGraphs(partitionID UniqueID) ([]*queryNodeFlowGraph, error) {
|
||||
func (dsService *dataSyncService) getPartitionFlowGraphs(partitionID UniqueID, vChannels []string) (map[VChannel]*queryNodeFlowGraph, error) {
|
||||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
|
||||
return nil, errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
|
||||
}
|
||||
return dsService.partitionFlowGraphs[partitionID], nil
|
||||
|
||||
tmpFGs := make(map[VChannel]*queryNodeFlowGraph)
|
||||
for _, channel := range vChannels {
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
|
||||
tmpFGs[channel] = dsService.partitionFlowGraphs[partitionID][channel]
|
||||
}
|
||||
}
|
||||
|
||||
return tmpFGs, nil
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) startPartitionFlowGraph(partitionID UniqueID) error {
|
||||
func (dsService *dataSyncService) startPartitionFlowGraph(partitionID UniqueID, vChannels []string) error {
|
||||
dsService.mu.Lock()
|
||||
defer dsService.mu.Unlock()
|
||||
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
|
||||
return errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
|
||||
}
|
||||
for _, fg := range dsService.partitionFlowGraphs[partitionID] {
|
||||
// start flow graph
|
||||
go fg.flowGraph.Start()
|
||||
for _, channel := range vChannels {
|
||||
if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
|
||||
// start flow graph
|
||||
log.Debug("start partition flow graph", zap.Any("channel", channel))
|
||||
go dsService.partitionFlowGraphs[partitionID][channel].flowGraph.Start()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -177,8 +196,8 @@ func newDataSyncService(ctx context.Context,
|
|||
|
||||
return &dataSyncService{
|
||||
ctx: ctx,
|
||||
collectionFlowGraphs: make(map[UniqueID][]*queryNodeFlowGraph),
|
||||
partitionFlowGraphs: make(map[UniqueID][]*queryNodeFlowGraph),
|
||||
collectionFlowGraphs: make(map[UniqueID]map[VChannel]*queryNodeFlowGraph),
|
||||
partitionFlowGraphs: make(map[UniqueID]map[VChannel]*queryNodeFlowGraph),
|
||||
streamingReplica: streamingReplica,
|
||||
tSafeReplica: tSafeReplica,
|
||||
msFactory: factory,
|
||||
|
@ -200,6 +219,6 @@ func (dsService *dataSyncService) close() {
|
|||
}
|
||||
}
|
||||
}
|
||||
dsService.collectionFlowGraphs = make(map[UniqueID][]*queryNodeFlowGraph)
|
||||
dsService.partitionFlowGraphs = make(map[UniqueID][]*queryNodeFlowGraph)
|
||||
dsService.collectionFlowGraphs = make(map[UniqueID]map[VChannel]*queryNodeFlowGraph)
|
||||
dsService.partitionFlowGraphs = make(map[UniqueID]map[VChannel]*queryNodeFlowGraph)
|
||||
}
|
||||
|
|
|
@ -118,7 +118,7 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
channels := []VChannel{"0"}
|
||||
err = node.streaming.dataSyncService.addCollectionFlowGraph(collectionID, channels)
|
||||
assert.NoError(t, err)
|
||||
err = node.streaming.dataSyncService.startCollectionFlowGraph(collectionID)
|
||||
err = node.streaming.dataSyncService.startCollectionFlowGraph(collectionID, channels)
|
||||
assert.NoError(t, err)
|
||||
|
||||
<-node.queryNodeLoopCtx.Done()
|
||||
|
|
|
@ -94,29 +94,49 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
|
|||
// check if collection and partition exist
|
||||
collection := fdmNode.replica.hasCollection(msg.CollectionID)
|
||||
partition := fdmNode.replica.hasPartition(msg.PartitionID)
|
||||
if !collection || !partition {
|
||||
if fdmNode.graphType == flowGraphTypeCollection && !collection {
|
||||
log.Debug("filter invalid insert message, collection dose not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
if fdmNode.graphType == flowGraphTypePartition && !partition {
|
||||
log.Debug("filter invalid insert message, partition dose not exist",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the collection from message is target collection
|
||||
if msg.CollectionID != fdmNode.collectionID {
|
||||
log.Debug("filter invalid insert message, collection is not the target collection",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// if the flow graph type is partition, check if the partition is target partition
|
||||
if fdmNode.graphType == flowGraphTypePartition && msg.PartitionID != fdmNode.partitionID {
|
||||
log.Debug("filter invalid insert message, partition is not the target partition",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the segment is in excluded segments
|
||||
// Check if the segment is in excluded segments,
|
||||
// messages after seekPosition may contain the redundant data from flushed slice of segment,
|
||||
// so we need to compare the endTimestamp of received messages and position's timestamp.
|
||||
excludedSegments, err := fdmNode.replica.getExcludedSegments(fdmNode.collectionID)
|
||||
//log.Debug("excluded segments", zap.String("segmentIDs", fmt.Sprintln(excludedSegments)))
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return nil
|
||||
}
|
||||
for _, id := range excludedSegments {
|
||||
if msg.SegmentID == id {
|
||||
for _, segmentInfo := range excludedSegments {
|
||||
if msg.SegmentID == segmentInfo.SegmentID && msg.EndTs() < segmentInfo.Position.Timestamp {
|
||||
log.Debug("filter invalid insert message, segments are excluded segments",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -128,6 +148,9 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
|
|||
}
|
||||
|
||||
if len(msg.Timestamps) <= 0 {
|
||||
log.Debug("filter invalid insert message, no message",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Any("partitionID", msg.PartitionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -75,9 +75,18 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
|||
|
||||
// 1. hash insertMessages to insertData
|
||||
for _, task := range iMsg.insertMessages {
|
||||
// check if partition exists, if not, create partition
|
||||
if hasPartition := iNode.replica.hasPartition(task.PartitionID); !hasPartition {
|
||||
err := iNode.replica.addPartition(task.CollectionID, task.PartitionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// check if segment exists, if not, create this segment
|
||||
if !iNode.replica.hasSegment(task.SegmentID) {
|
||||
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, segmentTypeGrowing)
|
||||
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, task.ChannelID, segmentTypeGrowing, true)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
continue
|
||||
|
|
|
@ -56,7 +56,7 @@ func newQueryNodeFlowGraph(ctx context.Context,
|
|||
var dmStreamNode node = q.newDmInputNode(ctx1, factory)
|
||||
var filterDmNode node = newFilteredDmNode(streamingReplica, flowGraphType, collectionID, partitionID)
|
||||
var insertNode node = newInsertNode(streamingReplica)
|
||||
var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, collectionID, channel, factory)
|
||||
var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, flowGraphType, collectionID, partitionID, channel, factory)
|
||||
|
||||
q.flowGraph.AddNode(dmStreamNode)
|
||||
q.flowGraph.AddNode(filterDmNode)
|
||||
|
|
|
@ -24,10 +24,12 @@ import (
|
|||
|
||||
type serviceTimeNode struct {
|
||||
baseNode
|
||||
collectionID UniqueID
|
||||
vChannel VChannel
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
//timeTickMsgStream msgstream.MsgStream
|
||||
graphType flowGraphType
|
||||
collectionID UniqueID
|
||||
partitionID UniqueID
|
||||
vChannel VChannel
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
timeTickMsgStream msgstream.MsgStream
|
||||
}
|
||||
|
||||
func (stNode *serviceTimeNode) Name() string {
|
||||
|
@ -57,11 +59,19 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
|||
}
|
||||
|
||||
// update service time
|
||||
channel := stNode.vChannel + strconv.FormatInt(stNode.collectionID, 10)
|
||||
stNode.tSafeReplica.setTSafe(channel, serviceTimeMsg.timeRange.timestampMax)
|
||||
var id UniqueID
|
||||
if stNode.graphType == flowGraphTypePartition {
|
||||
id = stNode.partitionID
|
||||
} else {
|
||||
id = stNode.collectionID
|
||||
}
|
||||
channelTmp := stNode.vChannel + strconv.FormatInt(stNode.collectionID, 10)
|
||||
stNode.tSafeReplica.setTSafe(channelTmp, id, serviceTimeMsg.timeRange.timestampMax)
|
||||
//log.Debug("update tSafe:",
|
||||
// zap.Int64("tSafe", int64(serviceTimeMsg.timeRange.timestampMax)),
|
||||
// zap.Any("collectionID", stNode.collectionID),
|
||||
// zap.Any("id", id),
|
||||
// zap.Any("channel", channelTmp),
|
||||
//)
|
||||
|
||||
//if err := stNode.sendTimeTick(serviceTimeMsg.timeRange.timestampMax); err != nil {
|
||||
|
@ -98,7 +108,9 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
|||
|
||||
func newServiceTimeNode(ctx context.Context,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
graphType flowGraphType,
|
||||
collectionID UniqueID,
|
||||
partitionID UniqueID,
|
||||
channel VChannel,
|
||||
factory msgstream.Factory) *serviceTimeNode {
|
||||
|
||||
|
@ -109,19 +121,23 @@ func newServiceTimeNode(ctx context.Context,
|
|||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
//timeTimeMsgStream, err := factory.NewMsgStream(ctx)
|
||||
//if err != nil {
|
||||
// log.Error(err.Error())
|
||||
//} else {
|
||||
// timeTimeMsgStream.AsProducer([]string{Params.QueryTimeTickChannelName})
|
||||
// log.Debug("query node AsProducer: " + Params.QueryTimeTickChannelName)
|
||||
//}
|
||||
timeTimeMsgStream, err := factory.NewMsgStream(ctx)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
} else {
|
||||
// TODO: use param table
|
||||
timeTickChannel := "query-node-time-tick-0"
|
||||
timeTimeMsgStream.AsProducer([]string{timeTickChannel})
|
||||
log.Debug("query node AsProducer: " + timeTickChannel)
|
||||
}
|
||||
|
||||
return &serviceTimeNode{
|
||||
baseNode: baseNode,
|
||||
collectionID: collectionID,
|
||||
vChannel: channel,
|
||||
tSafeReplica: tSafeReplica,
|
||||
//timeTickMsgStream: timeTimeMsgStream,
|
||||
baseNode: baseNode,
|
||||
graphType: graphType,
|
||||
collectionID: collectionID,
|
||||
partitionID: partitionID,
|
||||
vChannel: channel,
|
||||
tSafeReplica: tSafeReplica,
|
||||
timeTickMsgStream: timeTimeMsgStream,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -13,6 +13,8 @@ package querynode
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/msgstream"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
|
@ -20,7 +22,7 @@ import (
|
|||
|
||||
type historical struct {
|
||||
replica ReplicaInterface
|
||||
loadService *loadService
|
||||
loader *segmentLoader
|
||||
statsService *statsService
|
||||
}
|
||||
|
||||
|
@ -30,25 +32,86 @@ func newHistorical(ctx context.Context,
|
|||
indexService types.IndexService,
|
||||
factory msgstream.Factory) *historical {
|
||||
replica := newCollectionReplica()
|
||||
ls := newLoadService(ctx, masterService, dataService, indexService, replica)
|
||||
ss := newStatsService(ctx, replica, ls.segLoader.indexLoader.fieldStatsChan, factory)
|
||||
loader := newSegmentLoader(ctx, masterService, indexService, dataService, replica)
|
||||
ss := newStatsService(ctx, replica, loader.indexLoader.fieldStatsChan, factory)
|
||||
|
||||
return &historical{
|
||||
replica: replica,
|
||||
loadService: ls,
|
||||
loader: loader,
|
||||
statsService: ss,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *historical) start() {
|
||||
h.loadService.start()
|
||||
h.statsService.start()
|
||||
}
|
||||
|
||||
func (h *historical) close() {
|
||||
h.loadService.close()
|
||||
h.statsService.close()
|
||||
|
||||
// free collectionReplica
|
||||
h.replica.freeAll()
|
||||
}
|
||||
|
||||
func (h *historical) search(searchReqs []*searchRequest,
|
||||
collID UniqueID,
|
||||
partIDs []UniqueID,
|
||||
plan *Plan,
|
||||
searchTs Timestamp) ([]*SearchResult, []*Segment, error) {
|
||||
|
||||
searchResults := make([]*SearchResult, 0)
|
||||
segmentResults := make([]*Segment, 0)
|
||||
|
||||
// get historical partition ids
|
||||
var searchPartIDs []UniqueID
|
||||
if len(partIDs) == 0 {
|
||||
hisPartIDs, err := h.replica.getPartitionIDs(collID)
|
||||
if len(hisPartIDs) == 0 {
|
||||
// no partitions in collection, do empty search
|
||||
return nil, nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
searchPartIDs = hisPartIDs
|
||||
} else {
|
||||
for _, id := range partIDs {
|
||||
_, err := h.replica.getPartitionByID(id)
|
||||
if err == nil {
|
||||
searchPartIDs = append(searchPartIDs, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// all partitions have been released
|
||||
if len(searchPartIDs) == 0 {
|
||||
return nil, nil, errors.New("partitions have been released , collectionID = " +
|
||||
fmt.Sprintln(collID) +
|
||||
"target partitionIDs = " +
|
||||
fmt.Sprintln(partIDs))
|
||||
}
|
||||
|
||||
for _, partID := range searchPartIDs {
|
||||
segIDs, err := h.replica.getSegmentIDs(partID)
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
for _, segID := range segIDs {
|
||||
seg, err := h.replica.getSegmentByID(segID)
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
if !seg.getOnService() {
|
||||
continue
|
||||
}
|
||||
searchResult, err := seg.segmentSearch(plan, searchReqs, []Timestamp{searchTs})
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
searchResults = append(searchResults, searchResult)
|
||||
segmentResults = append(segmentResults, seg)
|
||||
}
|
||||
}
|
||||
|
||||
return searchResults, segmentResults, nil
|
||||
}
|
||||
|
|
|
@ -14,6 +14,10 @@ package querynode
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@ -70,30 +74,54 @@ func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Stri
|
|||
}
|
||||
|
||||
func (node *QueryNode) AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelRequest) (*commonpb.Status, error) {
|
||||
//if node.searchService == nil || node.searchService.searchMsgStream == nil {
|
||||
// errMsg := "null search service or null search message stream"
|
||||
collectionID := in.CollectionID
|
||||
if node.searchService == nil {
|
||||
errMsg := "null search service, collectionID = " + fmt.Sprintln(collectionID)
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: errMsg,
|
||||
}
|
||||
return status, errors.New(errMsg)
|
||||
}
|
||||
|
||||
//if _, ok := node.searchService.searchCollections[in.CollectionID]; !ok {
|
||||
// errMsg := "null search collection, collectionID = " + fmt.Sprintln(collectionID)
|
||||
// status := &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
// Reason: errMsg,
|
||||
// }
|
||||
//
|
||||
// return status, errors.New(errMsg)
|
||||
//}
|
||||
//
|
||||
//// add request channel
|
||||
//consumeChannels := []string{in.RequestChannelID}
|
||||
//consumeSubName := Params.MsgChannelSubName
|
||||
//node.searchService.searchMsgStream.AsConsumer(consumeChannels, consumeSubName)
|
||||
//node.retrieveService.retrieveMsgStream.AsConsumer(consumeChannels, "RetrieveSubName")
|
||||
//log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
|
||||
//
|
||||
//// add result channel
|
||||
//producerChannels := []string{in.ResultChannelID}
|
||||
//node.searchService.searchResultMsgStream.AsProducer(producerChannels)
|
||||
//node.retrieveService.retrieveResultMsgStream.AsProducer(producerChannels)
|
||||
//log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))
|
||||
|
||||
// Do nothing
|
||||
// add search collection
|
||||
if !node.searchService.hasSearchCollection(collectionID) {
|
||||
node.searchService.addSearchCollection(collectionID)
|
||||
log.Debug("add search collection", zap.Any("collectionID", collectionID))
|
||||
}
|
||||
|
||||
// add request channel
|
||||
sc := node.searchService.searchCollections[in.CollectionID]
|
||||
consumeChannels := []string{in.RequestChannelID}
|
||||
//consumeSubName := Params.MsgChannelSubName
|
||||
consumeSubName := Params.MsgChannelSubName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
|
||||
sc.searchMsgStream.AsConsumer(consumeChannels, consumeSubName)
|
||||
node.retrieveService.retrieveMsgStream.AsConsumer(consumeChannels, "RetrieveSubName")
|
||||
log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
|
||||
|
||||
// add result channel
|
||||
producerChannels := []string{in.ResultChannelID}
|
||||
sc.searchResultMsgStream.AsProducer(producerChannels)
|
||||
node.retrieveService.retrieveResultMsgStream.AsProducer(producerChannels)
|
||||
log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))
|
||||
|
||||
// message stream need to asConsumer before start
|
||||
// add search collection
|
||||
if !node.searchService.hasSearchCollection(collectionID) {
|
||||
node.searchService.addSearchCollection(collectionID)
|
||||
log.Debug("add search collection", zap.Any("collectionID", collectionID))
|
||||
}
|
||||
sc.start()
|
||||
log.Debug("start search collection", zap.Any("collectionID", collectionID))
|
||||
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
|
@ -206,7 +234,11 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
|
|||
log.Error(err.Error())
|
||||
return status, err
|
||||
}
|
||||
log.Debug("loadSegmentsTask Enqueue done", zap.Any("collectionID", in.CollectionID))
|
||||
segmentIDs := make([]UniqueID, 0)
|
||||
for _, info := range in.Infos {
|
||||
segmentIDs = append(segmentIDs, info.SegmentID)
|
||||
}
|
||||
log.Debug("loadSegmentsTask Enqueue done", zap.Int64s("segmentIDs", segmentIDs))
|
||||
|
||||
func() {
|
||||
err = dct.WaitToFinish()
|
||||
|
@ -214,7 +246,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
|
|||
log.Error(err.Error())
|
||||
return
|
||||
}
|
||||
log.Debug("loadSegmentsTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
|
||||
log.Debug("loadSegmentsTask WaitToFinish done", zap.Int64s("segmentIDs", segmentIDs))
|
||||
}()
|
||||
|
||||
status := &commonpb.Status{
|
||||
|
@ -301,11 +333,17 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *queryPb.ReleaseS
|
|||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
}
|
||||
for _, id := range in.SegmentIDs {
|
||||
err2 := node.historical.loadService.segLoader.replica.removeSegment(id)
|
||||
if err2 != nil {
|
||||
err := node.historical.replica.removeSegment(id)
|
||||
if err != nil {
|
||||
// not return, try to release all segments
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err2.Error()
|
||||
status.Reason = err.Error()
|
||||
}
|
||||
err = node.streaming.replica.removeSegment(id)
|
||||
if err != nil {
|
||||
// not return, try to release all segments
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
}
|
||||
}
|
||||
return status, nil
|
||||
|
|
|
@ -297,7 +297,6 @@ func (loader *indexLoader) setIndexInfo(collectionID UniqueID, segment *Segment,
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("QueryNode IndexLoader setIndexInfo", zap.Any("Req", req), zap.Any("response", response))
|
||||
if response.Status.ErrorCode != commonpb.ErrorCode_Success {
|
||||
return errors.New(response.Status.Reason)
|
||||
}
|
||||
|
@ -377,7 +376,7 @@ func newIndexLoader(ctx context.Context, masterService types.MasterService, inde
|
|||
replica: replica,
|
||||
|
||||
fieldIndexes: make(map[string][]*internalpb.IndexStats),
|
||||
fieldStatsChan: make(chan []*internalpb.FieldStats, 1),
|
||||
fieldStatsChan: make(chan []*internalpb.FieldStats, 1024),
|
||||
|
||||
masterService: masterService,
|
||||
indexService: indexService,
|
||||
|
|
|
@ -1,216 +0,0 @@
|
|||
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
package querynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
)
|
||||
|
||||
const loadingCheckInterval = 3
|
||||
|
||||
type loadService struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
segLoader *segmentLoader
|
||||
}
|
||||
|
||||
// -------------------------------------------- load index -------------------------------------------- //
|
||||
func (s *loadService) start() {
|
||||
wg := &sync.WaitGroup{}
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-time.After(loadingCheckInterval * time.Second):
|
||||
wg.Add(2)
|
||||
//go s.segLoader.indexLoader.doLoadIndex(wg)
|
||||
go s.loadSegmentActively(wg)
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *loadService) close() {
|
||||
s.cancel()
|
||||
}
|
||||
|
||||
func (s *loadService) loadSegmentActively(wg *sync.WaitGroup) {
|
||||
collectionIDs, partitionIDs, segmentIDs := s.segLoader.replica.getSegmentsToLoadBySegmentType(segmentTypeGrowing)
|
||||
if len(collectionIDs) <= 0 {
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
log.Debug("do load segment for growing segments:", zap.String("segmentIDs", fmt.Sprintln(segmentIDs)))
|
||||
for i := range collectionIDs {
|
||||
collection, err := s.segLoader.replica.getCollectionByID(collectionIDs[i])
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
}
|
||||
|
||||
fieldIDs, err := s.segLoader.replica.getFieldIDsByCollectionID(collectionIDs[i])
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
segment := newSegment(collection, segmentIDs[i], partitionIDs[i], collectionIDs[i], segmentTypeSealed)
|
||||
segment.setLoadBinLogEnable(true)
|
||||
err = s.loadSegmentInternal(collectionIDs[i], segment, fieldIDs)
|
||||
if err == nil {
|
||||
// replace segment
|
||||
err = s.segLoader.replica.replaceGrowingSegmentBySealedSegment(segment)
|
||||
}
|
||||
if err != nil {
|
||||
deleteSegment(segment)
|
||||
log.Error(err.Error())
|
||||
}
|
||||
}
|
||||
// sendQueryNodeStats
|
||||
err := s.segLoader.indexLoader.sendQueryNodeStats()
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
// load segment passively
|
||||
func (s *loadService) loadSegmentPassively(collectionID UniqueID, partitionID UniqueID, segmentIDs []UniqueID, fieldIDs []int64) error {
|
||||
// TODO: interim solution
|
||||
if len(fieldIDs) == 0 {
|
||||
var err error
|
||||
fieldIDs, err = s.segLoader.replica.getFieldIDsByCollectionID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
collection, err := s.segLoader.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.segLoader.replica.getPartitionByID(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segment := newSegment(collection, segmentID, partitionID, collectionID, segmentTypeSealed)
|
||||
segment.setLoadBinLogEnable(true)
|
||||
err = s.loadSegmentInternal(collectionID, segment, fieldIDs)
|
||||
if err == nil {
|
||||
err = s.segLoader.replica.setSegment(segment)
|
||||
}
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
err = s.addSegmentToLoadBuffer(segment)
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *loadService) addSegmentToLoadBuffer(segment *Segment) error {
|
||||
segmentID := segment.segmentID
|
||||
partitionID := segment.partitionID
|
||||
collectionID := segment.collectionID
|
||||
deleteSegment(segment)
|
||||
err := s.segLoader.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeGrowing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.segLoader.replica.setSegmentEnableLoadBinLog(segmentID, true)
|
||||
if err != nil {
|
||||
s.segLoader.replica.removeSegment(segmentID)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *loadService) loadSegmentInternal(collectionID UniqueID, segment *Segment, fieldIDs []int64) error {
|
||||
// create segment
|
||||
statesResp, err := s.segLoader.GetSegmentStates(segment.segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if statesResp.States[0].State != commonpb.SegmentState_Flushed {
|
||||
return errors.New("segment not flush done")
|
||||
}
|
||||
|
||||
insertBinlogPaths, srcFieldIDs, err := s.segLoader.getInsertBinlogPaths(segment.segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vectorFieldIDs, err := s.segLoader.replica.getVecFieldIDsByCollectionID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
loadIndexFieldIDs := make([]int64, 0)
|
||||
for _, vecFieldID := range vectorFieldIDs {
|
||||
err = s.segLoader.indexLoader.setIndexInfo(collectionID, segment, vecFieldID)
|
||||
if err != nil {
|
||||
log.Warn("QueryNode load_service", zap.Any("SegmentID", segment.segmentID), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
loadIndexFieldIDs = append(loadIndexFieldIDs, vecFieldID)
|
||||
}
|
||||
// we don't need load to vector fields
|
||||
fieldIDs = s.segLoader.filterOutVectorFields(fieldIDs, loadIndexFieldIDs)
|
||||
|
||||
//log.Debug("srcFieldIDs in internal:", srcFieldIDs)
|
||||
//log.Debug("dstFieldIDs in internal:", fieldIDs)
|
||||
targetFields, err := s.segLoader.checkTargetFields(insertBinlogPaths, srcFieldIDs, fieldIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("loading insert...")
|
||||
err = s.segLoader.loadSegmentFieldsData(segment, targetFields)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, id := range loadIndexFieldIDs {
|
||||
log.Debug("loading index...")
|
||||
err = s.segLoader.indexLoader.loadIndex(segment, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newLoadService(ctx context.Context, masterService types.MasterService, dataService types.DataService, indexService types.IndexService, replica ReplicaInterface) *loadService {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
|
||||
segLoader := newSegmentLoader(ctx1, masterService, indexService, dataService, replica)
|
||||
|
||||
return &loadService{
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
|
||||
segLoader: segLoader,
|
||||
}
|
||||
}
|
|
@ -19,11 +19,6 @@ import (
|
|||
"math/rand"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/indexnode"
|
||||
minioKV "github.com/milvus-io/milvus/internal/kv/minio"
|
||||
|
@ -31,7 +26,6 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/etcdpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/storage"
|
||||
)
|
||||
|
@ -1025,96 +1019,97 @@ func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID,
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestSegmentLoad_Search_Vector(t *testing.T) {
|
||||
collectionID := UniqueID(0)
|
||||
partitionID := UniqueID(1)
|
||||
segmentID := UniqueID(2)
|
||||
fieldIDs := []int64{0, 101}
|
||||
|
||||
// mock write insert bin log
|
||||
keyPrefix := path.Join("query-node-seg-manager-test-minio-prefix", strconv.FormatInt(collectionID, 10), strconv.FormatInt(partitionID, 10))
|
||||
|
||||
node := newQueryNodeMock()
|
||||
defer node.Stop()
|
||||
|
||||
ctx := node.queryNodeLoopCtx
|
||||
node.historical.loadService = newLoadService(ctx, nil, nil, nil, node.historical.replica)
|
||||
|
||||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
err := node.historical.replica.addPartition(collectionID, partitionID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.historical.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeSealed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
paths, srcFieldIDs, err := generateInsertBinLog(collectionID, partitionID, segmentID, keyPrefix)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fieldsMap, _ := node.historical.loadService.segLoader.checkTargetFields(paths, srcFieldIDs, fieldIDs)
|
||||
assert.Equal(t, len(fieldsMap), 4)
|
||||
|
||||
segment, err := node.historical.replica.getSegmentByID(segmentID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.historical.loadService.segLoader.loadSegmentFieldsData(segment, fieldsMap)
|
||||
assert.NoError(t, err)
|
||||
|
||||
indexPaths, err := generateIndex(segmentID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
indexInfo := &indexInfo{
|
||||
indexPaths: indexPaths,
|
||||
readyLoad: true,
|
||||
}
|
||||
err = segment.setIndexInfo(100, indexInfo)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.historical.loadService.segLoader.indexLoader.loadIndex(segment, 100)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// do search
|
||||
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
|
||||
|
||||
const DIM = 16
|
||||
var searchRawData []byte
|
||||
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
|
||||
for _, ele := range vec {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
searchRawData = append(searchRawData, buf...)
|
||||
}
|
||||
placeholderValue := milvuspb.PlaceholderValue{
|
||||
Tag: "$0",
|
||||
Type: milvuspb.PlaceholderType_FloatVector,
|
||||
Values: [][]byte{searchRawData},
|
||||
}
|
||||
|
||||
placeholderGroup := milvuspb.PlaceholderGroup{
|
||||
Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
|
||||
}
|
||||
|
||||
placeHolderGroupBlob, err := proto.Marshal(&placeholderGroup)
|
||||
assert.NoError(t, err)
|
||||
|
||||
searchTimestamp := Timestamp(1020)
|
||||
collection, err := node.historical.replica.getCollectionByID(collectionID)
|
||||
assert.NoError(t, err)
|
||||
plan, err := createPlan(*collection, dslString)
|
||||
assert.NoError(t, err)
|
||||
holder, err := parseSearchRequest(plan, placeHolderGroupBlob)
|
||||
assert.NoError(t, err)
|
||||
placeholderGroups := make([]*searchRequest, 0)
|
||||
placeholderGroups = append(placeholderGroups, holder)
|
||||
|
||||
// wait for segment building index
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
_, err = segment.segmentSearch(plan, placeholderGroups, []Timestamp{searchTimestamp})
|
||||
assert.Nil(t, err)
|
||||
|
||||
plan.delete()
|
||||
holder.delete()
|
||||
|
||||
<-ctx.Done()
|
||||
}
|
||||
//
|
||||
//func TestSegmentLoad_Search_Vector(t *testing.T) {
|
||||
// collectionID := UniqueID(0)
|
||||
// partitionID := UniqueID(1)
|
||||
// segmentID := UniqueID(2)
|
||||
// fieldIDs := []int64{0, 101}
|
||||
//
|
||||
// // mock write insert bin log
|
||||
// keyPrefix := path.Join("query-node-seg-manager-test-minio-prefix", strconv.FormatInt(collectionID, 10), strconv.FormatInt(partitionID, 10))
|
||||
//
|
||||
// node := newQueryNodeMock()
|
||||
// defer node.Stop()
|
||||
//
|
||||
// ctx := node.queryNodeLoopCtx
|
||||
// node.historical.loadService = newLoadService(ctx, nil, nil, nil, node.historical.replica)
|
||||
//
|
||||
// initTestMeta(t, node, collectionID, 0)
|
||||
//
|
||||
// err := node.historical.replica.addPartition(collectionID, partitionID)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// err = node.historical.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeSealed)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// paths, srcFieldIDs, err := generateInsertBinLog(collectionID, partitionID, segmentID, keyPrefix)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// fieldsMap, _ := node.historical.loadService.segLoader.checkTargetFields(paths, srcFieldIDs, fieldIDs)
|
||||
// assert.Equal(t, len(fieldsMap), 4)
|
||||
//
|
||||
// segment, err := node.historical.replica.getSegmentByID(segmentID)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// err = node.historical.loadService.segLoader.loadSegmentFieldsData(segment, fieldsMap)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// indexPaths, err := generateIndex(segmentID)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// indexInfo := &indexInfo{
|
||||
// indexPaths: indexPaths,
|
||||
// readyLoad: true,
|
||||
// }
|
||||
// err = segment.setIndexInfo(100, indexInfo)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// err = node.historical.loadService.segLoader.indexLoader.loadIndex(segment, 100)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// // do search
|
||||
// dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
|
||||
//
|
||||
// const DIM = 16
|
||||
// var searchRawData []byte
|
||||
// var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
|
||||
// for _, ele := range vec {
|
||||
// buf := make([]byte, 4)
|
||||
// binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
// searchRawData = append(searchRawData, buf...)
|
||||
// }
|
||||
// placeholderValue := milvuspb.PlaceholderValue{
|
||||
// Tag: "$0",
|
||||
// Type: milvuspb.PlaceholderType_FloatVector,
|
||||
// Values: [][]byte{searchRawData},
|
||||
// }
|
||||
//
|
||||
// placeholderGroup := milvuspb.PlaceholderGroup{
|
||||
// Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
|
||||
// }
|
||||
//
|
||||
// placeHolderGroupBlob, err := proto.Marshal(&placeholderGroup)
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// searchTimestamp := Timestamp(1020)
|
||||
// collection, err := node.historical.replica.getCollectionByID(collectionID)
|
||||
// assert.NoError(t, err)
|
||||
// plan, err := createPlan(*collection, dslString)
|
||||
// assert.NoError(t, err)
|
||||
// holder, err := parseSearchRequest(plan, placeHolderGroupBlob)
|
||||
// assert.NoError(t, err)
|
||||
// placeholderGroups := make([]*searchRequest, 0)
|
||||
// placeholderGroups = append(placeholderGroups, holder)
|
||||
//
|
||||
// // wait for segment building index
|
||||
// time.Sleep(1 * time.Second)
|
||||
//
|
||||
// _, err = segment.segmentSearch(plan, placeholderGroups, []Timestamp{searchTimestamp})
|
||||
// assert.Nil(t, err)
|
||||
//
|
||||
// plan.delete()
|
||||
// holder.delete()
|
||||
//
|
||||
// <-ctx.Done()
|
||||
//}
|
||||
|
|
|
@ -122,7 +122,7 @@ func (p *ParamTable) Init() {
|
|||
p.initSearchResultReceiveBufSize()
|
||||
|
||||
p.initStatsPublishInterval()
|
||||
//p.initStatsChannelName()
|
||||
p.initStatsChannelName()
|
||||
|
||||
p.initLogCfg()
|
||||
})
|
||||
|
|
|
@ -32,7 +32,6 @@ type Partition struct {
|
|||
collectionID UniqueID
|
||||
partitionID UniqueID
|
||||
segmentIDs []UniqueID
|
||||
enable bool
|
||||
}
|
||||
|
||||
func (p *Partition) ID() UniqueID {
|
||||
|
@ -57,7 +56,6 @@ func newPartition(collectionID UniqueID, partitionID UniqueID) *Partition {
|
|||
var newPartition = &Partition{
|
||||
collectionID: collectionID,
|
||||
partitionID: partitionID,
|
||||
enable: false,
|
||||
}
|
||||
|
||||
log.Debug("create partition", zap.Int64("partitionID", partitionID))
|
||||
|
|
|
@ -31,7 +31,7 @@ type Plan struct {
|
|||
cPlan C.CPlan
|
||||
}
|
||||
|
||||
func createPlan(col Collection, dsl string) (*Plan, error) {
|
||||
func createPlan(col *Collection, dsl string) (*Plan, error) {
|
||||
cDsl := C.CString(dsl)
|
||||
defer C.free(unsafe.Pointer(cDsl))
|
||||
var cPlan C.CPlan
|
||||
|
@ -46,7 +46,7 @@ func createPlan(col Collection, dsl string) (*Plan, error) {
|
|||
return newPlan, nil
|
||||
}
|
||||
|
||||
func createPlanByExpr(col Collection, expr []byte) (*Plan, error) {
|
||||
func createPlanByExpr(col *Collection, expr []byte) (*Plan, error) {
|
||||
var cPlan C.CPlan
|
||||
status := C.CreatePlanByExpr(col.collectionPtr, (*C.char)(unsafe.Pointer(&expr[0])), (C.int64_t)(len(expr)), &cPlan)
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ func TestPlan_Plan(t *testing.T) {
|
|||
|
||||
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
|
||||
|
||||
plan, err := createPlan(*collection, dslString)
|
||||
plan, err := createPlan(collection, dslString)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, plan, nil)
|
||||
topk := plan.getTopK()
|
||||
|
@ -49,7 +49,7 @@ func TestPlan_PlaceholderGroup(t *testing.T) {
|
|||
|
||||
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
|
||||
|
||||
plan, err := createPlan(*collection, dslString)
|
||||
plan, err := createPlan(collection, dslString)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, plan)
|
||||
|
||||
|
|
|
@ -192,15 +192,13 @@ func (node *QueryNode) Start() error {
|
|||
// init services and manager
|
||||
// TODO: pass node.streaming.replica to search service
|
||||
node.searchService = newSearchService(node.queryNodeLoopCtx,
|
||||
node.historical.replica,
|
||||
node.streaming.replica,
|
||||
node.streaming.tSafeReplica,
|
||||
node.historical,
|
||||
node.streaming,
|
||||
node.msFactory)
|
||||
|
||||
node.retrieveService = newRetrieveService(node.queryNodeLoopCtx,
|
||||
node.historical.replica,
|
||||
node.streaming.replica,
|
||||
node.streaming.tSafeReplica,
|
||||
node.historical,
|
||||
node.streaming,
|
||||
node.msFactory,
|
||||
)
|
||||
|
||||
|
@ -208,7 +206,6 @@ func (node *QueryNode) Start() error {
|
|||
go node.scheduler.Start()
|
||||
|
||||
// start services
|
||||
go node.searchService.start()
|
||||
go node.retrieveService.start()
|
||||
go node.historical.start()
|
||||
node.UpdateStateCode(internalpb.StateCode_Healthy)
|
||||
|
|
|
@ -132,20 +132,10 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
|
|||
err = node.historical.replica.addPartition(collection.ID(), collectionMeta.PartitionIDs[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.historical.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, segmentTypeGrowing)
|
||||
err = node.historical.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, "", segmentTypeGrowing, true)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func initDmChannel(ctx context.Context, insertChannels []string, node *QueryNode) {
|
||||
watchReq := &querypb.WatchDmChannelsRequest{
|
||||
ChannelIDs: insertChannels,
|
||||
}
|
||||
_, err := node.WatchDmChannels(ctx, watchReq)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func initSearchChannel(ctx context.Context, searchChan string, resultChan string, node *QueryNode) {
|
||||
searchReq := &querypb.AddQueryChannelRequest{
|
||||
RequestChannelID: searchChan,
|
||||
|
|
|
@ -29,7 +29,7 @@ func TestReduce_AllFunc(t *testing.T) {
|
|||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
|
||||
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
|
||||
const DIM = 16
|
||||
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
|
||||
|
@ -63,7 +63,7 @@ func TestReduce_AllFunc(t *testing.T) {
|
|||
log.Print("marshal placeholderGroup failed")
|
||||
}
|
||||
|
||||
plan, err := createPlan(*collection, dslString)
|
||||
plan, err := createPlan(collection, dslString)
|
||||
assert.NoError(t, err)
|
||||
holder, err := parseSearchRequest(plan, placeGroupByte)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -36,10 +36,9 @@ type retrieveCollection struct {
|
|||
releaseCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
collectionID UniqueID
|
||||
historicalReplica ReplicaInterface
|
||||
streamingReplica ReplicaInterface
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
collectionID UniqueID
|
||||
historical *historical
|
||||
streaming *streaming
|
||||
|
||||
msgBuffer chan *msgstream.RetrieveMsg
|
||||
unsolvedMsgMu sync.Mutex
|
||||
|
@ -57,9 +56,8 @@ type retrieveCollection struct {
|
|||
func newRetrieveCollection(releaseCtx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
collectionID UniqueID,
|
||||
historicalReplica ReplicaInterface,
|
||||
streamingReplica ReplicaInterface,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
historical *historical,
|
||||
streaming *streaming,
|
||||
retrieveResultStream msgstream.MsgStream) *retrieveCollection {
|
||||
receiveBufSize := Params.RetrieveReceiveBufSize
|
||||
msgBuffer := make(chan *msgstream.RetrieveMsg, receiveBufSize)
|
||||
|
@ -69,10 +67,9 @@ func newRetrieveCollection(releaseCtx context.Context,
|
|||
releaseCtx: releaseCtx,
|
||||
cancel: cancel,
|
||||
|
||||
collectionID: collectionID,
|
||||
historicalReplica: historicalReplica,
|
||||
streamingReplica: streamingReplica,
|
||||
tSafeReplica: tSafeReplica,
|
||||
collectionID: collectionID,
|
||||
historical: historical,
|
||||
streaming: streaming,
|
||||
|
||||
tSafeWatchers: make(map[VChannel]*tSafeWatcher),
|
||||
|
||||
|
@ -118,7 +115,7 @@ func (rc *retrieveCollection) waitNewTSafe() Timestamp {
|
|||
}
|
||||
t := Timestamp(math.MaxInt64)
|
||||
for channel := range rc.tSafeWatchers {
|
||||
ts := rc.tSafeReplica.getTSafe(channel)
|
||||
ts := rc.streaming.tSafeReplica.getTSafe(channel)
|
||||
if ts <= t {
|
||||
t = ts
|
||||
}
|
||||
|
@ -133,7 +130,7 @@ func (rc *retrieveCollection) start() {
|
|||
|
||||
func (rc *retrieveCollection) register() {
|
||||
// register tSafe watcher and init watcher select case
|
||||
collection, err := rc.streamingReplica.getCollectionByID(rc.collectionID)
|
||||
collection, err := rc.streaming.replica.getCollectionByID(rc.collectionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return
|
||||
|
@ -141,9 +138,9 @@ func (rc *retrieveCollection) register() {
|
|||
|
||||
rc.watcherSelectCase = make([]reflect.SelectCase, 0)
|
||||
for _, channel := range collection.getWatchedDmChannels() {
|
||||
rc.tSafeReplica.addTSafe(channel)
|
||||
rc.streaming.tSafeReplica.addTSafe(channel)
|
||||
rc.tSafeWatchers[channel] = newTSafeWatcher()
|
||||
rc.tSafeReplica.registerTSafeWatcher(channel, rc.tSafeWatchers[channel])
|
||||
rc.streaming.tSafeReplica.registerTSafeWatcher(channel, rc.tSafeWatchers[channel])
|
||||
rc.watcherSelectCase = append(rc.watcherSelectCase, reflect.SelectCase{
|
||||
Dir: reflect.SelectRecv,
|
||||
Chan: reflect.ValueOf(rc.tSafeWatchers[channel].watcherChan()),
|
||||
|
@ -358,7 +355,7 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
|
|||
timestamp := retrieveMsg.Base.Timestamp
|
||||
|
||||
collectionID := retrieveMsg.CollectionID
|
||||
collection, err := rc.historicalReplica.getCollectionByID(collectionID)
|
||||
collection, err := rc.historical.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -378,8 +375,8 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
|
|||
var partitionIDsInStreaming []UniqueID
|
||||
partitionIDsInQuery := retrieveMsg.PartitionIDs
|
||||
if len(partitionIDsInQuery) == 0 {
|
||||
partitionIDsInHistoricalCol, err1 := rc.historicalReplica.getPartitionIDs(collectionID)
|
||||
partitionIDsInStreamingCol, err2 := rc.streamingReplica.getPartitionIDs(collectionID)
|
||||
partitionIDsInHistoricalCol, err1 := rc.historical.replica.getPartitionIDs(collectionID)
|
||||
partitionIDsInStreamingCol, err2 := rc.streaming.replica.getPartitionIDs(collectionID)
|
||||
if err1 != nil && err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
@ -390,11 +387,11 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
|
|||
partitionIDsInStreaming = partitionIDsInStreamingCol
|
||||
} else {
|
||||
for _, id := range partitionIDsInQuery {
|
||||
_, err1 := rc.historicalReplica.getPartitionByID(id)
|
||||
_, err1 := rc.historical.replica.getPartitionByID(id)
|
||||
if err1 == nil {
|
||||
partitionIDsInHistorical = append(partitionIDsInHistorical, id)
|
||||
}
|
||||
_, err2 := rc.streamingReplica.getPartitionByID(id)
|
||||
_, err2 := rc.streaming.replica.getPartitionByID(id)
|
||||
if err2 == nil {
|
||||
partitionIDsInStreaming = append(partitionIDsInStreaming, id)
|
||||
}
|
||||
|
@ -406,12 +403,12 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
|
|||
|
||||
var mergeList []*planpb.RetrieveResults
|
||||
for _, partitionID := range partitionIDsInHistorical {
|
||||
segmentIDs, err := rc.historicalReplica.getSegmentIDs(partitionID)
|
||||
segmentIDs, err := rc.historical.replica.getSegmentIDs(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := rc.historicalReplica.getSegmentByID(segmentID)
|
||||
segment, err := rc.historical.replica.getSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -424,12 +421,12 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
|
|||
}
|
||||
|
||||
for _, partitionID := range partitionIDsInStreaming {
|
||||
segmentIDs, err := rc.streamingReplica.getSegmentIDs(partitionID)
|
||||
segmentIDs, err := rc.streaming.replica.getSegmentIDs(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := rc.streamingReplica.getSegmentByID(segmentID)
|
||||
segment, err := rc.streaming.replica.getSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -27,9 +27,8 @@ type retrieveService struct {
|
|||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
historicalReplica ReplicaInterface
|
||||
streamingReplica ReplicaInterface
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
historical *historical
|
||||
streaming *streaming
|
||||
|
||||
retrieveMsgStream msgstream.MsgStream
|
||||
retrieveResultMsgStream msgstream.MsgStream
|
||||
|
@ -39,9 +38,8 @@ type retrieveService struct {
|
|||
}
|
||||
|
||||
func newRetrieveService(ctx context.Context,
|
||||
historicalReplica ReplicaInterface,
|
||||
streamingReplica ReplicaInterface,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
historical *historical,
|
||||
streaming *streaming,
|
||||
factory msgstream.Factory) *retrieveService {
|
||||
|
||||
retrieveStream, _ := factory.NewQueryMsgStream(ctx)
|
||||
|
@ -62,9 +60,8 @@ func newRetrieveService(ctx context.Context,
|
|||
ctx: retrieveServiceCtx,
|
||||
cancel: retrieveServiceCancel,
|
||||
|
||||
historicalReplica: historicalReplica,
|
||||
streamingReplica: streamingReplica,
|
||||
tSafeReplica: tSafeReplica,
|
||||
historical: historical,
|
||||
streaming: streaming,
|
||||
|
||||
retrieveMsgStream: retrieveStream,
|
||||
retrieveResultMsgStream: retrieveResultStream,
|
||||
|
@ -81,7 +78,7 @@ func (rs *retrieveService) start() {
|
|||
}
|
||||
|
||||
func (rs *retrieveService) collectionCheck(collectionID UniqueID) error {
|
||||
if ok := rs.historicalReplica.hasCollection(collectionID); !ok {
|
||||
if ok := rs.historical.replica.hasCollection(collectionID); !ok {
|
||||
err := errors.New("no collection found, collectionID = " + strconv.FormatInt(collectionID, 10))
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
|
@ -164,9 +161,8 @@ func (rs *retrieveService) startRetrieveCollection(collectionID UniqueID) {
|
|||
rc := newRetrieveCollection(ctx1,
|
||||
cancel,
|
||||
collectionID,
|
||||
rs.historicalReplica,
|
||||
rs.streamingReplica,
|
||||
rs.tSafeReplica,
|
||||
rs.historical,
|
||||
rs.streaming,
|
||||
rs.retrieveResultMsgStream)
|
||||
rs.retrieveCollections[collectionID] = rc
|
||||
rc.start()
|
||||
|
|
|
@ -14,14 +14,12 @@ package querynode
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
oplog "github.com/opentracing/opentracing-go/log"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/msgstream"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
|
@ -29,16 +27,17 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/util/trace"
|
||||
"github.com/milvus-io/milvus/internal/util/tsoutil"
|
||||
oplog "github.com/opentracing/opentracing-go/log"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type searchCollection struct {
|
||||
releaseCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
collectionID UniqueID
|
||||
historicalReplica ReplicaInterface
|
||||
streamingReplica ReplicaInterface
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
collectionID UniqueID
|
||||
historical *historical
|
||||
streaming *streaming
|
||||
|
||||
msgBuffer chan *msgstream.SearchMsg
|
||||
unsolvedMsgMu sync.Mutex // guards unsolvedMsg
|
||||
|
@ -50,6 +49,7 @@ type searchCollection struct {
|
|||
serviceableTimeMutex sync.Mutex // guards serviceableTime
|
||||
serviceableTime Timestamp
|
||||
|
||||
searchMsgStream msgstream.MsgStream
|
||||
searchResultMsgStream msgstream.MsgStream
|
||||
}
|
||||
|
||||
|
@ -58,29 +58,31 @@ type ResultEntityIds []UniqueID
|
|||
func newSearchCollection(releaseCtx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
collectionID UniqueID,
|
||||
historicalReplica ReplicaInterface,
|
||||
streamingReplica ReplicaInterface,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
searchResultStream msgstream.MsgStream) *searchCollection {
|
||||
historical *historical,
|
||||
streaming *streaming,
|
||||
factory msgstream.Factory) *searchCollection {
|
||||
|
||||
receiveBufSize := Params.SearchReceiveBufSize
|
||||
msgBuffer := make(chan *msgstream.SearchMsg, receiveBufSize)
|
||||
unsolvedMsg := make([]*msgstream.SearchMsg, 0)
|
||||
|
||||
searchStream, _ := factory.NewQueryMsgStream(releaseCtx)
|
||||
searchResultStream, _ := factory.NewQueryMsgStream(releaseCtx)
|
||||
|
||||
sc := &searchCollection{
|
||||
releaseCtx: releaseCtx,
|
||||
cancel: cancel,
|
||||
|
||||
collectionID: collectionID,
|
||||
historicalReplica: historicalReplica,
|
||||
streamingReplica: streamingReplica,
|
||||
tSafeReplica: tSafeReplica,
|
||||
collectionID: collectionID,
|
||||
historical: historical,
|
||||
streaming: streaming,
|
||||
|
||||
tSafeWatchers: make(map[VChannel]*tSafeWatcher),
|
||||
|
||||
msgBuffer: msgBuffer,
|
||||
unsolvedMsg: unsolvedMsg,
|
||||
|
||||
searchMsgStream: searchStream,
|
||||
searchResultMsgStream: searchResultStream,
|
||||
}
|
||||
|
||||
|
@ -89,12 +91,23 @@ func newSearchCollection(releaseCtx context.Context,
|
|||
}
|
||||
|
||||
func (s *searchCollection) start() {
|
||||
go s.receiveSearchMsg()
|
||||
go s.searchMsgStream.Start()
|
||||
go s.searchResultMsgStream.Start()
|
||||
go s.consumeSearch()
|
||||
go s.doUnsolvedMsgSearch()
|
||||
}
|
||||
|
||||
func (s *searchCollection) close() {
|
||||
if s.searchMsgStream != nil {
|
||||
s.searchMsgStream.Close()
|
||||
}
|
||||
if s.searchResultMsgStream != nil {
|
||||
s.searchResultMsgStream.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *searchCollection) register() {
|
||||
collection, err := s.streamingReplica.getCollectionByID(s.collectionID)
|
||||
collection, err := s.streaming.replica.getCollectionByID(s.collectionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return
|
||||
|
@ -102,11 +115,12 @@ func (s *searchCollection) register() {
|
|||
|
||||
s.watcherSelectCase = make([]reflect.SelectCase, 0)
|
||||
log.Debug("register tSafe watcher and init watcher select case",
|
||||
zap.Any("collectionID", collection.ID()),
|
||||
zap.Any("dml channels", collection.getWatchedDmChannels()),
|
||||
zap.Any("collectionID", collection.ID()))
|
||||
)
|
||||
for _, channel := range collection.getWatchedDmChannels() {
|
||||
s.tSafeWatchers[channel] = newTSafeWatcher()
|
||||
s.tSafeReplica.registerTSafeWatcher(channel, s.tSafeWatchers[channel])
|
||||
s.streaming.tSafeReplica.registerTSafeWatcher(channel, s.tSafeWatchers[channel])
|
||||
s.watcherSelectCase = append(s.watcherSelectCase, reflect.SelectCase{
|
||||
Dir: reflect.SelectRecv,
|
||||
Chan: reflect.ValueOf(s.tSafeWatchers[channel].watcherChan()),
|
||||
|
@ -135,9 +149,10 @@ func (s *searchCollection) waitNewTSafe() Timestamp {
|
|||
log.Error("tSafe has been closed", zap.Any("collectionID", s.collectionID))
|
||||
return invalidTimestamp
|
||||
}
|
||||
//log.Debug("wait new tSafe", zap.Any("collectionID", s.collectionID))
|
||||
t := Timestamp(math.MaxInt64)
|
||||
for channel := range s.tSafeWatchers {
|
||||
ts := s.tSafeReplica.getTSafe(channel)
|
||||
ts := s.streaming.tSafeReplica.getTSafe(channel)
|
||||
if ts <= t {
|
||||
t = ts
|
||||
}
|
||||
|
@ -175,95 +190,191 @@ func (s *searchCollection) emptySearch(searchMsg *msgstream.SearchMsg) {
|
|||
err := s.search(searchMsg)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
err2 := s.publishFailedSearchResult(searchMsg, err.Error())
|
||||
if err2 != nil {
|
||||
log.Error("publish FailedSearchResult failed", zap.Error(err2))
|
||||
}
|
||||
s.publishFailedSearchResult(searchMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *searchCollection) receiveSearchMsg() {
|
||||
func (s *searchCollection) consumeSearch() {
|
||||
for {
|
||||
select {
|
||||
case <-s.releaseCtx.Done():
|
||||
log.Debug("stop searchCollection's receiveSearchMsg", zap.Int64("collectionID", s.collectionID))
|
||||
return
|
||||
case sm := <-s.msgBuffer:
|
||||
sp, ctx := trace.StartSpanFromContext(sm.TraceCtx())
|
||||
sm.SetTraceCtx(ctx)
|
||||
log.Debug("get search message from msgBuffer",
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
serviceTime := s.getServiceableTime()
|
||||
if sm.BeginTs() > serviceTime {
|
||||
bt, _ := tsoutil.ParseTS(sm.BeginTs())
|
||||
st, _ := tsoutil.ParseTS(serviceTime)
|
||||
log.Debug("querynode::receiveSearchMsg: add to unsolvedMsgs",
|
||||
zap.Any("sm.BeginTs", bt),
|
||||
zap.Any("serviceTime", st),
|
||||
zap.Any("delta seconds", (sm.BeginTs()-serviceTime)/(1000*1000*1000)),
|
||||
zap.Any("collectionID", s.collectionID),
|
||||
zap.Any("msgID", sm.ID()),
|
||||
)
|
||||
s.addToUnsolvedMsg(sm)
|
||||
sp.LogFields(
|
||||
oplog.String("send to unsolved buffer", "send to unsolved buffer"),
|
||||
oplog.Object("begin ts", bt),
|
||||
oplog.Object("serviceTime", st),
|
||||
oplog.Float64("delta seconds", float64(sm.BeginTs()-serviceTime)/(1000.0*1000.0*1000.0)),
|
||||
)
|
||||
sp.Finish()
|
||||
default:
|
||||
msgPack := s.searchMsgStream.Consume()
|
||||
if msgPack == nil || len(msgPack.Msgs) <= 0 {
|
||||
msgPackNil := msgPack == nil
|
||||
msgPackEmpty := true
|
||||
if msgPack != nil {
|
||||
msgPackEmpty = len(msgPack.Msgs) <= 0
|
||||
}
|
||||
log.Debug("consume search message failed", zap.Any("msgPack is Nil", msgPackNil),
|
||||
zap.Any("msgPackEmpty", msgPackEmpty))
|
||||
continue
|
||||
}
|
||||
log.Debug("doing search in receiveSearchMsg...",
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
err := s.search(sm)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
log.Debug("do search failed in receiveSearchMsg, prepare to publish failed search result",
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
err2 := s.publishFailedSearchResult(sm, err.Error())
|
||||
if err2 != nil {
|
||||
log.Error("publish FailedSearchResult failed", zap.Error(err2))
|
||||
for _, msg := range msgPack.Msgs {
|
||||
switch sm := msg.(type) {
|
||||
case *msgstream.SearchMsg:
|
||||
s.receiveSearch(sm)
|
||||
case *msgstream.LoadBalanceSegmentsMsg:
|
||||
s.loadBalance(sm)
|
||||
default:
|
||||
log.Warn("unsupported msg type in search channel", zap.Any("msg", sm))
|
||||
}
|
||||
}
|
||||
log.Debug("do search done in receiveSearchMsg",
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
sp.Finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *searchCollection) loadBalance(msg *msgstream.LoadBalanceSegmentsMsg) {
|
||||
log.Debug("consume load balance message",
|
||||
zap.Int64("msgID", msg.ID()))
|
||||
nodeID := Params.QueryNodeID
|
||||
for _, info := range msg.Infos {
|
||||
segmentID := info.SegmentID
|
||||
if nodeID == info.SourceNodeID {
|
||||
err := s.historical.replica.removeSegment(segmentID)
|
||||
if err != nil {
|
||||
log.Error("loadBalance failed when remove segment",
|
||||
zap.Error(err),
|
||||
zap.Any("segmentID", segmentID))
|
||||
}
|
||||
}
|
||||
if nodeID == info.DstNodeID {
|
||||
segment, err := s.historical.replica.getSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
log.Error("loadBalance failed when making segment on service",
|
||||
zap.Error(err),
|
||||
zap.Any("segmentID", segmentID))
|
||||
continue // not return, try to load balance all segment
|
||||
}
|
||||
segment.setOnService(true)
|
||||
}
|
||||
}
|
||||
log.Debug("load balance done",
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
zap.Int("num of segment", len(msg.Infos)))
|
||||
}
|
||||
|
||||
func (s *searchCollection) receiveSearch(msg *msgstream.SearchMsg) {
|
||||
if msg.CollectionID != s.collectionID {
|
||||
log.Debug("not target collection search request",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug("consume search message",
|
||||
zap.Any("collectionID", msg.CollectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
|
||||
msg.SetTraceCtx(ctx)
|
||||
|
||||
// check if collection has been released
|
||||
collection, err := s.historical.replica.getCollectionByID(msg.CollectionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
s.publishFailedSearchResult(msg, err.Error())
|
||||
return
|
||||
}
|
||||
if msg.BeginTs() >= collection.getReleaseTime() {
|
||||
err := errors.New("search failed, collection has been released, msgID = " +
|
||||
fmt.Sprintln(msg.ID()) +
|
||||
", collectionID = " +
|
||||
fmt.Sprintln(msg.CollectionID))
|
||||
log.Error(err.Error())
|
||||
s.publishFailedSearchResult(msg, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
serviceTime := s.getServiceableTime()
|
||||
if msg.BeginTs() > serviceTime {
|
||||
bt, _ := tsoutil.ParseTS(msg.BeginTs())
|
||||
st, _ := tsoutil.ParseTS(serviceTime)
|
||||
log.Debug("query node::receiveSearchMsg: add to unsolvedMsg",
|
||||
zap.Any("collectionID", s.collectionID),
|
||||
zap.Any("sm.BeginTs", bt),
|
||||
zap.Any("serviceTime", st),
|
||||
zap.Any("delta seconds", (msg.BeginTs()-serviceTime)/(1000*1000*1000)),
|
||||
zap.Any("msgID", msg.ID()),
|
||||
)
|
||||
s.addToUnsolvedMsg(msg)
|
||||
sp.LogFields(
|
||||
oplog.String("send to unsolved buffer", "send to unsolved buffer"),
|
||||
oplog.Object("begin ts", bt),
|
||||
oplog.Object("serviceTime", st),
|
||||
oplog.Float64("delta seconds", float64(msg.BeginTs()-serviceTime)/(1000.0*1000.0*1000.0)),
|
||||
)
|
||||
sp.Finish()
|
||||
return
|
||||
}
|
||||
log.Debug("doing search in receiveSearchMsg...",
|
||||
zap.Int64("collectionID", msg.CollectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
err = s.search(msg)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
log.Debug("do search failed in receiveSearchMsg, prepare to publish failed search result",
|
||||
zap.Int64("collectionID", msg.CollectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
s.publishFailedSearchResult(msg, err.Error())
|
||||
}
|
||||
log.Debug("do search done in receiveSearch",
|
||||
zap.Int64("collectionID", msg.CollectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
sp.Finish()
|
||||
}
|
||||
|
||||
func (s *searchCollection) doUnsolvedMsgSearch() {
|
||||
log.Debug("starting doUnsolvedMsgSearch...", zap.Any("collectionID", s.collectionID))
|
||||
for {
|
||||
select {
|
||||
case <-s.releaseCtx.Done():
|
||||
log.Debug("stop searchCollection's doUnsolvedMsgSearch", zap.Int64("collectionID", s.collectionID))
|
||||
return
|
||||
default:
|
||||
//time.Sleep(10 * time.Millisecond)
|
||||
serviceTime := s.waitNewTSafe()
|
||||
s.setServiceableTime(serviceTime)
|
||||
log.Debug("query node::doUnsolvedMsgSearch: setServiceableTime",
|
||||
zap.Any("serviceTime", serviceTime),
|
||||
)
|
||||
st, _ := tsoutil.ParseTS(serviceTime)
|
||||
log.Debug("get tSafe from flow graph",
|
||||
zap.Int64("collectionID", s.collectionID),
|
||||
zap.Uint64("tSafe", serviceTime))
|
||||
zap.Any("tSafe", st))
|
||||
|
||||
s.setServiceableTime(serviceTime)
|
||||
log.Debug("query node::doUnsolvedMsgSearch: setServiceableTime",
|
||||
zap.Any("serviceTime", st),
|
||||
)
|
||||
|
||||
searchMsg := make([]*msgstream.SearchMsg, 0)
|
||||
tempMsg := s.popAllUnsolvedMsg()
|
||||
|
||||
for _, sm := range tempMsg {
|
||||
bt, _ := tsoutil.ParseTS(sm.EndTs())
|
||||
st, _ = tsoutil.ParseTS(serviceTime)
|
||||
log.Debug("get search message from unsolvedMsg",
|
||||
zap.Int64("collectionID", sm.CollectionID),
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
zap.Any("reqTime_p", bt),
|
||||
zap.Any("serviceTime_p", st),
|
||||
zap.Any("reqTime_l", sm.EndTs()),
|
||||
zap.Any("serviceTime_l", serviceTime),
|
||||
)
|
||||
if sm.EndTs() <= serviceTime {
|
||||
searchMsg = append(searchMsg, sm)
|
||||
continue
|
||||
}
|
||||
log.Debug("query node::doUnsolvedMsgSearch: add to unsolvedMsg",
|
||||
zap.Any("collectionID", s.collectionID),
|
||||
zap.Any("sm.BeginTs", bt),
|
||||
zap.Any("serviceTime", st),
|
||||
zap.Any("delta seconds", (sm.BeginTs()-serviceTime)/(1000*1000*1000)),
|
||||
zap.Any("msgID", sm.ID()),
|
||||
)
|
||||
s.addToUnsolvedMsg(sm)
|
||||
}
|
||||
|
||||
|
@ -274,23 +385,23 @@ func (s *searchCollection) doUnsolvedMsgSearch() {
|
|||
sp, ctx := trace.StartSpanFromContext(sm.TraceCtx())
|
||||
sm.SetTraceCtx(ctx)
|
||||
log.Debug("doing search in doUnsolvedMsgSearch...",
|
||||
zap.Int64("collectionID", sm.CollectionID),
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
)
|
||||
err := s.search(sm)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
log.Debug("do search failed in doUnsolvedMsgSearch, prepare to publish failed search result",
|
||||
zap.Int64("collectionID", sm.CollectionID),
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
err2 := s.publishFailedSearchResult(sm, err.Error())
|
||||
if err2 != nil {
|
||||
log.Error("publish FailedSearchResult failed", zap.Error(err2))
|
||||
}
|
||||
)
|
||||
s.publishFailedSearchResult(sm, err.Error())
|
||||
}
|
||||
sp.Finish()
|
||||
log.Debug("do search done in doUnsolvedMsgSearch",
|
||||
zap.Int64("collectionID", sm.CollectionID),
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
)
|
||||
}
|
||||
log.Debug("doUnsolvedMsgSearch, do search done", zap.Int("num of searchMsg", len(searchMsg)))
|
||||
}
|
||||
|
@ -306,20 +417,20 @@ func (s *searchCollection) search(searchMsg *msgstream.SearchMsg) error {
|
|||
searchTimestamp := searchMsg.Base.Timestamp
|
||||
|
||||
collectionID := searchMsg.CollectionID
|
||||
collection, err := s.historicalReplica.getCollectionByID(collectionID)
|
||||
collection, err := s.historical.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var plan *Plan
|
||||
if searchMsg.GetDslType() == commonpb.DslType_BoolExprV1 {
|
||||
expr := searchMsg.SerializedExprPlan
|
||||
plan, err = createPlanByExpr(*collection, expr)
|
||||
plan, err = createPlanByExpr(collection, expr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
dsl := searchMsg.Dsl
|
||||
plan, err = createPlan(*collection, dsl)
|
||||
plan, err = createPlan(collection, dsl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -333,39 +444,6 @@ func (s *searchCollection) search(searchMsg *msgstream.SearchMsg) error {
|
|||
searchRequests := make([]*searchRequest, 0)
|
||||
searchRequests = append(searchRequests, searchReq)
|
||||
|
||||
searchResults := make([]*SearchResult, 0)
|
||||
matchedSegments := make([]*Segment, 0)
|
||||
|
||||
var searchPartitionIDsInHistorical []UniqueID
|
||||
var searchPartitionIDsInStreaming []UniqueID
|
||||
partitionIDsInQuery := searchMsg.PartitionIDs
|
||||
if len(partitionIDsInQuery) == 0 {
|
||||
partitionIDsInHistoricalCol, err1 := s.historicalReplica.getPartitionIDs(collectionID)
|
||||
partitionIDsInStreamingCol, err2 := s.streamingReplica.getPartitionIDs(collectionID)
|
||||
if err1 != nil && err2 != nil {
|
||||
return err2
|
||||
}
|
||||
if len(partitionIDsInHistoricalCol) == 0 {
|
||||
return errors.New("none of this collection's partition has been loaded")
|
||||
}
|
||||
searchPartitionIDsInHistorical = partitionIDsInHistoricalCol
|
||||
searchPartitionIDsInStreaming = partitionIDsInStreamingCol
|
||||
} else {
|
||||
for _, id := range partitionIDsInQuery {
|
||||
_, err1 := s.historicalReplica.getPartitionByID(id)
|
||||
if err1 == nil {
|
||||
searchPartitionIDsInHistorical = append(searchPartitionIDsInHistorical, id)
|
||||
}
|
||||
_, err2 := s.streamingReplica.getPartitionByID(id)
|
||||
if err2 == nil {
|
||||
searchPartitionIDsInStreaming = append(searchPartitionIDsInStreaming, id)
|
||||
}
|
||||
if err1 != nil && err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if searchMsg.GetDslType() == commonpb.DslType_BoolExprV1 {
|
||||
sp.LogFields(oplog.String("statistical time", "stats start"),
|
||||
oplog.Object("nq", queryNum),
|
||||
|
@ -376,47 +454,29 @@ func (s *searchCollection) search(searchMsg *msgstream.SearchMsg) error {
|
|||
oplog.Object("dsl", searchMsg.Dsl))
|
||||
}
|
||||
|
||||
searchResults := make([]*SearchResult, 0)
|
||||
matchedSegments := make([]*Segment, 0)
|
||||
sealedSegmentSearched := make([]UniqueID, 0)
|
||||
for _, partitionID := range searchPartitionIDsInHistorical {
|
||||
segmentIDs, err := s.historicalReplica.getSegmentIDs(partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := s.historicalReplica.getSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
searchResult, err := segment.segmentSearch(plan, searchRequests, []Timestamp{searchTimestamp})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
searchResults = append(searchResults, searchResult)
|
||||
matchedSegments = append(matchedSegments, segment)
|
||||
sealedSegmentSearched = append(sealedSegmentSearched, segmentID)
|
||||
}
|
||||
// historical search
|
||||
hisSearchResults, hisSegmentResults, err := s.historical.search(searchRequests, collectionID, searchMsg.PartitionIDs, plan, searchTimestamp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
searchResults = append(searchResults, hisSearchResults...)
|
||||
matchedSegments = append(matchedSegments, hisSegmentResults...)
|
||||
for _, seg := range hisSegmentResults {
|
||||
sealedSegmentSearched = append(sealedSegmentSearched, seg.segmentID)
|
||||
}
|
||||
|
||||
//TODO:: get searched channels
|
||||
for _, partitionID := range searchPartitionIDsInStreaming {
|
||||
segmentIDs, err := s.streamingReplica.getSegmentIDs(partitionID)
|
||||
// streaming search
|
||||
for _, channel := range collection.getWatchedDmChannels() {
|
||||
strSearchResults, strSegmentResults, err := s.streaming.search(searchRequests, collectionID, searchMsg.PartitionIDs, channel, plan, searchTimestamp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, segmentID := range segmentIDs {
|
||||
segment, err := s.streamingReplica.getSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
searchResult, err := segment.segmentSearch(plan, searchRequests, []Timestamp{searchTimestamp})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
searchResults = append(searchResults, searchResult)
|
||||
matchedSegments = append(matchedSegments, segment)
|
||||
}
|
||||
searchResults = append(searchResults, strSearchResults...)
|
||||
matchedSegments = append(matchedSegments, strSegmentResults...)
|
||||
}
|
||||
|
||||
sp.LogFields(oplog.String("statistical time", "segment search end"))
|
||||
|
@ -562,8 +622,9 @@ func (s *searchCollection) search(searchMsg *msgstream.SearchMsg) error {
|
|||
|
||||
func (s *searchCollection) publishSearchResult(msg msgstream.TsMsg, collectionID UniqueID) error {
|
||||
log.Debug("publishing search result...",
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
zap.Int64("collectionID", collectionID))
|
||||
)
|
||||
span, ctx := trace.StartSpanFromContext(msg.TraceCtx())
|
||||
defer span.Finish()
|
||||
msg.SetTraceCtx(ctx)
|
||||
|
@ -571,16 +632,20 @@ func (s *searchCollection) publishSearchResult(msg msgstream.TsMsg, collectionID
|
|||
msgPack.Msgs = append(msgPack.Msgs, msg)
|
||||
err := s.searchResultMsgStream.Produce(&msgPack)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
log.Error("publishing search result failed, err = "+err.Error(),
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
)
|
||||
} else {
|
||||
log.Debug("publish search result done",
|
||||
zap.Int64("collectionID", collectionID),
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
zap.Int64("collectionID", collectionID))
|
||||
)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *searchCollection) publishFailedSearchResult(searchMsg *msgstream.SearchMsg, errMsg string) error {
|
||||
func (s *searchCollection) publishFailedSearchResult(searchMsg *msgstream.SearchMsg, errMsg string) {
|
||||
span, ctx := trace.StartSpanFromContext(searchMsg.TraceCtx())
|
||||
defer span.Finish()
|
||||
searchMsg.SetTraceCtx(ctx)
|
||||
|
@ -606,8 +671,6 @@ func (s *searchCollection) publishFailedSearchResult(searchMsg *msgstream.Search
|
|||
msgPack.Msgs = append(msgPack.Msgs, searchResultMsg)
|
||||
err := s.searchResultMsgStream.Produce(&msgPack)
|
||||
if err != nil {
|
||||
return err
|
||||
log.Error("publish FailedSearchResult failed" + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -14,146 +14,48 @@ package querynode
|
|||
import "C"
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strconv"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/msgstream"
|
||||
"github.com/milvus-io/milvus/internal/util/trace"
|
||||
)
|
||||
|
||||
type searchService struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
historicalReplica ReplicaInterface
|
||||
streamingReplica ReplicaInterface
|
||||
tSafeReplica TSafeReplicaInterface
|
||||
historical *historical
|
||||
streaming *streaming
|
||||
|
||||
searchMsgStream msgstream.MsgStream
|
||||
searchResultMsgStream msgstream.MsgStream
|
||||
queryNodeID UniqueID
|
||||
searchCollections map[UniqueID]*searchCollection
|
||||
|
||||
queryNodeID UniqueID
|
||||
searchCollections map[UniqueID]*searchCollection
|
||||
emptySearchCollection *searchCollection
|
||||
factory msgstream.Factory
|
||||
}
|
||||
|
||||
func newSearchService(ctx context.Context,
|
||||
historicalReplica ReplicaInterface,
|
||||
streamingReplica ReplicaInterface,
|
||||
tSafeReplica TSafeReplicaInterface,
|
||||
historical *historical,
|
||||
streaming *streaming,
|
||||
factory msgstream.Factory) *searchService {
|
||||
|
||||
searchStream, _ := factory.NewQueryMsgStream(ctx)
|
||||
searchResultStream, _ := factory.NewQueryMsgStream(ctx)
|
||||
log.Debug("newSearchService", zap.Any("SearchChannelNames", Params.SearchChannelNames), zap.Any("SearchResultChannels", Params.SearchResultChannelNames))
|
||||
|
||||
if len(Params.SearchChannelNames) > 0 && len(Params.SearchResultChannelNames) > 0 {
|
||||
// query node need to consume search channels and produce search result channels when init.
|
||||
consumeChannels := Params.SearchChannelNames
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
searchStream.AsConsumer(consumeChannels, consumeSubName)
|
||||
log.Debug("query node AsConsumer", zap.Any("searchChannels", consumeChannels), zap.Any("consumeSubName", consumeSubName))
|
||||
producerChannels := Params.SearchResultChannelNames
|
||||
searchResultStream.AsProducer(producerChannels)
|
||||
log.Debug("query node AsProducer", zap.Any("searchResultChannels", producerChannels))
|
||||
}
|
||||
|
||||
searchServiceCtx, searchServiceCancel := context.WithCancel(ctx)
|
||||
return &searchService{
|
||||
ctx: searchServiceCtx,
|
||||
cancel: searchServiceCancel,
|
||||
|
||||
historicalReplica: historicalReplica,
|
||||
streamingReplica: streamingReplica,
|
||||
tSafeReplica: tSafeReplica,
|
||||
|
||||
searchMsgStream: searchStream,
|
||||
searchResultMsgStream: searchResultStream,
|
||||
historical: historical,
|
||||
streaming: streaming,
|
||||
|
||||
queryNodeID: Params.QueryNodeID,
|
||||
searchCollections: make(map[UniqueID]*searchCollection),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *searchService) start() {
|
||||
log.Debug("start search service")
|
||||
s.searchMsgStream.Start()
|
||||
s.searchResultMsgStream.Start()
|
||||
s.startEmptySearchCollection()
|
||||
s.consumeSearch()
|
||||
}
|
||||
|
||||
func (s *searchService) collectionCheck(collectionID UniqueID) error {
|
||||
// check if collection exists
|
||||
if ok := s.historicalReplica.hasCollection(collectionID); !ok {
|
||||
err := errors.New("no collection found, collectionID = " + strconv.FormatInt(collectionID, 10))
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *searchService) consumeSearch() {
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
default:
|
||||
msgPack := s.searchMsgStream.Consume()
|
||||
if msgPack == nil || len(msgPack.Msgs) <= 0 {
|
||||
msgPackNil := msgPack == nil
|
||||
msgPackEmpty := true
|
||||
if msgPack != nil {
|
||||
msgPackEmpty = len(msgPack.Msgs) <= 0
|
||||
}
|
||||
log.Debug("consume search message failed", zap.Any("msgPack is Nil", msgPackNil),
|
||||
zap.Any("msgPackEmpty", msgPackEmpty))
|
||||
|
||||
continue
|
||||
}
|
||||
for _, msg := range msgPack.Msgs {
|
||||
sm, ok := msg.(*msgstream.SearchMsg)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
log.Debug("consume search message",
|
||||
zap.Int64("msgID", msg.ID()),
|
||||
zap.Any("collectionID", sm.CollectionID))
|
||||
sp, ctx := trace.StartSpanFromContext(sm.TraceCtx())
|
||||
sm.SetTraceCtx(ctx)
|
||||
err := s.collectionCheck(sm.CollectionID)
|
||||
if err != nil {
|
||||
s.emptySearchCollection.emptySearch(sm)
|
||||
log.Debug("cannot found collection, do empty search done",
|
||||
zap.Int64("msgID", sm.ID()),
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
continue
|
||||
}
|
||||
_, ok = s.searchCollections[sm.CollectionID]
|
||||
if !ok {
|
||||
s.startSearchCollection(sm.CollectionID)
|
||||
log.Debug("new search collection, start search collection service",
|
||||
zap.Int64("collectionID", sm.CollectionID))
|
||||
}
|
||||
s.searchCollections[sm.CollectionID].msgBuffer <- sm
|
||||
sp.Finish()
|
||||
}
|
||||
}
|
||||
factory: factory,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *searchService) close() {
|
||||
log.Debug("search service closed")
|
||||
if s.searchMsgStream != nil {
|
||||
s.searchMsgStream.Close()
|
||||
}
|
||||
if s.searchResultMsgStream != nil {
|
||||
s.searchResultMsgStream.Close()
|
||||
}
|
||||
for collectionID := range s.searchCollections {
|
||||
s.stopSearchCollection(collectionID)
|
||||
}
|
||||
|
@ -161,30 +63,20 @@ func (s *searchService) close() {
|
|||
s.cancel()
|
||||
}
|
||||
|
||||
func (s *searchService) startSearchCollection(collectionID UniqueID) {
|
||||
func (s *searchService) addSearchCollection(collectionID UniqueID) {
|
||||
if _, ok := s.searchCollections[collectionID]; ok {
|
||||
log.Warn("search collection already exists", zap.Any("collectionID", collectionID))
|
||||
return
|
||||
}
|
||||
|
||||
ctx1, cancel := context.WithCancel(s.ctx)
|
||||
sc := newSearchCollection(ctx1,
|
||||
cancel,
|
||||
collectionID,
|
||||
s.historicalReplica,
|
||||
s.streamingReplica,
|
||||
s.tSafeReplica,
|
||||
s.searchResultMsgStream)
|
||||
s.historical,
|
||||
s.streaming,
|
||||
s.factory)
|
||||
s.searchCollections[collectionID] = sc
|
||||
sc.start()
|
||||
}
|
||||
|
||||
func (s *searchService) startEmptySearchCollection() {
|
||||
ctx1, cancel := context.WithCancel(s.ctx)
|
||||
sc := newSearchCollection(ctx1,
|
||||
cancel,
|
||||
UniqueID(-1),
|
||||
s.historicalReplica,
|
||||
s.streamingReplica,
|
||||
s.tSafeReplica,
|
||||
s.searchResultMsgStream)
|
||||
s.emptySearchCollection = sc
|
||||
sc.start()
|
||||
}
|
||||
|
||||
func (s *searchService) hasSearchCollection(collectionID UniqueID) bool {
|
||||
|
@ -196,7 +88,9 @@ func (s *searchService) stopSearchCollection(collectionID UniqueID) {
|
|||
sc, ok := s.searchCollections[collectionID]
|
||||
if !ok {
|
||||
log.Error("stopSearchCollection failed, collection doesn't exist", zap.Int64("collectionID", collectionID))
|
||||
return
|
||||
}
|
||||
sc.close()
|
||||
sc.cancel()
|
||||
delete(s.searchCollections, collectionID)
|
||||
}
|
||||
|
|
|
@ -142,15 +142,13 @@ func TestSearch_Search(t *testing.T) {
|
|||
|
||||
// start search service
|
||||
node.searchService = newSearchService(node.queryNodeLoopCtx,
|
||||
node.historical.replica,
|
||||
node.streaming.replica,
|
||||
node.streaming.tSafeReplica,
|
||||
node.historical,
|
||||
node.streaming,
|
||||
msFactory)
|
||||
go node.searchService.start()
|
||||
node.searchService.startSearchCollection(collectionID)
|
||||
node.searchService.addSearchCollection(collectionID)
|
||||
|
||||
// load segment
|
||||
err = node.historical.replica.addSegment(segmentID, defaultPartitionID, collectionID, segmentTypeSealed)
|
||||
err = node.historical.replica.addSegment(segmentID, defaultPartitionID, collectionID, "", segmentTypeSealed, true)
|
||||
assert.NoError(t, err)
|
||||
segment, err := node.historical.replica.getSegmentByID(segmentID)
|
||||
assert.NoError(t, err)
|
||||
|
@ -182,22 +180,20 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
|
|||
|
||||
// start search service
|
||||
node.searchService = newSearchService(node.queryNodeLoopCtx,
|
||||
node.streaming.replica,
|
||||
node.streaming.replica,
|
||||
node.streaming.tSafeReplica,
|
||||
node.historical,
|
||||
node.streaming,
|
||||
msFactory)
|
||||
go node.searchService.start()
|
||||
node.searchService.startSearchCollection(collectionID)
|
||||
node.searchService.addSearchCollection(collectionID)
|
||||
|
||||
// load segments
|
||||
err = node.historical.replica.addSegment(segmentID1, defaultPartitionID, collectionID, segmentTypeSealed)
|
||||
err = node.historical.replica.addSegment(segmentID1, defaultPartitionID, collectionID, "", segmentTypeSealed, true)
|
||||
assert.NoError(t, err)
|
||||
segment1, err := node.historical.replica.getSegmentByID(segmentID1)
|
||||
assert.NoError(t, err)
|
||||
err = loadFields(segment1, DIM, N)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.historical.replica.addSegment(segmentID2, defaultPartitionID, collectionID, segmentTypeSealed)
|
||||
err = node.historical.replica.addSegment(segmentID2, defaultPartitionID, collectionID, "", segmentTypeSealed, true)
|
||||
assert.NoError(t, err)
|
||||
segment2, err := node.historical.replica.getSegmentByID(segmentID2)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -52,12 +52,15 @@ type Segment struct {
|
|||
segmentID UniqueID
|
||||
partitionID UniqueID
|
||||
collectionID UniqueID
|
||||
|
||||
onService bool
|
||||
|
||||
vChannelID VChannel
|
||||
lastMemSize int64
|
||||
lastRowCount int64
|
||||
|
||||
once sync.Once // guards enableIndex
|
||||
enableIndex bool
|
||||
enableLoadBinLog bool
|
||||
once sync.Once // guards enableIndex
|
||||
enableIndex bool
|
||||
|
||||
rmMutex sync.Mutex // guards recentlyModified
|
||||
recentlyModified bool
|
||||
|
@ -110,7 +113,15 @@ func (s *Segment) getType() segmentType {
|
|||
return s.segmentType
|
||||
}
|
||||
|
||||
func newSegment(collection *Collection, segmentID int64, partitionID UniqueID, collectionID UniqueID, segType segmentType) *Segment {
|
||||
func (s *Segment) getOnService() bool {
|
||||
return s.onService
|
||||
}
|
||||
|
||||
func (s *Segment) setOnService(onService bool) {
|
||||
s.onService = onService
|
||||
}
|
||||
|
||||
func newSegment(collection *Collection, segmentID int64, partitionID UniqueID, collectionID UniqueID, vChannelID VChannel, segType segmentType, onService bool) *Segment {
|
||||
/*
|
||||
CSegmentInterface
|
||||
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);
|
||||
|
@ -133,13 +144,14 @@ func newSegment(collection *Collection, segmentID int64, partitionID UniqueID, c
|
|||
log.Debug("create segment", zap.Int64("segmentID", segmentID))
|
||||
|
||||
var newSegment = &Segment{
|
||||
segmentPtr: segmentPtr,
|
||||
segmentType: segType,
|
||||
segmentID: segmentID,
|
||||
partitionID: partitionID,
|
||||
collectionID: collectionID,
|
||||
indexInfos: indexInfos,
|
||||
enableLoadBinLog: false,
|
||||
segmentPtr: segmentPtr,
|
||||
segmentType: segType,
|
||||
segmentID: segmentID,
|
||||
partitionID: partitionID,
|
||||
collectionID: collectionID,
|
||||
vChannelID: vChannelID,
|
||||
onService: onService,
|
||||
indexInfos: indexInfos,
|
||||
}
|
||||
|
||||
return newSegment
|
||||
|
@ -415,7 +427,7 @@ func (s *Segment) segmentPreInsert(numOfRecords int) (int64, error) {
|
|||
long int
|
||||
PreInsert(CSegmentInterface c_segment, long int size);
|
||||
*/
|
||||
if s.segmentType != segmentTypeGrowing || s.enableLoadBinLog {
|
||||
if s.segmentType != segmentTypeGrowing {
|
||||
return 0, nil
|
||||
}
|
||||
var offset int64
|
||||
|
@ -454,9 +466,7 @@ func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps
|
|||
int sizeof_per_row,
|
||||
signed long int count);
|
||||
*/
|
||||
log.Debug("QueryNode::Segment::segmentInsert:", zap.Any("segmentType", s.segmentType))
|
||||
log.Debug("QueryNode::Segment::segmentInsert:", zap.Any("enableLoadBinLog", s.enableLoadBinLog))
|
||||
if s.segmentType != segmentTypeGrowing || s.enableLoadBinLog {
|
||||
if s.segmentType != segmentTypeGrowing {
|
||||
return nil
|
||||
}
|
||||
log.Debug("QueryNode::Segment::segmentInsert:", zap.Any("s.sgmentPtr", s.segmentPtr))
|
||||
|
@ -729,7 +739,3 @@ func (s *Segment) dropSegmentIndex(fieldID int64) error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Segment) setLoadBinLogEnable(enable bool) {
|
||||
s.enableLoadBinLog = enable
|
||||
}
|
||||
|
|
|
@ -24,14 +24,14 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
queryPb "github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/storage"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
)
|
||||
|
||||
// segmentLoader is only responsible for loading the field data from binlog
|
||||
type segmentLoader struct {
|
||||
replica ReplicaInterface
|
||||
historicalReplica ReplicaInterface
|
||||
|
||||
dataService types.DataService
|
||||
|
||||
|
@ -40,26 +40,108 @@ type segmentLoader struct {
|
|||
indexLoader *indexLoader
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) getInsertBinlogPaths(segmentID UniqueID) ([]*internalpb.StringList, []int64, error) {
|
||||
ctx := context.TODO()
|
||||
if loader.dataService == nil {
|
||||
return nil, nil, errors.New("null data service client")
|
||||
func (loader *segmentLoader) loadSegmentOfConditionHandOff(req *queryPb.LoadSegmentsRequest) error {
|
||||
return errors.New("TODO: implement hand off")
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegmentOfConditionLoadBalance(req *queryPb.LoadSegmentsRequest) error {
|
||||
return loader.loadSegment(req, false)
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegmentOfConditionGRPC(req *queryPb.LoadSegmentsRequest) error {
|
||||
return loader.loadSegment(req, true)
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegmentOfConditionNodeDown(req *queryPb.LoadSegmentsRequest) error {
|
||||
return loader.loadSegment(req, true)
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegment(req *queryPb.LoadSegmentsRequest, onService bool) error {
|
||||
// no segment needs to load, return
|
||||
if len(req.Infos) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
insertBinlogPathRequest := &datapb.GetInsertBinlogPathsRequest{
|
||||
SegmentID: segmentID,
|
||||
// start to load
|
||||
for _, info := range req.Infos {
|
||||
segmentID := info.SegmentID
|
||||
partitionID := info.PartitionID
|
||||
collectionID := info.CollectionID
|
||||
|
||||
// init replica
|
||||
hasCollectionInHistorical := loader.historicalReplica.hasCollection(collectionID)
|
||||
hasPartitionInHistorical := loader.historicalReplica.hasPartition(partitionID)
|
||||
if !hasCollectionInHistorical {
|
||||
err := loader.historicalReplica.addCollection(collectionID, req.Schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !hasPartitionInHistorical {
|
||||
err := loader.historicalReplica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
collection, err := loader.historicalReplica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
continue
|
||||
}
|
||||
segment := newSegment(collection, segmentID, partitionID, collectionID, "", segmentTypeSealed, onService)
|
||||
err = loader.loadSegmentInternal(collectionID, segment, info.BinlogPaths)
|
||||
if err != nil {
|
||||
deleteSegment(segment)
|
||||
log.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
err = loader.historicalReplica.setSegment(segment)
|
||||
if err != nil {
|
||||
deleteSegment(segment)
|
||||
log.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
pathResponse, err := loader.dataService.GetInsertBinlogPaths(ctx, insertBinlogPathRequest)
|
||||
if err != nil || pathResponse.Status.ErrorCode != commonpb.ErrorCode_Success {
|
||||
return nil, nil, err
|
||||
// sendQueryNodeStats
|
||||
return loader.indexLoader.sendQueryNodeStats()
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegmentInternal(collectionID UniqueID,
|
||||
segment *Segment,
|
||||
binlogPaths []*queryPb.FieldBinlog) error {
|
||||
|
||||
vectorFieldIDs, err := loader.historicalReplica.getVecFieldIDsByCollectionID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(pathResponse.FieldIDs) != len(pathResponse.Paths) || len(pathResponse.FieldIDs) <= 0 {
|
||||
return nil, nil, errors.New("illegal GetInsertBinlogPathsResponse")
|
||||
loadIndexFieldIDs := make([]int64, 0)
|
||||
for _, vecFieldID := range vectorFieldIDs {
|
||||
err = loader.indexLoader.setIndexInfo(collectionID, segment, vecFieldID)
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
continue
|
||||
}
|
||||
loadIndexFieldIDs = append(loadIndexFieldIDs, vecFieldID)
|
||||
}
|
||||
// we don't need load to vector fields
|
||||
binlogPaths = loader.filterOutVectorFields(binlogPaths, loadIndexFieldIDs)
|
||||
|
||||
log.Debug("loading insert...")
|
||||
err = loader.loadSegmentFieldsData(segment, binlogPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, id := range loadIndexFieldIDs {
|
||||
log.Debug("loading index...")
|
||||
err = loader.indexLoader.loadIndex(segment, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return pathResponse.Paths, pathResponse.FieldIDs, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) GetSegmentStates(segmentID UniqueID) (*datapb.GetSegmentStatesResponse, error) {
|
||||
|
@ -82,7 +164,9 @@ func (loader *segmentLoader) GetSegmentStates(segmentID UniqueID) (*datapb.GetSe
|
|||
return statesResponse, nil
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) filterOutVectorFields(fieldIDs []int64, vectorFields []int64) []int64 {
|
||||
func (loader *segmentLoader) filterOutVectorFields(binlogPaths []*queryPb.FieldBinlog,
|
||||
vectorFields []int64) []*queryPb.FieldBinlog {
|
||||
|
||||
containsFunc := func(s []int64, e int64) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
|
@ -91,117 +175,101 @@ func (loader *segmentLoader) filterOutVectorFields(fieldIDs []int64, vectorField
|
|||
}
|
||||
return false
|
||||
}
|
||||
targetFields := make([]int64, 0)
|
||||
for _, id := range fieldIDs {
|
||||
if !containsFunc(vectorFields, id) {
|
||||
targetFields = append(targetFields, id)
|
||||
targetFields := make([]*queryPb.FieldBinlog, 0)
|
||||
for _, path := range binlogPaths {
|
||||
if !containsFunc(vectorFields, path.FieldID) {
|
||||
targetFields = append(targetFields, path)
|
||||
}
|
||||
}
|
||||
return targetFields
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) checkTargetFields(paths []*internalpb.StringList, srcFieldIDs []int64, dstFieldIDs []int64) (map[int64]*internalpb.StringList, error) {
|
||||
targetFields := make(map[int64]*internalpb.StringList)
|
||||
|
||||
containsFunc := func(s []int64, e int64) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for _, fieldID := range dstFieldIDs {
|
||||
if !containsFunc(srcFieldIDs, fieldID) {
|
||||
return nil, errors.New("uncompleted fields")
|
||||
}
|
||||
}
|
||||
|
||||
for i := range srcFieldIDs {
|
||||
targetFields[srcFieldIDs[i]] = paths[i]
|
||||
}
|
||||
|
||||
return targetFields, nil
|
||||
}
|
||||
|
||||
func (loader *segmentLoader) loadSegmentFieldsData(segment *Segment, targetFields map[int64]*internalpb.StringList) error {
|
||||
func (loader *segmentLoader) loadSegmentFieldsData(segment *Segment, binlogPaths []*queryPb.FieldBinlog) error {
|
||||
iCodec := storage.InsertCodec{}
|
||||
defer iCodec.Close()
|
||||
for id, p := range targetFields {
|
||||
if id == timestampFieldID {
|
||||
defer func() {
|
||||
err := iCodec.Close()
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
}
|
||||
}()
|
||||
blobs := make([]*storage.Blob, 0)
|
||||
for _, binlogPath := range binlogPaths {
|
||||
fieldID := binlogPath.FieldID
|
||||
if fieldID == timestampFieldID {
|
||||
// seg core doesn't need timestamp field
|
||||
continue
|
||||
}
|
||||
|
||||
paths := p.Values
|
||||
blobs := make([]*storage.Blob, 0)
|
||||
log.Debug("loadSegmentFieldsData", zap.Int64("segmentID", segment.segmentID), zap.String("path", fmt.Sprintln(paths)))
|
||||
paths := binlogPath.Binlogs
|
||||
log.Debug("load segment fields data",
|
||||
zap.Int64("segmentID", segment.segmentID),
|
||||
zap.Any("fieldID", fieldID),
|
||||
zap.String("paths", fmt.Sprintln(paths)),
|
||||
)
|
||||
blob := &storage.Blob{
|
||||
Key: strconv.FormatInt(fieldID, 10),
|
||||
Value: make([]byte, 0),
|
||||
}
|
||||
for _, path := range paths {
|
||||
binLog, err := loader.kv.Load(path)
|
||||
if err != nil {
|
||||
// TODO: return or continue?
|
||||
return err
|
||||
}
|
||||
blobs = append(blobs, &storage.Blob{
|
||||
Key: strconv.FormatInt(id, 10), // TODO: key???
|
||||
Value: []byte(binLog),
|
||||
})
|
||||
blob.Value = append(blob.Value, []byte(binLog)...)
|
||||
}
|
||||
_, _, insertData, err := iCodec.Deserialize(blobs)
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
_, _, insertData, err := iCodec.Deserialize(blobs)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
for fieldID, value := range insertData.Data {
|
||||
var numRows int
|
||||
var data interface{}
|
||||
switch fieldData := value.(type) {
|
||||
case *storage.BoolFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int8FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int16FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int32FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int64FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.FloatFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.DoubleFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case storage.StringFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.FloatVectorFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.BinaryVectorFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
default:
|
||||
return errors.New("unexpected field data type")
|
||||
}
|
||||
err = segment.segmentLoadFieldData(fieldID, numRows, data)
|
||||
if err != nil {
|
||||
// TODO: return or continue
|
||||
// TODO: return or continue?
|
||||
return err
|
||||
}
|
||||
if len(insertData.Data) != 1 {
|
||||
return errors.New("we expect only one field in deserialized insert data")
|
||||
}
|
||||
|
||||
for _, value := range insertData.Data {
|
||||
var numRows int
|
||||
var data interface{}
|
||||
|
||||
switch fieldData := value.(type) {
|
||||
case *storage.BoolFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int8FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int16FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int32FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.Int64FieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.FloatFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.DoubleFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case storage.StringFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.FloatVectorFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
case *storage.BinaryVectorFieldData:
|
||||
numRows = fieldData.NumRows
|
||||
data = fieldData.Data
|
||||
default:
|
||||
return errors.New("unexpected field data type")
|
||||
}
|
||||
err = segment.segmentLoadFieldData(id, numRows, data)
|
||||
if err != nil {
|
||||
// TODO: return or continue?
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -222,7 +290,7 @@ func newSegmentLoader(ctx context.Context, masterService types.MasterService, in
|
|||
|
||||
iLoader := newIndexLoader(ctx, masterService, indexService, replica)
|
||||
return &segmentLoader{
|
||||
replica: replica,
|
||||
historicalReplica: replica,
|
||||
|
||||
dataService: dataService,
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ func TestSegment_newSegment(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
deleteSegment(segment)
|
||||
deleteCollection(collection)
|
||||
|
@ -49,7 +49,7 @@ func TestSegment_deleteSegment(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
deleteSegment(segment)
|
||||
|
@ -65,7 +65,7 @@ func TestSegment_getRowCount(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -113,7 +113,7 @@ func TestSegment_retrieve(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{}
|
||||
|
@ -173,7 +173,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -227,7 +227,7 @@ func TestSegment_getMemSize(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -275,7 +275,7 @@ func TestSegment_segmentInsert(t *testing.T) {
|
|||
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
|
||||
assert.Equal(t, collection.ID(), collectionID)
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -319,7 +319,7 @@ func TestSegment_segmentDelete(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -369,7 +369,7 @@ func TestSegment_segmentSearch(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -426,7 +426,7 @@ func TestSegment_segmentSearch(t *testing.T) {
|
|||
}
|
||||
|
||||
searchTimestamp := Timestamp(1020)
|
||||
plan, err := createPlan(*collection, dslString)
|
||||
plan, err := createPlan(collection, dslString)
|
||||
assert.NoError(t, err)
|
||||
holder, err := parseSearchRequest(plan, placeHolderGroupBlob)
|
||||
assert.NoError(t, err)
|
||||
|
@ -484,7 +484,7 @@ func TestSegment_segmentPreInsert(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
const DIM = 16
|
||||
|
@ -523,7 +523,7 @@ func TestSegment_segmentPreDelete(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
|
||||
segmentID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
|
||||
ids := []int64{1, 2, 3}
|
||||
|
@ -572,7 +572,7 @@ func TestSegment_segmentLoadFieldData(t *testing.T) {
|
|||
|
||||
segmentID := UniqueID(0)
|
||||
partitionID := UniqueID(0)
|
||||
segment := newSegment(collection, segmentID, partitionID, collectionID, segmentTypeSealed)
|
||||
segment := newSegment(collection, segmentID, partitionID, collectionID, "", segmentTypeSealed, true)
|
||||
assert.Equal(t, segmentID, segment.segmentID)
|
||||
assert.Equal(t, partitionID, segment.partitionID)
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ func (sService *statsService) start() {
|
|||
for {
|
||||
select {
|
||||
case <-sService.ctx.Done():
|
||||
log.Debug("stats service closed")
|
||||
return
|
||||
case <-time.After(time.Duration(sleepTimeInterval) * time.Millisecond):
|
||||
sService.publicStatistic(nil)
|
||||
|
|
|
@ -13,6 +13,8 @@ package querynode
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/msgstream"
|
||||
)
|
||||
|
@ -53,3 +55,71 @@ func (s *streaming) close() {
|
|||
// free collectionReplica
|
||||
s.replica.freeAll()
|
||||
}
|
||||
|
||||
func (s *streaming) search(searchReqs []*searchRequest,
|
||||
collID UniqueID,
|
||||
partIDs []UniqueID,
|
||||
vChannel VChannel,
|
||||
plan *Plan,
|
||||
searchTs Timestamp) ([]*SearchResult, []*Segment, error) {
|
||||
|
||||
searchResults := make([]*SearchResult, 0)
|
||||
segmentResults := make([]*Segment, 0)
|
||||
|
||||
// get streaming partition ids
|
||||
var searchPartIDs []UniqueID
|
||||
if len(partIDs) == 0 {
|
||||
strPartIDs, err := s.replica.getPartitionIDs(collID)
|
||||
if len(strPartIDs) == 0 {
|
||||
// no partitions in collection, do empty search
|
||||
return nil, nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
searchPartIDs = strPartIDs
|
||||
} else {
|
||||
for _, id := range partIDs {
|
||||
_, err := s.replica.getPartitionByID(id)
|
||||
if err == nil {
|
||||
searchPartIDs = append(searchPartIDs, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// all partitions have been released
|
||||
if len(searchPartIDs) == 0 {
|
||||
return nil, nil, errors.New("partitions have been released , collectionID = " +
|
||||
fmt.Sprintln(collID) +
|
||||
"target partitionIDs = " +
|
||||
fmt.Sprintln(partIDs))
|
||||
}
|
||||
|
||||
for _, partID := range searchPartIDs {
|
||||
segIDs, err := s.replica.getSegmentIDsByVChannel(partID, vChannel)
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
for _, segID := range segIDs {
|
||||
seg, err := s.replica.getSegmentByID(segID)
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
|
||||
// TSafe less than searchTs means this vChannel is not available
|
||||
ts := s.tSafeReplica.getTSafe(seg.vChannelID)
|
||||
if ts < searchTs {
|
||||
continue
|
||||
}
|
||||
|
||||
searchResult, err := seg.segmentSearch(plan, searchReqs, []Timestamp{searchTs})
|
||||
if err != nil {
|
||||
return searchResults, segmentResults, err
|
||||
}
|
||||
searchResults = append(searchResults, searchResult)
|
||||
segmentResults = append(segmentResults, seg)
|
||||
}
|
||||
}
|
||||
|
||||
return searchResults, segmentResults, nil
|
||||
}
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
@ -109,65 +108,147 @@ func (w *watchDmChannelsTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
||||
log.Debug("starting WatchDmChannels ...", zap.String("ChannelIDs", fmt.Sprintln(w.req.ChannelIDs)))
|
||||
// TODO: pass load type, col or partition
|
||||
|
||||
// 1. init channels in collection meta
|
||||
collectionID := w.req.CollectionID
|
||||
partitionID := w.req.PartitionID
|
||||
loadPartition := partitionID != 0
|
||||
|
||||
// TODO: Remove this and use unique vChannel
|
||||
channelTmp := make([]string, 0)
|
||||
for _, channel := range w.req.ChannelIDs {
|
||||
channelTmp = append(channelTmp, channel+strconv.FormatInt(collectionID, 10))
|
||||
// get all channels
|
||||
vChannels := make([]string, 0)
|
||||
// TODO: remove tmp
|
||||
vChannelsTmp := make([]string, 0)
|
||||
for _, info := range w.req.Infos {
|
||||
vChannels = append(vChannels, info.ChannelName)
|
||||
vChannelsTmp = append(vChannelsTmp, info.ChannelName+strconv.FormatInt(collectionID, 10))
|
||||
}
|
||||
log.Debug("starting WatchDmChannels ...",
|
||||
zap.Any("collectionName", w.req.Schema.Name),
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.String("ChannelIDs", fmt.Sprintln(vChannels)))
|
||||
|
||||
collection, err := w.node.streaming.replica.getCollectionByID(collectionID)
|
||||
//// get physical channels
|
||||
//desColReq := &milvuspb.DescribeCollectionRequest{
|
||||
// CollectionID: collectionID,
|
||||
//}
|
||||
//desColRsp, err := w.node.masterService.DescribeCollection(ctx, desColReq)
|
||||
//if err != nil {
|
||||
// log.Error("get physical channels failed, err = " + err.Error())
|
||||
// return err
|
||||
//}
|
||||
//VPChannels := make(map[string]string) // map[vChannel]pChannel
|
||||
//for _, ch := range vChannels {
|
||||
// for i := range desColRsp.VirtualChannelNames {
|
||||
// if desColRsp.VirtualChannelNames[i] == ch {
|
||||
// VPChannels[ch] = desColRsp.PhysicalChannelNames[i]
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
//if len(VPChannels) != len(vChannels) {
|
||||
// return errors.New("get physical channels failed, illegal channel length, collectionID = " + fmt.Sprintln(collectionID))
|
||||
//}
|
||||
//log.Debug("get physical channels done", zap.Any("collectionID", collectionID))
|
||||
|
||||
// init replica
|
||||
if hasCollectionInStreaming := w.node.streaming.replica.hasCollection(collectionID); !hasCollectionInStreaming {
|
||||
err := w.node.streaming.replica.addCollection(collectionID, w.req.Schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.node.streaming.replica.initExcludedSegments(collectionID)
|
||||
collection, err := w.node.streaming.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collection.addWatchedDmChannels(vChannelsTmp)
|
||||
}
|
||||
if hasCollectionInHistorical := w.node.historical.replica.hasCollection(collectionID); !hasCollectionInHistorical {
|
||||
err := w.node.historical.replica.addCollection(collectionID, w.req.Schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if loadPartition {
|
||||
if hasPartitionInStreaming := w.node.streaming.replica.hasPartition(partitionID); !hasPartitionInStreaming {
|
||||
err := w.node.streaming.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hasPartitionInHistorical := w.node.historical.replica.hasPartition(partitionID); !hasPartitionInHistorical {
|
||||
err := w.node.historical.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debug("watchDMChannel, init replica done", zap.Any("collectionID", collectionID))
|
||||
|
||||
// get subscription name
|
||||
getUniqueSubName := func() string {
|
||||
prefixName := Params.MsgChannelSubName
|
||||
return prefixName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
|
||||
}
|
||||
consumeSubName := getUniqueSubName()
|
||||
|
||||
// group channels by to seeking or consuming
|
||||
toSeekChannels := make([]*internalpb.MsgPosition, 0)
|
||||
toSubChannels := make([]string, 0)
|
||||
for _, info := range w.req.Infos {
|
||||
if info.SeekPosition == nil || len(info.SeekPosition.MsgID) == 0 {
|
||||
toSubChannels = append(toSubChannels, info.ChannelName)
|
||||
continue
|
||||
}
|
||||
info.SeekPosition.MsgGroup = consumeSubName
|
||||
toSeekChannels = append(toSeekChannels, info.SeekPosition)
|
||||
}
|
||||
log.Debug("watchDMChannel, group channels done", zap.Any("collectionID", collectionID))
|
||||
|
||||
// add check points info
|
||||
checkPointInfos := make([]*queryPb.CheckPoint, 0)
|
||||
for _, info := range w.req.Infos {
|
||||
checkPointInfos = append(checkPointInfos, info.CheckPoints...)
|
||||
}
|
||||
err := w.node.streaming.replica.addExcludedSegments(collectionID, checkPointInfos)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
collection.addWatchedDmChannels(channelTmp)
|
||||
log.Debug("watchDMChannel, add check points info done", zap.Any("collectionID", collectionID))
|
||||
|
||||
// 2. get subscription name
|
||||
getUniqueSubName := func() string {
|
||||
prefixName := Params.MsgChannelSubName
|
||||
return prefixName + "-" + strconv.FormatInt(collectionID, 10)
|
||||
// create tSafe
|
||||
for _, channel := range vChannelsTmp {
|
||||
w.node.streaming.tSafeReplica.addTSafe(channel)
|
||||
}
|
||||
consumeSubName := getUniqueSubName()
|
||||
|
||||
// 3. group channels by to seeking or consuming
|
||||
consumeChannels := w.req.ChannelIDs
|
||||
toSeekInfo := make([]*internalpb.MsgPosition, 0)
|
||||
toDirSubChannels := make([]string, 0)
|
||||
for _, info := range w.req.Infos {
|
||||
if len(info.Pos.MsgID) == 0 {
|
||||
toDirSubChannels = append(toDirSubChannels, info.ChannelID)
|
||||
continue
|
||||
}
|
||||
info.Pos.MsgGroup = consumeSubName
|
||||
toSeekInfo = append(toSeekInfo, info.Pos)
|
||||
|
||||
log.Debug("prevent inserting segments", zap.String("segmentIDs", fmt.Sprintln(info.ExcludedSegments)))
|
||||
err := w.node.streaming.replica.addExcludedSegments(collectionID, info.ExcludedSegments)
|
||||
// add flow graph
|
||||
if loadPartition {
|
||||
err = w.node.streaming.dataSyncService.addPartitionFlowGraph(collectionID, partitionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("query node add partition flow graphs", zap.Any("channels", vChannels))
|
||||
} else {
|
||||
err = w.node.streaming.dataSyncService.addCollectionFlowGraph(collectionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("query node add collection flow graphs", zap.Any("channels", vChannels))
|
||||
}
|
||||
|
||||
// channels as consumer
|
||||
var nodeFGs map[VChannel]*queryNodeFlowGraph
|
||||
if loadPartition {
|
||||
nodeFGs, err = w.node.streaming.dataSyncService.getPartitionFlowGraphs(partitionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
nodeFGs, err = w.node.streaming.dataSyncService.getCollectionFlowGraphs(collectionID, vChannels)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 4. add flow graph
|
||||
err = w.node.streaming.dataSyncService.addCollectionFlowGraph(collectionID, consumeChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("query node add flow graphs, channels = " + strings.Join(consumeChannels, ", "))
|
||||
|
||||
// 5. channels as consumer
|
||||
nodeFGs, err := w.node.streaming.dataSyncService.getCollectionFlowGraphs(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, channel := range toDirSubChannels {
|
||||
for _, channel := range toSubChannels {
|
||||
for _, fg := range nodeFGs {
|
||||
if fg.channel == channel {
|
||||
err := fg.consumerFlowGraph(channel, consumeSubName)
|
||||
|
@ -179,12 +260,15 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
log.Debug("as consumer channels", zap.Any("channels", consumeChannels))
|
||||
log.Debug("as consumer channels",
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.Any("toSubChannels", toSubChannels))
|
||||
|
||||
// 6. seek channel
|
||||
for _, pos := range toSeekInfo {
|
||||
// seek channel
|
||||
for _, pos := range toSeekChannels {
|
||||
for _, fg := range nodeFGs {
|
||||
if fg.channel == pos.ChannelName {
|
||||
pos.MsgGroup = consumeSubName
|
||||
err := fg.seekQueryNodeFlowGraph(pos)
|
||||
if err != nil {
|
||||
errMsg := "msgStream seek error :" + err.Error()
|
||||
|
@ -194,23 +278,24 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
log.Debug("seek all channel done",
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.Any("toSeekChannels", toSeekChannels))
|
||||
|
||||
// add tSafe
|
||||
for _, channel := range channelTmp {
|
||||
w.node.streaming.tSafeReplica.addTSafe(channel)
|
||||
// start flow graphs
|
||||
if loadPartition {
|
||||
err = w.node.streaming.dataSyncService.startPartitionFlowGraph(partitionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = w.node.streaming.dataSyncService.startCollectionFlowGraph(collectionID, vChannels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 7. start search collection
|
||||
w.node.searchService.startSearchCollection(collectionID)
|
||||
log.Debug("start search collection", zap.Any("collectionID", collectionID))
|
||||
|
||||
// 8. start flow graphs
|
||||
err = w.node.streaming.dataSyncService.startCollectionFlowGraph(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug("WatchDmChannels done", zap.String("ChannelIDs", fmt.Sprintln(w.req.ChannelIDs)))
|
||||
log.Debug("WatchDmChannels done", zap.String("ChannelIDs", fmt.Sprintln(vChannels)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -221,7 +306,7 @@ func (w *watchDmChannelsTask) PostExecute(ctx context.Context) error {
|
|||
// loadSegmentsTask
|
||||
func (l *loadSegmentsTask) Timestamp() Timestamp {
|
||||
if l.req.Base == nil {
|
||||
log.Error("nil base req in loadSegmentsTask", zap.Any("collectionID", l.req.CollectionID))
|
||||
log.Error("nil base req in loadSegmentsTask")
|
||||
return 0
|
||||
}
|
||||
return l.req.Base.Timestamp
|
||||
|
@ -242,61 +327,26 @@ func (l *loadSegmentsTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (l *loadSegmentsTask) Execute(ctx context.Context) error {
|
||||
// TODO: support db
|
||||
collectionID := l.req.CollectionID
|
||||
partitionID := l.req.PartitionID
|
||||
segmentIDs := l.req.SegmentIDs
|
||||
fieldIDs := l.req.FieldIDs
|
||||
schema := l.req.Schema
|
||||
|
||||
log.Debug("query node load segment", zap.String("loadSegmentRequest", fmt.Sprintln(l.req)))
|
||||
var err error
|
||||
|
||||
hasCollectionInHistorical := l.node.historical.replica.hasCollection(collectionID)
|
||||
hasPartitionInHistorical := l.node.historical.replica.hasPartition(partitionID)
|
||||
if !hasCollectionInHistorical {
|
||||
// loading init
|
||||
err := l.node.historical.replica.addCollection(collectionID, schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasCollectionInStreaming := l.node.streaming.replica.hasCollection(collectionID)
|
||||
if !hasCollectionInStreaming {
|
||||
err = l.node.streaming.replica.addCollection(collectionID, schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.node.streaming.replica.initExcludedSegments(collectionID)
|
||||
switch l.req.LoadCondition {
|
||||
case queryPb.TriggerCondition_handoff:
|
||||
err = l.node.historical.loader.loadSegmentOfConditionHandOff(l.req)
|
||||
case queryPb.TriggerCondition_loadBalance:
|
||||
err = l.node.historical.loader.loadSegmentOfConditionLoadBalance(l.req)
|
||||
case queryPb.TriggerCondition_grpcRequest:
|
||||
err = l.node.historical.loader.loadSegmentOfConditionGRPC(l.req)
|
||||
case queryPb.TriggerCondition_nodeDown:
|
||||
err = l.node.historical.loader.loadSegmentOfConditionNodeDown(l.req)
|
||||
}
|
||||
if !hasPartitionInHistorical {
|
||||
err := l.node.historical.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasPartitionInStreaming := l.node.streaming.replica.hasPartition(partitionID)
|
||||
if !hasPartitionInStreaming {
|
||||
err = l.node.streaming.replica.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
err := l.node.streaming.replica.enablePartition(partitionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if len(segmentIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = l.node.historical.loadService.loadSegmentPassively(collectionID, partitionID, segmentIDs, fieldIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug("LoadSegments done", zap.String("segmentIDs", fmt.Sprintln(l.req.SegmentIDs)))
|
||||
log.Debug("LoadSegments done", zap.String("SegmentLoadInfos", fmt.Sprintln(l.req.Infos)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -328,43 +378,48 @@ func (r *releaseCollectionTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (r *releaseCollectionTask) Execute(ctx context.Context) error {
|
||||
log.Debug("receive release collection task", zap.Any("collectionID", r.req.CollectionID))
|
||||
r.node.streaming.dataSyncService.removeCollectionFlowGraph(r.req.CollectionID)
|
||||
collection, err := r.node.historical.replica.getCollectionByID(r.req.CollectionID)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
collection.setReleaseTime(r.req.Base.Timestamp)
|
||||
|
||||
const gracefulReleaseTime = 3
|
||||
go func() {
|
||||
errMsg := "release collection failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "
|
||||
time.Sleep(gracefulReleaseTime * time.Second)
|
||||
|
||||
r.node.streaming.dataSyncService.removeCollectionFlowGraph(r.req.CollectionID)
|
||||
// remove all tSafes of the target collection
|
||||
for _, channel := range collection.getWatchedDmChannels() {
|
||||
r.node.streaming.tSafeReplica.removeTSafe(channel)
|
||||
}
|
||||
}
|
||||
|
||||
r.node.streaming.replica.removeExcludedSegments(r.req.CollectionID)
|
||||
|
||||
if r.node.searchService.hasSearchCollection(r.req.CollectionID) {
|
||||
r.node.streaming.replica.removeExcludedSegments(r.req.CollectionID)
|
||||
r.node.searchService.stopSearchCollection(r.req.CollectionID)
|
||||
}
|
||||
|
||||
hasCollectionInHistorical := r.node.historical.replica.hasCollection(r.req.CollectionID)
|
||||
if hasCollectionInHistorical {
|
||||
err := r.node.historical.replica.removeCollection(r.req.CollectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
hasCollectionInHistorical := r.node.historical.replica.hasCollection(r.req.CollectionID)
|
||||
if hasCollectionInHistorical {
|
||||
err := r.node.historical.replica.removeCollection(r.req.CollectionID)
|
||||
if err != nil {
|
||||
log.Error(errMsg + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hasCollectionInStreaming := r.node.streaming.replica.hasCollection(r.req.CollectionID)
|
||||
if hasCollectionInStreaming {
|
||||
err := r.node.streaming.replica.removeCollection(r.req.CollectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
hasCollectionInStreaming := r.node.streaming.replica.hasCollection(r.req.CollectionID)
|
||||
if hasCollectionInStreaming {
|
||||
err := r.node.streaming.replica.removeCollection(r.req.CollectionID)
|
||||
if err != nil {
|
||||
log.Error(errMsg + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: for debugging, remove this
|
||||
time.Sleep(2 * time.Second)
|
||||
log.Debug("ReleaseCollection done", zap.Int64("collectionID", r.req.CollectionID))
|
||||
}()
|
||||
|
||||
log.Debug("ReleaseCollection done", zap.Int64("collectionID", r.req.CollectionID))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -395,7 +450,12 @@ func (r *releasePartitionsTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (r *releasePartitionsTask) Execute(ctx context.Context) error {
|
||||
log.Debug("receive release partition task",
|
||||
zap.Any("collectionID", r.req.CollectionID),
|
||||
zap.Any("partitionIDs", r.req.PartitionIDs))
|
||||
|
||||
for _, id := range r.req.PartitionIDs {
|
||||
r.node.streaming.dataSyncService.removePartitionFlowGraph(id)
|
||||
hasPartitionInHistorical := r.node.historical.replica.hasPartition(id)
|
||||
if hasPartitionInHistorical {
|
||||
err := r.node.historical.replica.removePartition(id)
|
||||
|
@ -413,6 +473,10 @@ func (r *releasePartitionsTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("release partition task done",
|
||||
zap.Any("collectionID", r.req.CollectionID),
|
||||
zap.Any("partitionIDs", r.req.PartitionIDs))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,13 @@
|
|||
package querynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
)
|
||||
|
||||
type tSafeWatcher struct {
|
||||
|
@ -28,6 +34,7 @@ func newTSafeWatcher() *tSafeWatcher {
|
|||
func (watcher *tSafeWatcher) notify() {
|
||||
if len(watcher.notifyChan) == 0 {
|
||||
watcher.notifyChan <- true
|
||||
//log.Debug("tSafe watcher notify done")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -42,24 +49,75 @@ func (watcher *tSafeWatcher) watcherChan() <-chan bool {
|
|||
|
||||
type tSafer interface {
|
||||
get() Timestamp
|
||||
set(t Timestamp)
|
||||
set(id UniqueID, t Timestamp)
|
||||
registerTSafeWatcher(t *tSafeWatcher)
|
||||
start()
|
||||
close()
|
||||
}
|
||||
|
||||
type tSafeMsg struct {
|
||||
t Timestamp
|
||||
id UniqueID // collectionID or partitionID
|
||||
}
|
||||
|
||||
type tSafe struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
channel VChannel
|
||||
tSafeMu sync.Mutex // guards all fields
|
||||
tSafe Timestamp
|
||||
watcherList []*tSafeWatcher
|
||||
tSafeChan chan tSafeMsg
|
||||
tSafeRecord map[UniqueID]Timestamp
|
||||
}
|
||||
|
||||
func newTSafe() tSafer {
|
||||
func newTSafe(ctx context.Context, channel VChannel) tSafer {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
const channelSize = 4096
|
||||
|
||||
var t tSafer = &tSafe{
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
channel: channel,
|
||||
watcherList: make([]*tSafeWatcher, 0),
|
||||
tSafeChan: make(chan tSafeMsg, channelSize),
|
||||
tSafeRecord: make(map[UniqueID]Timestamp),
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (ts *tSafe) start() {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ts.ctx.Done():
|
||||
log.Debug("tSafe context done")
|
||||
return
|
||||
case m := <-ts.tSafeChan:
|
||||
ts.tSafeMu.Lock()
|
||||
ts.tSafeRecord[m.id] = m.t
|
||||
var tmpT Timestamp = math.MaxUint64
|
||||
for _, t := range ts.tSafeRecord {
|
||||
if t <= tmpT {
|
||||
tmpT = t
|
||||
}
|
||||
}
|
||||
ts.tSafe = tmpT
|
||||
for _, watcher := range ts.watcherList {
|
||||
watcher.notify()
|
||||
}
|
||||
|
||||
log.Debug("set tSafe done",
|
||||
zap.Any("id", m.id),
|
||||
zap.Any("channel", ts.channel),
|
||||
zap.Any("t", m.t),
|
||||
zap.Any("tSafe", ts.tSafe))
|
||||
ts.tSafeMu.Unlock()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *tSafe) registerTSafeWatcher(t *tSafeWatcher) {
|
||||
ts.tSafeMu.Lock()
|
||||
defer ts.tSafeMu.Unlock()
|
||||
|
@ -72,20 +130,22 @@ func (ts *tSafe) get() Timestamp {
|
|||
return ts.tSafe
|
||||
}
|
||||
|
||||
func (ts *tSafe) set(t Timestamp) {
|
||||
func (ts *tSafe) set(id UniqueID, t Timestamp) {
|
||||
ts.tSafeMu.Lock()
|
||||
defer ts.tSafeMu.Unlock()
|
||||
|
||||
ts.tSafe = t
|
||||
for _, watcher := range ts.watcherList {
|
||||
watcher.notify()
|
||||
msg := tSafeMsg{
|
||||
t: t,
|
||||
id: id,
|
||||
}
|
||||
ts.tSafeChan <- msg
|
||||
}
|
||||
|
||||
func (ts *tSafe) close() {
|
||||
ts.tSafeMu.Lock()
|
||||
defer ts.tSafeMu.Unlock()
|
||||
|
||||
ts.cancel()
|
||||
for _, watcher := range ts.watcherList {
|
||||
close(watcher.notifyChan)
|
||||
}
|
||||
|
|
|
@ -12,14 +12,19 @@
|
|||
package querynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
)
|
||||
|
||||
// TSafeReplicaInterface is the interface wrapper of tSafeReplica
|
||||
type TSafeReplicaInterface interface {
|
||||
getTSafe(vChannel VChannel) Timestamp
|
||||
setTSafe(vChannel VChannel, timestamp Timestamp)
|
||||
setTSafe(vChannel VChannel, id UniqueID, timestamp Timestamp)
|
||||
addTSafe(vChannel VChannel)
|
||||
removeTSafe(vChannel VChannel)
|
||||
registerTSafeWatcher(vChannel VChannel, watcher *tSafeWatcher)
|
||||
|
@ -35,21 +40,21 @@ func (t *tSafeReplica) getTSafe(vChannel VChannel) Timestamp {
|
|||
defer t.mu.Unlock()
|
||||
safer, err := t.getTSaferPrivate(vChannel)
|
||||
if err != nil {
|
||||
//log.Error("get tSafe failed", zap.Error(err))
|
||||
log.Error("get tSafe failed", zap.Error(err))
|
||||
return 0
|
||||
}
|
||||
return safer.get()
|
||||
}
|
||||
|
||||
func (t *tSafeReplica) setTSafe(vChannel VChannel, timestamp Timestamp) {
|
||||
func (t *tSafeReplica) setTSafe(vChannel VChannel, id UniqueID, timestamp Timestamp) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
safer, err := t.getTSaferPrivate(vChannel)
|
||||
if err != nil {
|
||||
//log.Error("set tSafe failed", zap.Error(err))
|
||||
log.Error("set tSafe failed", zap.Error(err))
|
||||
return
|
||||
}
|
||||
safer.set(timestamp)
|
||||
safer.set(id, timestamp)
|
||||
}
|
||||
|
||||
func (t *tSafeReplica) getTSaferPrivate(vChannel VChannel) (tSafer, error) {
|
||||
|
@ -64,8 +69,14 @@ func (t *tSafeReplica) getTSaferPrivate(vChannel VChannel) (tSafer, error) {
|
|||
func (t *tSafeReplica) addTSafe(vChannel VChannel) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.tSafes[vChannel] = newTSafe()
|
||||
//log.Debug("add tSafe done", zap.Any("channel", vChannel))
|
||||
ctx := context.Background()
|
||||
if _, ok := t.tSafes[vChannel]; !ok {
|
||||
t.tSafes[vChannel] = newTSafe(ctx, vChannel)
|
||||
t.tSafes[vChannel].start()
|
||||
log.Debug("add tSafe done", zap.Any("channel", vChannel))
|
||||
} else {
|
||||
log.Error("tSafe has been existed", zap.Any("channel", vChannel))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tSafeReplica) removeTSafe(vChannel VChannel) {
|
||||
|
@ -84,7 +95,7 @@ func (t *tSafeReplica) registerTSafeWatcher(vChannel VChannel, watcher *tSafeWat
|
|||
defer t.mu.Unlock()
|
||||
safer, err := t.getTSaferPrivate(vChannel)
|
||||
if err != nil {
|
||||
//log.Error("register tSafe watcher failed", zap.Error(err))
|
||||
log.Error("register tSafe watcher failed", zap.Error(err))
|
||||
return
|
||||
}
|
||||
safer.registerTSafeWatcher(watcher)
|
||||
|
|
|
@ -12,13 +12,14 @@
|
|||
package querynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTSafe_GetAndSet(t *testing.T) {
|
||||
tSafe := newTSafe()
|
||||
tSafe := newTSafe(context.Background(), "TestTSafe-channel")
|
||||
watcher := newTSafeWatcher()
|
||||
tSafe.registerTSafeWatcher(watcher)
|
||||
|
||||
|
@ -28,5 +29,5 @@ func TestTSafe_GetAndSet(t *testing.T) {
|
|||
assert.Equal(t, timestamp, Timestamp(1000))
|
||||
}()
|
||||
|
||||
tSafe.set(Timestamp(1000))
|
||||
tSafe.set(UniqueID(1), Timestamp(1000))
|
||||
}
|
||||
|
|
|
@ -0,0 +1,299 @@
|
|||
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
package queryservice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
)
|
||||
|
||||
type queryNodeCluster struct {
|
||||
sync.RWMutex
|
||||
clusterMeta *meta
|
||||
nodes map[int64]*queryNode
|
||||
}
|
||||
|
||||
func newQueryNodeCluster(clusterMeta *meta) *queryNodeCluster {
|
||||
nodes := make(map[int64]*queryNode)
|
||||
return &queryNodeCluster{
|
||||
clusterMeta: clusterMeta,
|
||||
nodes: nodes,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) GetComponentInfos(ctx context.Context) []*internalpb.ComponentInfo {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
subComponentInfos := make([]*internalpb.ComponentInfo, 0)
|
||||
for nodeID, node := range c.nodes {
|
||||
componentStates, err := node.client.GetComponentStates(ctx)
|
||||
if err != nil {
|
||||
subComponentInfos = append(subComponentInfos, &internalpb.ComponentInfo{
|
||||
NodeID: nodeID,
|
||||
StateCode: internalpb.StateCode_Abnormal,
|
||||
})
|
||||
continue
|
||||
}
|
||||
subComponentInfos = append(subComponentInfos, componentStates.State)
|
||||
}
|
||||
|
||||
return subComponentInfos
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) LoadSegments(ctx context.Context, nodeID int64, in *querypb.LoadSegmentsRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
//TODO::etcd
|
||||
log.Debug("load segment infos", zap.Any("infos", in))
|
||||
for _, info := range in.Infos {
|
||||
segmentID := info.SegmentID
|
||||
if info, ok := c.clusterMeta.segmentInfos[segmentID]; ok {
|
||||
info.SegmentState = querypb.SegmentState_sealing
|
||||
}
|
||||
segmentInfo := &querypb.SegmentInfo{
|
||||
SegmentID: segmentID,
|
||||
CollectionID: info.CollectionID,
|
||||
PartitionID: info.PartitionID,
|
||||
NodeID: nodeID,
|
||||
SegmentState: querypb.SegmentState_sealing,
|
||||
}
|
||||
c.clusterMeta.segmentInfos[segmentID] = segmentInfo
|
||||
}
|
||||
status, err := node.client.LoadSegments(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
for _, info := range in.Infos {
|
||||
if !c.clusterMeta.hasCollection(info.CollectionID) {
|
||||
c.clusterMeta.addCollection(info.CollectionID, in.Schema)
|
||||
}
|
||||
|
||||
c.clusterMeta.addPartition(info.CollectionID, info.PartitionID)
|
||||
|
||||
if !node.hasCollection(info.CollectionID) {
|
||||
node.addCollection(info.CollectionID, in.Schema)
|
||||
}
|
||||
node.addPartition(info.CollectionID, info.PartitionID)
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
for _, info := range in.Infos {
|
||||
segmentID := info.SegmentID
|
||||
c.clusterMeta.deleteSegmentInfoByID(segmentID)
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
return nil, errors.New("Can't find query node by nodeID ")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) ReleaseSegments(ctx context.Context, nodeID int64, in *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
status, err := node.client.ReleaseSegments(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
for _, segmentID := range in.SegmentIDs {
|
||||
c.clusterMeta.deleteSegmentInfoByID(segmentID)
|
||||
}
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
return nil, errors.New("Can't find query node by nodeID ")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) WatchDmChannels(ctx context.Context, nodeID int64, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
channels := make([]string, 0)
|
||||
for _, info := range in.Infos {
|
||||
channels = append(channels, info.ChannelName)
|
||||
}
|
||||
log.Debug("wait queryNode watch dm channel")
|
||||
status, err := node.client.WatchDmChannels(ctx, in)
|
||||
log.Debug("queryNode watch dm channel done")
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
collectionID := in.CollectionID
|
||||
if !c.clusterMeta.hasCollection(collectionID) {
|
||||
c.clusterMeta.addCollection(collectionID, in.Schema)
|
||||
}
|
||||
c.clusterMeta.addDmChannel(collectionID, nodeID, channels)
|
||||
if !node.hasCollection(collectionID) {
|
||||
node.addCollection(collectionID, in.Schema)
|
||||
}
|
||||
node.addDmChannel(collectionID, channels)
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
return nil, errors.New("Can't find query node by nodeID ")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) hasWatchedQueryChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
//TODO::should reopen
|
||||
//collectionID = 0
|
||||
return c.nodes[nodeID].hasWatchedQueryChannel(collectionID)
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) AddQueryChannel(ctx context.Context, nodeID int64, in *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
status, err := node.client.AddQueryChannel(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
//TODO::should reopen
|
||||
collectionID := in.CollectionID
|
||||
//collectionID := int64(0)
|
||||
if queryChannelInfo, ok := c.clusterMeta.queryChannelInfos[0]; ok {
|
||||
node.addQueryChannel(collectionID, queryChannelInfo)
|
||||
return status, err
|
||||
}
|
||||
log.Error("queryChannel for collection not assigned", zap.Int64("collectionID", collectionID))
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
return nil, errors.New("can't find query node by nodeID")
|
||||
}
|
||||
func (c *queryNodeCluster) removeQueryChannel(ctx context.Context, nodeID int64, in *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
status, err := node.client.RemoveQueryChannel(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
//TODO::should reopen
|
||||
//collectionID := in.CollectionID
|
||||
collectionID := int64(0)
|
||||
if _, ok = node.watchedQueryChannels[collectionID]; ok {
|
||||
node.removeQueryChannel(collectionID)
|
||||
return status, err
|
||||
}
|
||||
log.Error("queryChannel for collection not watched", zap.Int64("collectionID", collectionID))
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
return nil, errors.New("can't find query node by nodeID")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) releaseCollection(ctx context.Context, nodeID int64, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
status, err := node.client.ReleaseCollection(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
node.releaseCollection(in.CollectionID)
|
||||
c.clusterMeta.releaseCollection(in.CollectionID)
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
return nil, errors.New("can't find query node by nodeID")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) releasePartitions(ctx context.Context, nodeID int64, in *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if node, ok := c.nodes[nodeID]; ok {
|
||||
status, err := node.client.ReleasePartitions(ctx, in)
|
||||
if err == nil && status.ErrorCode == commonpb.ErrorCode_Success {
|
||||
for _, partitionID := range in.PartitionIDs {
|
||||
node.releasePartition(in.CollectionID, partitionID)
|
||||
c.clusterMeta.releasePartition(in.CollectionID, partitionID)
|
||||
}
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
return nil, errors.New("can't find query node by nodeID")
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) getSegmentInfo(ctx context.Context, in *querypb.GetSegmentInfoRequest) ([]*querypb.SegmentInfo, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
segmentInfos := make([]*querypb.SegmentInfo, 0)
|
||||
for _, node := range c.nodes {
|
||||
res, err := node.client.GetSegmentInfo(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segmentInfos = append(segmentInfos, res.Infos...)
|
||||
}
|
||||
|
||||
return segmentInfos, nil
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) getNumDmChannels(nodeID int64) (int, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if _, ok := c.nodes[nodeID]; !ok {
|
||||
return 0, errors.New("Can't find query node by nodeID ")
|
||||
}
|
||||
|
||||
numChannel := 0
|
||||
for _, info := range c.clusterMeta.collectionInfos {
|
||||
for _, channelInfo := range info.ChannelInfos {
|
||||
if channelInfo.NodeIDLoaded == nodeID {
|
||||
numChannel++
|
||||
}
|
||||
}
|
||||
}
|
||||
return numChannel, nil
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) getNumSegments(nodeID int64) (int, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if _, ok := c.nodes[nodeID]; !ok {
|
||||
return 0, errors.New("Can't find query node by nodeID ")
|
||||
}
|
||||
|
||||
numSegment := 0
|
||||
for _, info := range c.clusterMeta.segmentInfos {
|
||||
if info.NodeID == nodeID {
|
||||
numSegment++
|
||||
}
|
||||
}
|
||||
return numSegment, nil
|
||||
}
|
||||
|
||||
func (c *queryNodeCluster) RegisterNode(ip string, port int64, id UniqueID) error {
|
||||
node, err := newQueryNode(ip, port, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if _, ok := c.nodes[id]; !ok {
|
||||
c.nodes[id] = node
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("node %d alredy exists in cluster", id)
|
||||
}
|
|
@ -16,17 +16,14 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
nodeclient "github.com/milvus-io/milvus/internal/distributed/querynode/client"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util/retry"
|
||||
)
|
||||
|
||||
func (qs *QueryService) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
|
||||
|
@ -34,18 +31,7 @@ func (qs *QueryService) GetComponentStates(ctx context.Context) (*internalpb.Com
|
|||
NodeID: Params.QueryServiceID,
|
||||
StateCode: qs.stateCode.Load().(internalpb.StateCode),
|
||||
}
|
||||
subComponentInfos := make([]*internalpb.ComponentInfo, 0)
|
||||
for nodeID, node := range qs.queryNodes {
|
||||
componentStates, err := node.GetComponentStates(ctx)
|
||||
if err != nil {
|
||||
subComponentInfos = append(subComponentInfos, &internalpb.ComponentInfo{
|
||||
NodeID: nodeID,
|
||||
StateCode: internalpb.StateCode_Abnormal,
|
||||
})
|
||||
continue
|
||||
}
|
||||
subComponentInfos = append(subComponentInfos, componentStates.State)
|
||||
}
|
||||
subComponentInfos := qs.cluster.GetComponentInfos(ctx)
|
||||
return &internalpb.ComponentStates{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
|
@ -76,11 +62,10 @@ func (qs *QueryService) GetStatisticsChannel(ctx context.Context) (*milvuspb.Str
|
|||
}
|
||||
|
||||
func (qs *QueryService) RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
|
||||
// TODO:: add mutex
|
||||
nodeID := req.Base.SourceID
|
||||
log.Debug("register query node", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
|
||||
if _, ok := qs.queryNodes[nodeID]; ok {
|
||||
if _, ok := qs.cluster.nodes[nodeID]; ok {
|
||||
err := errors.New("nodeID already exists")
|
||||
log.Debug("register query node Failed nodeID already exist", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
|
||||
|
@ -92,60 +77,24 @@ func (qs *QueryService) RegisterNode(ctx context.Context, req *querypb.RegisterN
|
|||
}, err
|
||||
}
|
||||
|
||||
registerNodeAddress := req.Address.Ip + ":" + strconv.FormatInt(req.Address.Port, 10)
|
||||
client, err := nodeclient.NewClient(registerNodeAddress)
|
||||
err := qs.cluster.RegisterNode(req.Address.Ip, req.Address.Port, req.Base.SourceID)
|
||||
if err != nil {
|
||||
log.Debug("register query node new NodeClient failed", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
|
||||
return &querypb.RegisterNodeResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
},
|
||||
InitParams: new(internalpb.InitParams),
|
||||
}, err
|
||||
}
|
||||
if err := client.Init(); err != nil {
|
||||
log.Debug("register query node client init failed", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
|
||||
return &querypb.RegisterNodeResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
},
|
||||
InitParams: new(internalpb.InitParams),
|
||||
}, err
|
||||
}
|
||||
if err := client.Start(); err != nil {
|
||||
log.Debug("register query node client start failed", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
return nil, err
|
||||
}
|
||||
qs.queryNodes[nodeID] = newQueryNodeInfo(client)
|
||||
|
||||
//TODO::return init params to queryNode
|
||||
startParams := []*commonpb.KeyValuePair{
|
||||
{Key: "StatsChannelName", Value: Params.StatsChannelName},
|
||||
{Key: "TimeTickChannelName", Value: Params.TimeTickChannelName},
|
||||
}
|
||||
qs.qcMutex.Lock()
|
||||
for _, queryChannel := range qs.queryChannels {
|
||||
startParams = append(startParams, &commonpb.KeyValuePair{
|
||||
Key: "SearchChannelName",
|
||||
Value: queryChannel.requestChannel,
|
||||
})
|
||||
startParams = append(startParams, &commonpb.KeyValuePair{
|
||||
Key: "SearchResultChannelName",
|
||||
Value: queryChannel.responseChannel,
|
||||
})
|
||||
}
|
||||
qs.qcMutex.Unlock()
|
||||
log.Debug("register query node success", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()), zap.Any("StartParams", startParams))
|
||||
|
||||
log.Debug("register query node success", zap.Any("QueryNodeID", nodeID), zap.String("address", req.Address.String()))
|
||||
return &querypb.RegisterNodeResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
},
|
||||
InitParams: &internalpb.InitParams{
|
||||
NodeID: nodeID,
|
||||
StartParams: startParams,
|
||||
NodeID: nodeID,
|
||||
//StartParams: startParams,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
@ -153,19 +102,7 @@ func (qs *QueryService) RegisterNode(ctx context.Context, req *querypb.RegisterN
|
|||
func (qs *QueryService) ShowCollections(ctx context.Context, req *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error) {
|
||||
dbID := req.DbID
|
||||
log.Debug("show collection start, dbID = ", zap.String("dbID", strconv.FormatInt(dbID, 10)))
|
||||
collections, err := qs.replica.getCollections(dbID)
|
||||
collectionIDs := make([]UniqueID, 0)
|
||||
for _, collection := range collections {
|
||||
collectionIDs = append(collectionIDs, collection.id)
|
||||
}
|
||||
if err != nil {
|
||||
return &querypb.ShowCollectionsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
}, err
|
||||
}
|
||||
collectionIDs := qs.meta.showCollections()
|
||||
log.Debug("show collection end")
|
||||
return &querypb.ShowCollectionsResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -176,44 +113,39 @@ func (qs *QueryService) ShowCollections(ctx context.Context, req *querypb.ShowCo
|
|||
}
|
||||
|
||||
func (qs *QueryService) LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
|
||||
dbID := req.DbID
|
||||
collectionID := req.CollectionID
|
||||
schema := req.Schema
|
||||
watchNeeded := false
|
||||
log.Debug("LoadCollectionRequest received", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", collectionID),
|
||||
zap.Stringer("schema", req.Schema))
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
}
|
||||
|
||||
_, err := qs.replica.getCollectionByID(dbID, collectionID)
|
||||
if err != nil {
|
||||
watchNeeded = true
|
||||
err = qs.replica.addCollection(dbID, collectionID, schema)
|
||||
hasCollection := qs.meta.hasCollection(collectionID)
|
||||
if !hasCollection {
|
||||
err := qs.meta.addCollection(collectionID, schema)
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
log.Error(err.Error())
|
||||
return status, err
|
||||
}
|
||||
}
|
||||
|
||||
loadCollectionTask := &LoadCollectionTask{
|
||||
BaseTask: BaseTask{
|
||||
ctx: qs.loopCtx,
|
||||
Condition: NewTaskCondition(qs.loopCtx),
|
||||
|
||||
triggerCondition: querypb.TriggerCondition_grpcRequest,
|
||||
},
|
||||
LoadCollectionRequest: req,
|
||||
masterService: qs.masterServiceClient,
|
||||
dataService: qs.dataServiceClient,
|
||||
queryNodes: qs.queryNodes,
|
||||
meta: qs.replica,
|
||||
watchNeeded: watchNeeded,
|
||||
}
|
||||
err = qs.sched.DdQueue.Enqueue(loadCollectionTask)
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
cluster: qs.cluster,
|
||||
meta: qs.meta,
|
||||
}
|
||||
qs.scheduler.Enqueue([]task{loadCollectionTask})
|
||||
|
||||
err = loadCollectionTask.WaitToFinish()
|
||||
err := loadCollectionTask.WaitToFinish()
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
|
@ -225,34 +157,31 @@ func (qs *QueryService) LoadCollection(ctx context.Context, req *querypb.LoadCol
|
|||
}
|
||||
|
||||
func (qs *QueryService) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
dbID := req.DbID
|
||||
//dbID := req.DbID
|
||||
collectionID := req.CollectionID
|
||||
log.Debug("ReleaseCollectionRequest received", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", collectionID))
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
}
|
||||
_, err := qs.replica.getCollectionByID(dbID, collectionID)
|
||||
if err != nil {
|
||||
hasCollection := qs.meta.hasCollection(collectionID)
|
||||
if !hasCollection {
|
||||
log.Error("release collection end, query service don't have the log of", zap.String("collectionID", fmt.Sprintln(collectionID)))
|
||||
return status, nil
|
||||
}
|
||||
|
||||
releaseCollectionTask := &ReleaseCollectionTask{
|
||||
BaseTask: BaseTask{
|
||||
ctx: ctx,
|
||||
Condition: NewTaskCondition(ctx),
|
||||
ctx: qs.loopCtx,
|
||||
Condition: NewTaskCondition(qs.loopCtx),
|
||||
|
||||
triggerCondition: querypb.TriggerCondition_grpcRequest,
|
||||
},
|
||||
ReleaseCollectionRequest: req,
|
||||
queryNodes: qs.queryNodes,
|
||||
meta: qs.replica,
|
||||
cluster: qs.cluster,
|
||||
}
|
||||
err = qs.sched.DdQueue.Enqueue(releaseCollectionTask)
|
||||
if err != nil {
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
}
|
||||
err = releaseCollectionTask.WaitToFinish()
|
||||
qs.scheduler.Enqueue([]task{releaseCollectionTask})
|
||||
|
||||
err := releaseCollectionTask.WaitToFinish()
|
||||
if err != nil {
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
|
@ -260,18 +189,12 @@ func (qs *QueryService) ReleaseCollection(ctx context.Context, req *querypb.Rele
|
|||
}
|
||||
|
||||
log.Debug("ReleaseCollectionRequest completed", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", collectionID))
|
||||
//TODO:: queryNode cancel subscribe dmChannels
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error) {
|
||||
dbID := req.DbID
|
||||
collectionID := req.CollectionID
|
||||
partitions, err := qs.replica.getPartitions(dbID, collectionID)
|
||||
partitionIDs := make([]UniqueID, 0)
|
||||
for _, partition := range partitions {
|
||||
partitionIDs = append(partitionIDs, partition.id)
|
||||
}
|
||||
partitionIDs, err := qs.meta.showPartitions(collectionID)
|
||||
if err != nil {
|
||||
return &querypb.ShowPartitionsResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -280,6 +203,7 @@ func (qs *QueryService) ShowPartitions(ctx context.Context, req *querypb.ShowPar
|
|||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
return &querypb.ShowPartitionsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
|
@ -293,7 +217,7 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
|
|||
collectionID := req.CollectionID
|
||||
partitionIDs := req.PartitionIDs
|
||||
log.Debug("LoadPartitionRequest received", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs))
|
||||
status, watchNeeded, err := LoadPartitionMetaCheck(qs.replica, req)
|
||||
status, err := LoadPartitionMetaCheck(qs.meta, req)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
@ -302,20 +226,16 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
|
|||
BaseTask: BaseTask{
|
||||
ctx: qs.loopCtx,
|
||||
Condition: NewTaskCondition(qs.loopCtx),
|
||||
|
||||
triggerCondition: querypb.TriggerCondition_grpcRequest,
|
||||
},
|
||||
LoadPartitionsRequest: req,
|
||||
masterService: qs.masterServiceClient,
|
||||
dataService: qs.dataServiceClient,
|
||||
queryNodes: qs.queryNodes,
|
||||
meta: qs.replica,
|
||||
watchNeeded: watchNeeded,
|
||||
}
|
||||
err = qs.sched.DdQueue.Enqueue(loadPartitionTask)
|
||||
if err != nil {
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
cluster: qs.cluster,
|
||||
meta: qs.meta,
|
||||
}
|
||||
qs.scheduler.Enqueue([]task{loadPartitionTask})
|
||||
|
||||
err = loadPartitionTask.WaitToFinish()
|
||||
if err != nil {
|
||||
|
@ -328,12 +248,11 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
|
|||
return status, nil
|
||||
}
|
||||
|
||||
func LoadPartitionMetaCheck(meta Replica, req *querypb.LoadPartitionsRequest) (*commonpb.Status, bool, error) {
|
||||
dbID := req.DbID
|
||||
func LoadPartitionMetaCheck(meta *meta, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
|
||||
//dbID := req.DbID
|
||||
collectionID := req.CollectionID
|
||||
partitionIDs := req.PartitionIDs
|
||||
schema := req.Schema
|
||||
watchNeeded := false
|
||||
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
|
@ -342,37 +261,38 @@ func LoadPartitionMetaCheck(meta Replica, req *querypb.LoadPartitionsRequest) (*
|
|||
if len(partitionIDs) == 0 {
|
||||
err := errors.New("partitionIDs are empty")
|
||||
status.Reason = err.Error()
|
||||
return status, watchNeeded, err
|
||||
return status, err
|
||||
}
|
||||
|
||||
_, err := meta.getCollectionByID(dbID, collectionID)
|
||||
if err != nil {
|
||||
err = meta.addCollection(dbID, collectionID, schema)
|
||||
hasCollection := meta.hasCollection(collectionID)
|
||||
if !hasCollection {
|
||||
err := meta.addCollection(collectionID, schema)
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
return status, watchNeeded, err
|
||||
return status, err
|
||||
}
|
||||
watchNeeded = true
|
||||
}
|
||||
|
||||
partitionIDsToLoad := make([]UniqueID, 0)
|
||||
for _, partitionID := range partitionIDs {
|
||||
_, err = meta.getPartitionByID(dbID, collectionID, partitionID)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
err = meta.addPartition(dbID, collectionID, partitionID)
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
return status, watchNeeded, err
|
||||
hasPartition := meta.hasPartition(collectionID, partitionID)
|
||||
if !hasPartition {
|
||||
err := meta.addPartition(collectionID, partitionID)
|
||||
if err != nil {
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
}
|
||||
partitionIDsToLoad = append(partitionIDsToLoad, partitionID)
|
||||
}
|
||||
}
|
||||
req.PartitionIDs = partitionIDsToLoad
|
||||
|
||||
status.ErrorCode = commonpb.ErrorCode_Success
|
||||
return status, watchNeeded, nil
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
|
||||
dbID := req.DbID
|
||||
//dbID := req.DbID
|
||||
collectionID := req.CollectionID
|
||||
partitionIDs := req.PartitionIDs
|
||||
log.Debug("ReleasePartitionRequest received", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", req.CollectionID), zap.Int64s("partitionIDs", partitionIDs))
|
||||
|
@ -381,8 +301,8 @@ func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.Rele
|
|||
}
|
||||
toReleasedPartitionID := make([]UniqueID, 0)
|
||||
for _, partitionID := range partitionIDs {
|
||||
_, err := qs.replica.getPartitionByID(dbID, collectionID, partitionID)
|
||||
if err == nil {
|
||||
hasPartition := qs.meta.hasPartition(collectionID, partitionID)
|
||||
if hasPartition {
|
||||
toReleasedPartitionID = append(toReleasedPartitionID, partitionID)
|
||||
}
|
||||
}
|
||||
|
@ -391,21 +311,17 @@ func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.Rele
|
|||
req.PartitionIDs = toReleasedPartitionID
|
||||
releasePartitionTask := &ReleasePartitionTask{
|
||||
BaseTask: BaseTask{
|
||||
ctx: ctx,
|
||||
Condition: NewTaskCondition(ctx),
|
||||
ctx: qs.loopCtx,
|
||||
Condition: NewTaskCondition(qs.loopCtx),
|
||||
|
||||
triggerCondition: querypb.TriggerCondition_grpcRequest,
|
||||
},
|
||||
ReleasePartitionsRequest: req,
|
||||
queryNodes: qs.queryNodes,
|
||||
meta: qs.replica,
|
||||
}
|
||||
err := qs.sched.DdQueue.Enqueue(releasePartitionTask)
|
||||
if err != nil {
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
return status, err
|
||||
cluster: qs.cluster,
|
||||
}
|
||||
qs.scheduler.Enqueue([]task{releasePartitionTask})
|
||||
|
||||
err = releasePartitionTask.WaitToFinish()
|
||||
err := releasePartitionTask.WaitToFinish()
|
||||
if err != nil {
|
||||
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
|
||||
status.Reason = err.Error()
|
||||
|
@ -413,85 +329,71 @@ func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.Rele
|
|||
}
|
||||
}
|
||||
log.Debug("ReleasePartitionRequest completed", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs))
|
||||
//TODO:: queryNode cancel subscribe dmChannels
|
||||
//TODO:: queryNodeCluster cancel subscribe dmChannels
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error) {
|
||||
qs.qcMutex.Lock()
|
||||
allocatedQueryChannel := qs.queryChannels[0].requestChannel
|
||||
allocatedQueryResultChannel := qs.queryChannels[0].responseChannel
|
||||
addQueryChannelsRequest := &querypb.AddQueryChannelRequest{
|
||||
RequestChannelID: allocatedQueryChannel,
|
||||
ResultChannelID: allocatedQueryResultChannel,
|
||||
}
|
||||
log.Debug("query service create query channel", zap.String("queryChannelName", allocatedQueryChannel))
|
||||
for nodeID, node := range qs.queryNodes {
|
||||
log.Debug("node watch query channel", zap.String("nodeID", fmt.Sprintln(nodeID)))
|
||||
fn := func() error {
|
||||
_, err := node.AddQueryChannel(ctx, addQueryChannelsRequest)
|
||||
return err
|
||||
}
|
||||
err := retry.Retry(10, time.Millisecond*200, fn)
|
||||
if err != nil {
|
||||
qs.qcMutex.Unlock()
|
||||
return &querypb.CreateQueryChannelResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
}, err
|
||||
}
|
||||
}
|
||||
qs.qcMutex.Unlock()
|
||||
func (qs *QueryService) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
|
||||
collectionID := req.CollectionID
|
||||
queryChannel, queryResultChannel := qs.meta.GetQueryChannel(collectionID)
|
||||
|
||||
return &querypb.CreateQueryChannelResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
},
|
||||
RequestChannel: allocatedQueryChannel,
|
||||
ResultChannel: allocatedQueryResultChannel,
|
||||
RequestChannel: queryChannel,
|
||||
ResultChannel: queryResultChannel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
|
||||
states, err := qs.replica.getPartitionStates(req.DbID, req.CollectionID, req.PartitionIDs)
|
||||
if err != nil {
|
||||
return &querypb.GetPartitionStatesResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
PartitionDescriptions: states,
|
||||
}, err
|
||||
}
|
||||
return &querypb.GetPartitionStatesResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
},
|
||||
PartitionDescriptions: states,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
segmentInfos := make([]*querypb.SegmentInfo, 0)
|
||||
totalMemSize := int64(0)
|
||||
for nodeID, node := range qs.queryNodes {
|
||||
segmentInfo, err := node.client.GetSegmentInfo(ctx, req)
|
||||
partitionIDs := req.PartitionIDs
|
||||
partitionStates := make([]*querypb.PartitionStates, 0)
|
||||
for _, partitionID := range partitionIDs {
|
||||
state, err := qs.meta.getPartitionStateByID(partitionID)
|
||||
if err != nil {
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
return &querypb.GetPartitionStatesResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
}, err
|
||||
}
|
||||
segmentInfos = append(segmentInfos, segmentInfo.Infos...)
|
||||
for _, info := range segmentInfo.Infos {
|
||||
totalMemSize = totalMemSize + info.MemSize
|
||||
partitionState := &querypb.PartitionStates{
|
||||
PartitionID: partitionID,
|
||||
State: state,
|
||||
}
|
||||
log.Debug("getSegmentInfo", zap.Int64("nodeID", nodeID), zap.Int64("memory size", totalMemSize))
|
||||
partitionStates = append(partitionStates, partitionState)
|
||||
}
|
||||
|
||||
return &querypb.GetPartitionStatesResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
},
|
||||
PartitionDescriptions: partitionStates,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
totalMemSize := int64(0)
|
||||
totalNumRows := int64(0)
|
||||
//TODO::get segment infos from meta
|
||||
//segmentIDs := req.SegmentIDs
|
||||
//segmentInfos, err := qs.meta.getSegmentInfos(segmentIDs)
|
||||
segmentInfos, err := qs.cluster.getSegmentInfo(ctx, req)
|
||||
if err != nil {
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
}, err
|
||||
}
|
||||
for _, info := range segmentInfos {
|
||||
totalNumRows += info.NumRows
|
||||
totalMemSize += info.MemSize
|
||||
}
|
||||
log.Debug("getSegmentInfo", zap.Int64("num rows", totalNumRows), zap.Int64("memory size", totalMemSize))
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
|
|
|
@ -0,0 +1,332 @@
|
|||
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
package queryservice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
type meta struct {
|
||||
sync.RWMutex
|
||||
collectionInfos map[UniqueID]*querypb.CollectionInfo
|
||||
segmentInfos map[UniqueID]*querypb.SegmentInfo
|
||||
queryChannelInfos map[UniqueID]*querypb.QueryChannelInfo
|
||||
|
||||
partitionStates map[UniqueID]querypb.PartitionState
|
||||
}
|
||||
|
||||
func newMeta() *meta {
|
||||
collectionInfos := make(map[UniqueID]*querypb.CollectionInfo)
|
||||
segmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
|
||||
queryChannelInfos := make(map[UniqueID]*querypb.QueryChannelInfo)
|
||||
partitionStates := make(map[UniqueID]querypb.PartitionState)
|
||||
return &meta{
|
||||
collectionInfos: collectionInfos,
|
||||
segmentInfos: segmentInfos,
|
||||
queryChannelInfos: queryChannelInfos,
|
||||
partitionStates: partitionStates,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *meta) showCollections() []UniqueID {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
collections := make([]UniqueID, 0)
|
||||
for id := range m.collectionInfos {
|
||||
collections = append(collections, id)
|
||||
}
|
||||
return collections
|
||||
}
|
||||
|
||||
func (m *meta) showPartitions(collectionID UniqueID) ([]UniqueID, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
return info.PartitionIDs, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("showPartitions: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (m *meta) hasCollection(collectionID UniqueID) bool {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if _, ok := m.collectionInfos[collectionID]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *meta) hasPartition(collectionID UniqueID, partitionID UniqueID) bool {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
for _, id := range info.PartitionIDs {
|
||||
if partitionID == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *meta) addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if _, ok := m.collectionInfos[collectionID]; !ok {
|
||||
partitions := make([]UniqueID, 0)
|
||||
channels := make([]*querypb.DmChannelInfo, 0)
|
||||
newCollection := &querypb.CollectionInfo{
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: partitions,
|
||||
ChannelInfos: channels,
|
||||
Schema: schema,
|
||||
}
|
||||
m.collectionInfos[collectionID] = newCollection
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("addCollection: collection already exists")
|
||||
}
|
||||
|
||||
func (m *meta) addPartition(collectionID UniqueID, partitionID UniqueID) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if col, ok := m.collectionInfos[collectionID]; ok {
|
||||
log.Debug("add a partition to meta...", zap.Int64s("partitionIDs", col.PartitionIDs))
|
||||
for _, id := range col.PartitionIDs {
|
||||
if id == partitionID {
|
||||
return errors.New("addPartition: partition already exists in collectionInfos")
|
||||
}
|
||||
}
|
||||
col.PartitionIDs = append(col.PartitionIDs, partitionID)
|
||||
m.partitionStates[partitionID] = querypb.PartitionState_NotPresent
|
||||
log.Debug("add a partition to meta", zap.Int64s("partitionIDs", col.PartitionIDs))
|
||||
return nil
|
||||
}
|
||||
return errors.New("addPartition: can't find collection when add partition")
|
||||
}
|
||||
|
||||
func (m *meta) deleteSegmentInfoByID(segmentID UniqueID) {
|
||||
m.Lock()
|
||||
m.Unlock()
|
||||
delete(m.segmentInfos, segmentID)
|
||||
}
|
||||
|
||||
func (m *meta) setSegmentInfo(segmentID UniqueID, info *querypb.SegmentInfo) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
m.segmentInfos[segmentID] = info
|
||||
}
|
||||
|
||||
func (m *meta) getSegmentInfos(segmentIDs []UniqueID) ([]*querypb.SegmentInfo, error) {
|
||||
segmentInfos := make([]*querypb.SegmentInfo, 0)
|
||||
for _, segmentID := range segmentIDs {
|
||||
if info, ok := m.segmentInfos[segmentID]; ok {
|
||||
segmentInfos = append(segmentInfos, info)
|
||||
continue
|
||||
}
|
||||
return nil, errors.New("segment not exist")
|
||||
}
|
||||
return segmentInfos, nil
|
||||
}
|
||||
|
||||
func (m *meta) getSegmentInfoByID(segmentID UniqueID) (*querypb.SegmentInfo, error) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if info, ok := m.segmentInfos[segmentID]; ok {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("getSegmentInfoByID: can't find segmentID in segmentInfos")
|
||||
}
|
||||
|
||||
func (m *meta) updatePartitionState(partitionID UniqueID, state querypb.PartitionState) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if _, ok := m.partitionStates[partitionID]; ok {
|
||||
m.partitionStates[partitionID] = state
|
||||
}
|
||||
|
||||
return errors.New("updatePartitionState: can't find partition in partitionStates")
|
||||
}
|
||||
|
||||
func (m *meta) getPartitionStateByID(partitionID UniqueID) (querypb.PartitionState, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if state, ok := m.partitionStates[partitionID]; ok {
|
||||
return state, nil
|
||||
}
|
||||
|
||||
return 0, errors.New("getPartitionStateByID: can't find partition in partitionStates")
|
||||
}
|
||||
|
||||
func (m *meta) releaseCollection(collectionID UniqueID) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
for _, partitionID := range info.PartitionIDs {
|
||||
delete(m.partitionStates, partitionID)
|
||||
}
|
||||
delete(m.collectionInfos, collectionID)
|
||||
}
|
||||
for id, info := range m.segmentInfos {
|
||||
if info.CollectionID == collectionID {
|
||||
delete(m.segmentInfos, id)
|
||||
}
|
||||
}
|
||||
delete(m.queryChannelInfos, collectionID)
|
||||
}
|
||||
|
||||
func (m *meta) releasePartition(collectionID UniqueID, partitionID UniqueID) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
newPartitionIDs := make([]UniqueID, 0)
|
||||
for _, id := range info.PartitionIDs {
|
||||
if id == partitionID {
|
||||
delete(m.partitionStates, partitionID)
|
||||
} else {
|
||||
newPartitionIDs = append(newPartitionIDs, id)
|
||||
}
|
||||
}
|
||||
info.PartitionIDs = newPartitionIDs
|
||||
}
|
||||
for id, info := range m.segmentInfos {
|
||||
if info.PartitionID == partitionID {
|
||||
delete(m.segmentInfos, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *meta) hasWatchedDmChannel(collectionID UniqueID, channelID string) (bool, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
channelInfos := info.ChannelInfos
|
||||
for _, channelInfo := range channelInfos {
|
||||
for _, channel := range channelInfo.ChannelIDs {
|
||||
if channel == channelID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, errors.New("hasWatchedDmChannel: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (m *meta) getDmChannelsByCollectionID(collectionID UniqueID) ([]string, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
channels := make([]string, 0)
|
||||
for _, channelsInfo := range info.ChannelInfos {
|
||||
channels = append(channels, channelsInfo.ChannelIDs...)
|
||||
}
|
||||
return channels, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("getDmChannelsByCollectionID: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (m *meta) getDmChannelsByNodeID(collectionID UniqueID, nodeID int64) ([]string, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
channels := make([]string, 0)
|
||||
for _, channelInfo := range info.ChannelInfos {
|
||||
if channelInfo.NodeIDLoaded == nodeID {
|
||||
channels = append(channels, channelInfo.ChannelIDs...)
|
||||
}
|
||||
}
|
||||
return channels, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("getDmChannelsByNodeID: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (m *meta) addDmChannel(collectionID UniqueID, nodeID int64, channels []string) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
//before add channel, should ensure toAddedChannels not in meta
|
||||
if info, ok := m.collectionInfos[collectionID]; ok {
|
||||
findNodeID := false
|
||||
for _, channelInfo := range info.ChannelInfos {
|
||||
if channelInfo.NodeIDLoaded == nodeID {
|
||||
findNodeID = true
|
||||
channelInfo.ChannelIDs = append(channelInfo.ChannelIDs, channels...)
|
||||
}
|
||||
}
|
||||
if !findNodeID {
|
||||
newChannelInfo := &querypb.DmChannelInfo{
|
||||
NodeIDLoaded: nodeID,
|
||||
ChannelIDs: channels,
|
||||
}
|
||||
info.ChannelInfos = append(info.ChannelInfos, newChannelInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("addDmChannels: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (m *meta) GetQueryChannel(collectionID UniqueID) (string, string) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
//TODO::to remove
|
||||
collectionID = 0
|
||||
if info, ok := m.queryChannelInfos[collectionID]; ok {
|
||||
return info.QueryChannelID, info.QueryResultChannelID
|
||||
}
|
||||
|
||||
searchPrefix := Params.SearchChannelPrefix
|
||||
searchResultPrefix := Params.SearchResultChannelPrefix
|
||||
allocatedQueryChannel := searchPrefix + "-" + strconv.FormatInt(collectionID, 10)
|
||||
allocatedQueryResultChannel := searchResultPrefix + "-" + strconv.FormatInt(collectionID, 10)
|
||||
log.Debug("query service create query channel", zap.String("queryChannelName", allocatedQueryChannel), zap.String("queryResultChannelName", allocatedQueryResultChannel))
|
||||
|
||||
queryChannelInfo := &querypb.QueryChannelInfo{
|
||||
CollectionID: collectionID,
|
||||
QueryChannelID: allocatedQueryChannel,
|
||||
QueryResultChannelID: allocatedQueryResultChannel,
|
||||
}
|
||||
m.queryChannelInfos[collectionID] = queryChannelInfo
|
||||
//TODO::return channel according collectionID
|
||||
return allocatedQueryChannel, allocatedQueryResultChannel
|
||||
}
|
|
@ -1,277 +0,0 @@
|
|||
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
package queryservice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
internalPb "github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
type Replica interface {
|
||||
getCollections(dbID UniqueID) ([]*collection, error)
|
||||
getPartitions(dbID UniqueID, collectionID UniqueID) ([]*partition, error)
|
||||
getSegments(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) ([]*segment, error)
|
||||
getCollectionByID(dbID UniqueID, collectionID UniqueID) (*collection, error)
|
||||
getPartitionByID(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) (*partition, error)
|
||||
addCollection(dbID UniqueID, collectionID UniqueID, schema *schemapb.CollectionSchema) error
|
||||
addPartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error
|
||||
updatePartitionState(dbID UniqueID, collectionID UniqueID, partitionID UniqueID, state querypb.PartitionState) error
|
||||
getPartitionStates(dbID UniqueID, collectionID UniqueID, partitionIDs []UniqueID) ([]*querypb.PartitionStates, error)
|
||||
releaseCollection(dbID UniqueID, collectionID UniqueID) error
|
||||
releasePartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error
|
||||
addDmChannel(dbID UniqueID, collectionID UniqueID, channel string, watchedStartPos *internalPb.MsgPosition) error
|
||||
addExcludeSegmentIDs(dbID UniqueID, collectionID UniqueID, excludeSegments []UniqueID) error
|
||||
//getAssignedNodeIDByChannelName(dbID UniqueID, collectionID UniqueID, channel string) (int64, error)
|
||||
}
|
||||
|
||||
type segment struct {
|
||||
id UniqueID
|
||||
}
|
||||
|
||||
type partition struct {
|
||||
id UniqueID
|
||||
segments map[UniqueID]*segment
|
||||
state querypb.PartitionState
|
||||
}
|
||||
|
||||
type collection struct {
|
||||
id UniqueID
|
||||
partitions map[UniqueID]*partition
|
||||
dmChannels []string
|
||||
dmChannels2Pos map[string]*internalPb.MsgPosition
|
||||
excludeSegmentIds []UniqueID
|
||||
schema *schemapb.CollectionSchema
|
||||
}
|
||||
|
||||
type metaReplica struct {
|
||||
dbID []UniqueID
|
||||
db2collections map[UniqueID][]*collection
|
||||
}
|
||||
|
||||
func newMetaReplica() Replica {
|
||||
db2collections := make(map[UniqueID][]*collection)
|
||||
db2collections[0] = make([]*collection, 0)
|
||||
dbIDs := make([]UniqueID, 0)
|
||||
dbIDs = append(dbIDs, UniqueID(0))
|
||||
return &metaReplica{
|
||||
dbID: dbIDs,
|
||||
db2collections: db2collections,
|
||||
}
|
||||
}
|
||||
|
||||
func (mp *metaReplica) addCollection(dbID UniqueID, collectionID UniqueID, schema *schemapb.CollectionSchema) error {
|
||||
//TODO:: assert dbID = 0 exist
|
||||
if _, ok := mp.db2collections[dbID]; ok {
|
||||
partitions := make(map[UniqueID]*partition)
|
||||
channels := make([]string, 0)
|
||||
startPos := make(map[string]*internalPb.MsgPosition)
|
||||
excludeSegmentIDs := make([]UniqueID, 0)
|
||||
newCollection := &collection{
|
||||
id: collectionID,
|
||||
partitions: partitions,
|
||||
schema: schema,
|
||||
dmChannels: channels,
|
||||
dmChannels2Pos: startPos,
|
||||
excludeSegmentIds: excludeSegmentIDs,
|
||||
}
|
||||
mp.db2collections[dbID] = append(mp.db2collections[dbID], newCollection)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("addCollection: can't find dbID when add collection")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) addPartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collection.id == collectionID {
|
||||
partitions := collection.partitions
|
||||
segments := make(map[UniqueID]*segment)
|
||||
partition := &partition{
|
||||
id: partitionID,
|
||||
state: querypb.PartitionState_NotPresent,
|
||||
segments: segments,
|
||||
}
|
||||
partitions[partitionID] = partition
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("addPartition: can't find collection when add partition")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getCollections(dbID UniqueID) ([]*collection, error) {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
return collections, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("getCollections: can't find collectionID")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getPartitions(dbID UniqueID, collectionID UniqueID) ([]*partition, error) {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
partitions := make([]*partition, 0)
|
||||
for _, partition := range collection.partitions {
|
||||
partitions = append(partitions, partition)
|
||||
}
|
||||
return partitions, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("getPartitions: can't find partitionIDs")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getSegments(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) ([]*segment, error) {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
if partition, ok := collection.partitions[partitionID]; ok {
|
||||
segments := make([]*segment, 0)
|
||||
for _, segment := range partition.segments {
|
||||
segments = append(segments, segment)
|
||||
}
|
||||
return segments, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, errors.New("getSegments: can't find segmentID")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getCollectionByID(dbID UniqueID, collectionID UniqueID) (*collection, error) {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
return collection, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("getCollectionByID: can't find collectionID")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getPartitionByID(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) (*partition, error) {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
partitions := collection.partitions
|
||||
if partition, ok := partitions[partitionID]; ok {
|
||||
return partition, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("getPartitionByID: can't find partitionID")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) updatePartitionState(dbID UniqueID,
|
||||
collectionID UniqueID,
|
||||
partitionID UniqueID,
|
||||
state querypb.PartitionState) error {
|
||||
for _, collection := range mp.db2collections[dbID] {
|
||||
if collection.id == collectionID {
|
||||
if partition, ok := collection.partitions[partitionID]; ok {
|
||||
partition.state = state
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("updatePartitionState: update partition state fail")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) getPartitionStates(dbID UniqueID,
|
||||
collectionID UniqueID,
|
||||
partitionIDs []UniqueID) ([]*querypb.PartitionStates, error) {
|
||||
partitionStates := make([]*querypb.PartitionStates, 0)
|
||||
for _, collection := range mp.db2collections[dbID] {
|
||||
if collection.id == collectionID {
|
||||
for _, partitionID := range partitionIDs {
|
||||
if partition, ok := collection.partitions[partitionID]; ok {
|
||||
partitionStates = append(partitionStates, &querypb.PartitionStates{
|
||||
PartitionID: partitionID,
|
||||
State: partition.state,
|
||||
})
|
||||
} else {
|
||||
partitionStates = append(partitionStates, &querypb.PartitionStates{
|
||||
PartitionID: partitionID,
|
||||
State: querypb.PartitionState_NotPresent,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return partitionStates, nil
|
||||
}
|
||||
|
||||
func (mp *metaReplica) releaseCollection(dbID UniqueID, collectionID UniqueID) error {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for i, coll := range collections {
|
||||
if collectionID == coll.id {
|
||||
newSize := len(collections) - 1
|
||||
newColls := make([]*collection, 0, newSize)
|
||||
collections[i] = collections[newSize]
|
||||
newColls = append(newColls, collections[:newSize]...)
|
||||
mp.db2collections[dbID] = newColls
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *metaReplica) releasePartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
if _, ok := collection.partitions[partitionID]; ok {
|
||||
delete(collection.partitions, partitionID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *metaReplica) addDmChannel(dbID UniqueID, collectionID UniqueID, channel string, watchedStartPos *internalPb.MsgPosition) error {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
collection.dmChannels = append(collection.dmChannels, channel)
|
||||
collection.dmChannels2Pos[channel] = watchedStartPos
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("addDmChannels: can't find dbID or collectionID")
|
||||
}
|
||||
|
||||
func (mp *metaReplica) addExcludeSegmentIDs(dbID UniqueID, collectionID UniqueID, excludeSegments []UniqueID) error {
|
||||
if collections, ok := mp.db2collections[dbID]; ok {
|
||||
for _, collection := range collections {
|
||||
if collectionID == collection.id {
|
||||
collection.excludeSegmentIds = append(collection.excludeSegmentIds, excludeSegments...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("addExcludeSegmentIDs: can't find dbID or collectionID")
|
||||
}
|
|
@ -19,33 +19,27 @@ import (
|
|||
)
|
||||
|
||||
func TestReplica_Release(t *testing.T) {
|
||||
replica := newMetaReplica()
|
||||
err := replica.addCollection(0, 1, nil)
|
||||
meta := newMeta()
|
||||
err := meta.addCollection(1, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
colls, err := replica.getCollections(0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(colls))
|
||||
collections := meta.showCollections()
|
||||
assert.Equal(t, 1, len(collections))
|
||||
|
||||
err = replica.addPartition(0, 1, 100)
|
||||
err = meta.addPartition(1, 100)
|
||||
assert.NoError(t, err)
|
||||
partitions, err := replica.getPartitions(0, 1)
|
||||
partitions, err := meta.showPartitions(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(partitions))
|
||||
|
||||
err = replica.releasePartition(0, 1, 100)
|
||||
assert.NoError(t, err)
|
||||
partitions, err = replica.getPartitions(0, 1)
|
||||
meta.releasePartition(1, 100)
|
||||
partitions, err = meta.showPartitions(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(partitions))
|
||||
err = replica.releasePartition(0, 1, 100)
|
||||
assert.Error(t, err)
|
||||
meta.releasePartition(1, 100)
|
||||
|
||||
err = replica.releaseCollection(0, 1)
|
||||
assert.NoError(t, err)
|
||||
colls, err = replica.getCollections(0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(colls))
|
||||
err = replica.releaseCollection(0, 1)
|
||||
assert.Error(t, err)
|
||||
meta.releaseCollection(1)
|
||||
collections = meta.showCollections()
|
||||
assert.Equal(t, 0, len(collections))
|
||||
meta.releaseCollection(1)
|
||||
}
|
|
@ -1,122 +1,239 @@
|
|||
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
//// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
////
|
||||
//// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
//// with the License. You may obtain a copy of the License at
|
||||
////
|
||||
//// http://www.apache.org/licenses/LICENSE-2.0
|
||||
////
|
||||
//// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
//// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
//// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
package queryservice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
nodeclient "github.com/milvus-io/milvus/internal/distributed/querynode/client"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
)
|
||||
|
||||
type queryNodeInfo struct {
|
||||
type queryNode struct {
|
||||
id int64
|
||||
address struct {
|
||||
ip string
|
||||
port int64
|
||||
}
|
||||
client types.QueryNode
|
||||
|
||||
mu sync.Mutex // guards segments and channels2Col
|
||||
segments map[UniqueID][]UniqueID
|
||||
channels2Col map[UniqueID][]string
|
||||
//mu sync.Mutex // guards segments and channels2Col
|
||||
//nodeMeta *meta
|
||||
sync.RWMutex
|
||||
collectionInfos map[UniqueID]*querypb.CollectionInfo
|
||||
watchedQueryChannels map[UniqueID]*querypb.QueryChannelInfo
|
||||
//segments map[UniqueID][]UniqueID
|
||||
//channels2Col map[UniqueID][]string
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
|
||||
return qn.client.GetComponentStates(ctx)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) LoadSegments(ctx context.Context, in *querypb.LoadSegmentsRequest) (*commonpb.Status, error) {
|
||||
return qn.client.LoadSegments(ctx, in)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) GetSegmentInfo(ctx context.Context, in *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
return qn.client.GetSegmentInfo(ctx, in)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
|
||||
return qn.client.WatchDmChannels(ctx, in)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) AddDmChannels(channels []string, collectionID UniqueID) {
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
if _, ok := qn.channels2Col[collectionID]; !ok {
|
||||
chs := make([]string, 0)
|
||||
qn.channels2Col[collectionID] = chs
|
||||
}
|
||||
qn.channels2Col[collectionID] = append(qn.channels2Col[collectionID], channels...)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) getNumChannels() int {
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
numChannels := 0
|
||||
for _, chs := range qn.channels2Col {
|
||||
numChannels += len(chs)
|
||||
}
|
||||
return numChannels
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) AddSegments(segmentIDs []UniqueID, collectionID UniqueID) {
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
if _, ok := qn.segments[collectionID]; !ok {
|
||||
seg := make([]UniqueID, 0)
|
||||
qn.segments[collectionID] = seg
|
||||
}
|
||||
qn.segments[collectionID] = append(qn.segments[collectionID], segmentIDs...)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) getSegmentsLength() int {
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
return len(qn.segments)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) getNumSegments() int {
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
numSegments := 0
|
||||
for _, ids := range qn.segments {
|
||||
numSegments += len(ids)
|
||||
}
|
||||
return numSegments
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
|
||||
return qn.client.AddQueryChannel(ctx, in)
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
status, err := qn.client.ReleaseCollection(ctx, in)
|
||||
qn.mu.Lock()
|
||||
defer qn.mu.Unlock()
|
||||
func newQueryNode(ip string, port int64, id UniqueID) (*queryNode, error) {
|
||||
client, err := nodeclient.NewClient(fmt.Sprintf("%s:%d", ip, port))
|
||||
if err != nil {
|
||||
return status, err
|
||||
return nil, err
|
||||
}
|
||||
delete(qn.segments, in.CollectionID)
|
||||
delete(qn.channels2Col, in.CollectionID)
|
||||
return status, nil
|
||||
if err := client.Init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := client.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collectionInfo := make(map[UniqueID]*querypb.CollectionInfo)
|
||||
watchedChannels := make(map[UniqueID]*querypb.QueryChannelInfo)
|
||||
return &queryNode{
|
||||
id: id,
|
||||
address: struct {
|
||||
ip string
|
||||
port int64
|
||||
}{ip: ip, port: port},
|
||||
client: client,
|
||||
collectionInfos: collectionInfo,
|
||||
watchedQueryChannels: watchedChannels,
|
||||
//nodeMeta: newMetaReplica(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (qn *queryNodeInfo) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
|
||||
return qn.client.ReleasePartitions(ctx, in)
|
||||
func (qn *queryNode) hasCollection(collectionID UniqueID) bool {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
||||
if _, ok := qn.collectionInfos[collectionID]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func newQueryNodeInfo(client types.QueryNode) *queryNodeInfo {
|
||||
segments := make(map[UniqueID][]UniqueID)
|
||||
channels := make(map[UniqueID][]string)
|
||||
return &queryNodeInfo{
|
||||
client: client,
|
||||
segments: segments,
|
||||
channels2Col: channels,
|
||||
func (qn *queryNode) hasPartition(collectionID UniqueID, partitionID UniqueID) bool {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
||||
if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
for _, id := range info.PartitionIDs {
|
||||
if partitionID == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (qn *queryNode) addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
|
||||
if _, ok := qn.collectionInfos[collectionID]; !ok {
|
||||
partitions := make([]UniqueID, 0)
|
||||
channels := make([]*querypb.DmChannelInfo, 0)
|
||||
newCollection := &querypb.CollectionInfo{
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: partitions,
|
||||
ChannelInfos: channels,
|
||||
Schema: schema,
|
||||
}
|
||||
qn.collectionInfos[collectionID] = newCollection
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("addCollection: collection already exists")
|
||||
}
|
||||
|
||||
func (qn *queryNode) addPartition(collectionID UniqueID, partitionID UniqueID) error {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
if col, ok := qn.collectionInfos[collectionID]; ok {
|
||||
for _, id := range col.PartitionIDs {
|
||||
if id == partitionID {
|
||||
return errors.New("addPartition: partition already exists in collectionInfos")
|
||||
}
|
||||
}
|
||||
col.PartitionIDs = append(col.PartitionIDs, partitionID)
|
||||
return nil
|
||||
}
|
||||
return errors.New("addPartition: can't find collection when add partition")
|
||||
}
|
||||
|
||||
func (qn *queryNode) releaseCollection(collectionID UniqueID) {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
delete(qn.collectionInfos, collectionID)
|
||||
//TODO::should reopen
|
||||
//collectionID = 0
|
||||
delete(qn.watchedQueryChannels, collectionID)
|
||||
}
|
||||
|
||||
func (qn *queryNode) releasePartition(collectionID UniqueID, partitionID UniqueID) {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
|
||||
if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
newPartitionIDs := make([]UniqueID, 0)
|
||||
for _, id := range info.PartitionIDs {
|
||||
if id != partitionID {
|
||||
newPartitionIDs = append(newPartitionIDs, id)
|
||||
}
|
||||
}
|
||||
info.PartitionIDs = newPartitionIDs
|
||||
}
|
||||
}
|
||||
|
||||
func (qn *queryNode) hasWatchedDmChannel(collectionID UniqueID, channelID string) (bool, error) {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
||||
if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
channelInfos := info.ChannelInfos
|
||||
for _, channelInfo := range channelInfos {
|
||||
for _, channel := range channelInfo.ChannelIDs {
|
||||
if channel == channelID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, errors.New("hasWatchedDmChannel: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (qn *queryNode) getDmChannelsByCollectionID(collectionID UniqueID) ([]string, error) {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
||||
if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
channels := make([]string, 0)
|
||||
for _, channelsInfo := range info.ChannelInfos {
|
||||
channels = append(channels, channelsInfo.ChannelIDs...)
|
||||
}
|
||||
return channels, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("getDmChannelsByCollectionID: can't find collection in collectionInfos")
|
||||
}
|
||||
|
||||
func (qn *queryNode) addDmChannel(collectionID UniqueID, channels []string) error {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
|
||||
//before add channel, should ensure toAddedChannels not in meta
|
||||
if info, ok := qn.collectionInfos[collectionID]; ok {
|
||||
findNodeID := false
|
||||
for _, channelInfo := range info.ChannelInfos {
|
||||
if channelInfo.NodeIDLoaded == qn.id {
|
||||
findNodeID = true
|
||||
channelInfo.ChannelIDs = append(channelInfo.ChannelIDs, channels...)
|
||||
}
|
||||
}
|
||||
if !findNodeID {
|
||||
newChannelInfo := &querypb.DmChannelInfo{
|
||||
NodeIDLoaded: qn.id,
|
||||
ChannelIDs: channels,
|
||||
}
|
||||
info.ChannelInfos = append(info.ChannelInfos, newChannelInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("addDmChannels: can't find collection in watchedQueryChannel")
|
||||
}
|
||||
|
||||
//TODO::removeDmChannels
|
||||
|
||||
func (qn *queryNode) hasWatchedQueryChannel(collectionID UniqueID) bool {
|
||||
qn.RLock()
|
||||
defer qn.RUnlock()
|
||||
|
||||
if _, ok := qn.watchedQueryChannels[collectionID]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (qn *queryNode) addQueryChannel(collectionID UniqueID, queryChannel *querypb.QueryChannelInfo) {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
|
||||
qn.watchedQueryChannels[collectionID] = queryChannel
|
||||
}
|
||||
|
||||
func (qn *queryNode) removeQueryChannel(collectionID UniqueID) error {
|
||||
qn.Lock()
|
||||
defer qn.Unlock()
|
||||
|
||||
delete(qn.watchedQueryChannels, collectionID)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -15,12 +15,12 @@ import (
|
|||
"context"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/msgstream"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
|
@ -39,16 +39,15 @@ type queryChannelInfo struct {
|
|||
type QueryService struct {
|
||||
loopCtx context.Context
|
||||
loopCancel context.CancelFunc
|
||||
kvBase *etcdkv.EtcdKV
|
||||
|
||||
queryServiceID uint64
|
||||
replica Replica
|
||||
sched *TaskScheduler
|
||||
meta *meta
|
||||
cluster *queryNodeCluster
|
||||
scheduler *TaskScheduler
|
||||
|
||||
dataServiceClient types.DataService
|
||||
masterServiceClient types.MasterService
|
||||
queryNodes map[int64]*queryNodeInfo
|
||||
queryChannels []*queryChannelInfo
|
||||
qcMutex *sync.Mutex
|
||||
|
||||
session *sessionutil.Session
|
||||
|
||||
|
@ -73,14 +72,14 @@ func (qs *QueryService) Init() error {
|
|||
}
|
||||
|
||||
func (qs *QueryService) Start() error {
|
||||
qs.sched.Start()
|
||||
qs.scheduler.Start()
|
||||
log.Debug("start scheduler ...")
|
||||
qs.UpdateStateCode(internalpb.StateCode_Healthy)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (qs *QueryService) Stop() error {
|
||||
qs.sched.Close()
|
||||
qs.scheduler.Close()
|
||||
log.Debug("close scheduler ...")
|
||||
qs.loopCancel()
|
||||
qs.UpdateStateCode(internalpb.StateCode_Abnormal)
|
||||
|
@ -93,7 +92,6 @@ func (qs *QueryService) UpdateStateCode(code internalpb.StateCode) {
|
|||
|
||||
func NewQueryService(ctx context.Context, factory msgstream.Factory) (*QueryService, error) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
nodes := make(map[int64]*queryNodeInfo)
|
||||
queryChannels := make([]*queryChannelInfo, 0)
|
||||
channelID := len(queryChannels)
|
||||
searchPrefix := Params.SearchChannelPrefix
|
||||
|
@ -107,18 +105,16 @@ func NewQueryService(ctx context.Context, factory msgstream.Factory) (*QueryServ
|
|||
})
|
||||
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
replica := newMetaReplica()
|
||||
scheduler := NewTaskScheduler(ctx1)
|
||||
meta := newMeta()
|
||||
service := &QueryService{
|
||||
loopCtx: ctx1,
|
||||
loopCancel: cancel,
|
||||
replica: replica,
|
||||
sched: scheduler,
|
||||
queryNodes: nodes,
|
||||
queryChannels: queryChannels,
|
||||
qcMutex: &sync.Mutex{},
|
||||
msFactory: factory,
|
||||
loopCtx: ctx1,
|
||||
loopCancel: cancel,
|
||||
meta: meta,
|
||||
msFactory: factory,
|
||||
}
|
||||
//TODO::set etcd kvbase
|
||||
service.scheduler = NewTaskScheduler(ctx1, meta, service.kvBase)
|
||||
service.cluster = newQueryNodeCluster(meta)
|
||||
|
||||
service.UpdateStateCode(internalpb.StateCode_Abnormal)
|
||||
log.Debug("QueryService", zap.Any("queryChannels", queryChannels))
|
||||
|
|
|
@ -31,7 +31,8 @@ func TestQueryService_Init(t *testing.T) {
|
|||
service.Start()
|
||||
|
||||
t.Run("Test create channel", func(t *testing.T) {
|
||||
response, err := service.CreateQueryChannel(ctx)
|
||||
request := &querypb.CreateQueryChannelRequest{}
|
||||
response, err := service.CreateQueryChannel(ctx, request)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, response.RequestChannel, "query-0")
|
||||
assert.Equal(t, response.ResultChannel, "queryResult-0")
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -14,194 +14,141 @@ package queryservice
|
|||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"go.uber.org/zap"
|
||||
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/util/trace"
|
||||
oplog "github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
type TaskQueue interface {
|
||||
utChan() <-chan int
|
||||
utEmpty() bool
|
||||
utFull() bool
|
||||
addUnissuedTask(t task) error
|
||||
FrontUnissuedTask() task
|
||||
PopUnissuedTask() task
|
||||
AddActiveTask(t task)
|
||||
PopActiveTask(ts Timestamp) task
|
||||
Enqueue(t task) error
|
||||
type TaskQueue struct {
|
||||
tasks *list.List
|
||||
|
||||
maxTask int64
|
||||
taskChan chan int // to block scheduler
|
||||
|
||||
scheduler *TaskScheduler
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type BaseTaskQueue struct {
|
||||
unissuedTasks *list.List
|
||||
activeTasks map[Timestamp]task
|
||||
utLock sync.Mutex
|
||||
atLock sync.Mutex
|
||||
|
||||
maxTaskNum int64
|
||||
|
||||
utBufChan chan int // to block scheduler
|
||||
|
||||
sched *TaskScheduler
|
||||
func (queue *TaskQueue) Chan() <-chan int {
|
||||
return queue.taskChan
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) utChan() <-chan int {
|
||||
return queue.utBufChan
|
||||
func (queue *TaskQueue) taskEmpty() bool {
|
||||
queue.Lock()
|
||||
defer queue.Unlock()
|
||||
return queue.tasks.Len() == 0
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) utEmpty() bool {
|
||||
queue.utLock.Lock()
|
||||
defer queue.utLock.Unlock()
|
||||
return queue.unissuedTasks.Len() == 0
|
||||
func (queue *TaskQueue) taskFull() bool {
|
||||
return int64(queue.tasks.Len()) >= queue.maxTask
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) utFull() bool {
|
||||
return int64(queue.unissuedTasks.Len()) >= queue.maxTaskNum
|
||||
}
|
||||
func (queue *TaskQueue) addTask(tasks []task) {
|
||||
queue.Lock()
|
||||
defer queue.Unlock()
|
||||
|
||||
func (queue *BaseTaskQueue) addUnissuedTask(t task) error {
|
||||
queue.utLock.Lock()
|
||||
defer queue.utLock.Unlock()
|
||||
for _, t := range tasks {
|
||||
if queue.tasks.Len() == 0 {
|
||||
queue.taskChan <- 1
|
||||
queue.tasks.PushBack(t)
|
||||
continue
|
||||
}
|
||||
|
||||
if queue.utFull() {
|
||||
return errors.New("task queue is full")
|
||||
}
|
||||
if queue.unissuedTasks.Len() <= 0 {
|
||||
queue.unissuedTasks.PushBack(t)
|
||||
queue.utBufChan <- 1
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.Timestamp() >= queue.unissuedTasks.Back().Value.(task).Timestamp() {
|
||||
queue.unissuedTasks.PushBack(t)
|
||||
queue.utBufChan <- 1
|
||||
return nil
|
||||
}
|
||||
|
||||
for e := queue.unissuedTasks.Front(); e != nil; e = e.Next() {
|
||||
if t.Timestamp() <= e.Value.(task).Timestamp() {
|
||||
queue.unissuedTasks.InsertBefore(t, e)
|
||||
queue.utBufChan <- 1
|
||||
return nil
|
||||
for e := queue.tasks.Back(); e != nil; e = e.Prev() {
|
||||
if t.TaskPriority() > e.Value.(task).TaskPriority() {
|
||||
if e.Prev() == nil {
|
||||
queue.taskChan <- 1
|
||||
queue.tasks.InsertBefore(t, e)
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
//TODO:: take care of timestamp
|
||||
queue.taskChan <- 1
|
||||
queue.tasks.InsertAfter(t, e)
|
||||
break
|
||||
}
|
||||
}
|
||||
return errors.New("unexpected error in addUnissuedTask")
|
||||
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) FrontUnissuedTask() task {
|
||||
queue.utLock.Lock()
|
||||
defer queue.utLock.Unlock()
|
||||
func (queue *TaskQueue) PopTask() task {
|
||||
queue.Lock()
|
||||
defer queue.Unlock()
|
||||
|
||||
if queue.unissuedTasks.Len() <= 0 {
|
||||
if queue.tasks.Len() <= 0 {
|
||||
log.Warn("sorry, but the unissued task list is empty!")
|
||||
return nil
|
||||
}
|
||||
|
||||
return queue.unissuedTasks.Front().Value.(task)
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) PopUnissuedTask() task {
|
||||
queue.utLock.Lock()
|
||||
defer queue.utLock.Unlock()
|
||||
|
||||
if queue.unissuedTasks.Len() <= 0 {
|
||||
log.Warn("sorry, but the unissued task list is empty!")
|
||||
return nil
|
||||
}
|
||||
|
||||
ft := queue.unissuedTasks.Front()
|
||||
queue.unissuedTasks.Remove(ft)
|
||||
ft := queue.tasks.Front()
|
||||
queue.tasks.Remove(ft)
|
||||
|
||||
return ft.Value.(task)
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) AddActiveTask(t task) {
|
||||
queue.atLock.Lock()
|
||||
defer queue.atLock.Unlock()
|
||||
|
||||
ts := t.Timestamp()
|
||||
_, ok := queue.activeTasks[ts]
|
||||
if ok {
|
||||
log.Debug("queryService", zap.Uint64("task with timestamp ts already in active task list! ts:", ts))
|
||||
}
|
||||
|
||||
queue.activeTasks[ts] = t
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) PopActiveTask(ts Timestamp) task {
|
||||
queue.atLock.Lock()
|
||||
defer queue.atLock.Unlock()
|
||||
|
||||
t, ok := queue.activeTasks[ts]
|
||||
if ok {
|
||||
log.Debug("queryService", zap.Uint64("task with timestamp ts has been deleted in active task list! ts:", ts))
|
||||
delete(queue.activeTasks, ts)
|
||||
return t
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (queue *BaseTaskQueue) Enqueue(t task) error {
|
||||
return queue.addUnissuedTask(t)
|
||||
}
|
||||
|
||||
type DdTaskQueue struct {
|
||||
BaseTaskQueue
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func (queue *DdTaskQueue) Enqueue(t task) error {
|
||||
queue.lock.Lock()
|
||||
defer queue.lock.Unlock()
|
||||
return queue.BaseTaskQueue.Enqueue(t)
|
||||
}
|
||||
|
||||
func NewDdTaskQueue(sched *TaskScheduler) *DdTaskQueue {
|
||||
return &DdTaskQueue{
|
||||
BaseTaskQueue: BaseTaskQueue{
|
||||
unissuedTasks: list.New(),
|
||||
activeTasks: make(map[Timestamp]task),
|
||||
maxTaskNum: 1024,
|
||||
utBufChan: make(chan int, 1024),
|
||||
sched: sched,
|
||||
},
|
||||
func NewTaskQueue(scheduler *TaskScheduler) *TaskQueue {
|
||||
return &TaskQueue{
|
||||
tasks: list.New(),
|
||||
maxTask: 1024,
|
||||
taskChan: make(chan int, 1024),
|
||||
scheduler: scheduler,
|
||||
}
|
||||
}
|
||||
|
||||
type TaskScheduler struct {
|
||||
DdQueue TaskQueue
|
||||
triggerTaskQueue *TaskQueue
|
||||
activateTaskChan chan task
|
||||
meta *meta
|
||||
taskIDAllocator func() (UniqueID, error)
|
||||
kvBase *etcdkv.EtcdKV
|
||||
|
||||
wg sync.WaitGroup
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewTaskScheduler(ctx context.Context) *TaskScheduler {
|
||||
func NewTaskScheduler(ctx context.Context, meta *meta, kv *etcdkv.EtcdKV) *TaskScheduler {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
taskChan := make(chan task, 1024)
|
||||
s := &TaskScheduler{
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
meta: meta,
|
||||
activateTaskChan: taskChan,
|
||||
kvBase: kv,
|
||||
}
|
||||
s.DdQueue = NewDdTaskQueue(s)
|
||||
s.triggerTaskQueue = NewTaskQueue(s)
|
||||
//TODO::add etcd
|
||||
//idAllocator := allocator.NewGlobalIDAllocator("queryService taskID", s.kvBase)
|
||||
//s.taskIDAllocator = func() (UniqueID, error) {
|
||||
// return idAllocator.AllocOne()
|
||||
//}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) scheduleDdTask() task {
|
||||
return sched.DdQueue.PopUnissuedTask()
|
||||
func (scheduler *TaskScheduler) Enqueue(tasks []task) {
|
||||
//TODO::open when add etcd
|
||||
//for _, t := range tasks {
|
||||
// id, err := scheduler.taskIDAllocator()
|
||||
// if err != nil {
|
||||
// log.Error(err.Error())
|
||||
// }
|
||||
// t.SetID(id)
|
||||
//}
|
||||
scheduler.triggerTaskQueue.addTask(tasks)
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
|
||||
func (scheduler *TaskScheduler) processTask(t task) {
|
||||
span, ctx := trace.StartSpanFromContext(t.TraceCtx(),
|
||||
opentracing.Tags{
|
||||
"Type": t.Name(),
|
||||
"Type": t.Type(),
|
||||
"ID": t.ID(),
|
||||
})
|
||||
defer span.Finish()
|
||||
|
@ -217,13 +164,6 @@ func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
|
|||
return
|
||||
}
|
||||
|
||||
span.LogFields(oplog.Int64("scheduler process AddActiveTask", t.ID()))
|
||||
q.AddActiveTask(t)
|
||||
|
||||
defer func() {
|
||||
span.LogFields(oplog.Int64("scheduler process PopActiveTask", t.ID()))
|
||||
q.PopActiveTask(t.Timestamp())
|
||||
}()
|
||||
span.LogFields(oplog.Int64("scheduler process Execute", t.ID()))
|
||||
err = t.Execute(ctx)
|
||||
if err != nil {
|
||||
|
@ -235,29 +175,78 @@ func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
|
|||
err = t.PostExecute(ctx)
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) definitionLoop() {
|
||||
defer sched.wg.Done()
|
||||
func (scheduler *TaskScheduler) scheduleLoop() {
|
||||
defer scheduler.wg.Done()
|
||||
var w sync.WaitGroup
|
||||
for {
|
||||
select {
|
||||
case <-sched.ctx.Done():
|
||||
case <-scheduler.ctx.Done():
|
||||
return
|
||||
case <-sched.DdQueue.utChan():
|
||||
if !sched.DdQueue.utEmpty() {
|
||||
t := sched.scheduleDdTask()
|
||||
sched.processTask(t, sched.DdQueue)
|
||||
}
|
||||
case <-scheduler.triggerTaskQueue.Chan():
|
||||
t := scheduler.triggerTaskQueue.PopTask()
|
||||
log.Debug("pop a triggerTask from triggerTaskQueue")
|
||||
scheduler.processTask(t)
|
||||
//TODO::add active task to etcd
|
||||
w.Add(2)
|
||||
go scheduler.addActivateTask(&w, t)
|
||||
//TODO::handle active task return error, maybe node down...
|
||||
go scheduler.processActivateTask(&w)
|
||||
w.Wait()
|
||||
//TODO:: delete trigger task from etcd
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) Start() error {
|
||||
sched.wg.Add(1)
|
||||
go sched.definitionLoop()
|
||||
func (scheduler *TaskScheduler) addActivateTask(wg *sync.WaitGroup, t task) {
|
||||
defer wg.Done()
|
||||
var activeTaskWg sync.WaitGroup
|
||||
log.Debug("num of child task", zap.Int("num child task", len(t.GetChildTask())))
|
||||
for _, childTask := range t.GetChildTask() {
|
||||
if childTask != nil {
|
||||
log.Debug("add a activate task to activateChan")
|
||||
scheduler.activateTaskChan <- childTask
|
||||
activeTaskWg.Add(1)
|
||||
go scheduler.waitActivateTaskDone(&activeTaskWg, childTask)
|
||||
}
|
||||
}
|
||||
scheduler.activateTaskChan <- nil
|
||||
activeTaskWg.Wait()
|
||||
}
|
||||
|
||||
func (scheduler *TaskScheduler) waitActivateTaskDone(wg *sync.WaitGroup, t task) {
|
||||
defer wg.Done()
|
||||
err := t.WaitToFinish()
|
||||
if err != nil {
|
||||
//TODO:: redo task
|
||||
log.Error("waitActivateTaskDone: activate task return err")
|
||||
}
|
||||
log.Debug("one activate task done")
|
||||
}
|
||||
|
||||
func (scheduler *TaskScheduler) processActivateTask(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-scheduler.ctx.Done():
|
||||
return
|
||||
case t := <-scheduler.activateTaskChan:
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
log.Debug("pop a activate task from activateChan")
|
||||
scheduler.processTask(t)
|
||||
//TODO:: delete active task from etcd
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *TaskScheduler) Start() error {
|
||||
scheduler.wg.Add(1)
|
||||
go scheduler.scheduleLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) Close() {
|
||||
sched.cancel()
|
||||
sched.wg.Wait()
|
||||
func (scheduler *TaskScheduler) Close() {
|
||||
scheduler.cancel()
|
||||
scheduler.wg.Wait()
|
||||
}
|
||||
|
|
|
@ -199,7 +199,7 @@ type QueryService interface {
|
|||
ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error)
|
||||
LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error)
|
||||
ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error)
|
||||
CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error)
|
||||
CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error)
|
||||
GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error)
|
||||
GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error)
|
||||
}
|
||||
|
|
|
@ -961,6 +961,8 @@ class TestSearchDSL(object):
|
|||
# The following cases are used to build invalid query expr
|
||||
******************************************************************
|
||||
"""
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
def test_query_no_must(self, connect, collection):
|
||||
'''
|
||||
method: build query without must expr
|
||||
|
@ -971,6 +973,7 @@ class TestSearchDSL(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_no_vector_term_only(self, connect, collection):
|
||||
'''
|
||||
|
@ -985,6 +988,7 @@ class TestSearchDSL(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_no_vector_range_only(self, connect, collection):
|
||||
'''
|
||||
|
@ -1007,6 +1011,7 @@ class TestSearchDSL(object):
|
|||
assert len(res) == nq
|
||||
assert len(res[0]) == default_top_k
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_wrong_format(self, connect, collection):
|
||||
'''
|
||||
|
@ -1021,6 +1026,7 @@ class TestSearchDSL(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_empty(self, connect, collection):
|
||||
'''
|
||||
|
@ -1170,6 +1176,8 @@ class TestSearchDSL(object):
|
|||
# The following cases are used to build invalid term query expr
|
||||
******************************************************************
|
||||
"""
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_term_key_error(self, connect, collection):
|
||||
'''
|
||||
|
@ -1189,6 +1197,7 @@ class TestSearchDSL(object):
|
|||
def get_invalid_term(self, request):
|
||||
return request.param
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
|
||||
'''
|
||||
|
@ -1229,6 +1238,7 @@ class TestSearchDSL(object):
|
|||
assert len(res[0]) == default_top_k
|
||||
connect.drop_collection(collection_term)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_term_one_field_not_existed(self, connect, collection):
|
||||
'''
|
||||
|
@ -1248,6 +1258,8 @@ class TestSearchDSL(object):
|
|||
# The following cases are used to build valid range query expr
|
||||
******************************************************************
|
||||
"""
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_range_key_error(self, connect, collection):
|
||||
'''
|
||||
|
@ -1267,6 +1279,7 @@ class TestSearchDSL(object):
|
|||
def get_invalid_range(self, request):
|
||||
return request.param
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
|
||||
'''
|
||||
|
@ -1280,6 +1293,7 @@ class TestSearchDSL(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_range_string_ranges(self, connect, collection):
|
||||
'''
|
||||
|
@ -1332,6 +1346,7 @@ class TestSearchDSL(object):
|
|||
assert len(res) == nq
|
||||
assert len(res[0]) == default_top_k
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_range_one_field_not_existed(self, connect, collection):
|
||||
'''
|
||||
|
@ -1399,6 +1414,7 @@ class TestSearchDSL(object):
|
|||
assert len(res) == nq
|
||||
assert len(res[0]) == 0
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_single_term_multi_fields(self, connect, collection):
|
||||
'''
|
||||
|
@ -1462,6 +1478,7 @@ class TestSearchDSL(object):
|
|||
assert len(res) == nq
|
||||
assert len(res[0]) == 0
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_single_range_multi_fields(self, connect, collection):
|
||||
'''
|
||||
|
@ -1518,6 +1535,8 @@ class TestSearchDSL(object):
|
|||
# The following cases are used to build multi vectors query expr
|
||||
******************************************************************
|
||||
"""
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
def test_query_multi_vectors_same_field(self, connect, collection):
|
||||
'''
|
||||
method: build query with two vectors same field
|
||||
|
@ -1540,6 +1559,8 @@ class TestSearchDSLBools(object):
|
|||
# The following cases are used to build invalid query expr
|
||||
******************************************************************
|
||||
"""
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_query_no_bool(self, connect, collection):
|
||||
'''
|
||||
|
@ -1552,6 +1573,7 @@ class TestSearchDSLBools(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_should_only_term(self, connect, collection):
|
||||
'''
|
||||
|
@ -1563,6 +1585,7 @@ class TestSearchDSLBools(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_query_should_only_vector(self, connect, collection):
|
||||
'''
|
||||
|
@ -1574,6 +1597,7 @@ class TestSearchDSLBools(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
def test_query_must_not_only_term(self, connect, collection):
|
||||
'''
|
||||
method: build query without must, with must_not.term instead
|
||||
|
@ -1584,6 +1608,7 @@ class TestSearchDSLBools(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
def test_query_must_not_vector(self, connect, collection):
|
||||
'''
|
||||
method: build query without must, with must_not.vector instead
|
||||
|
@ -1594,6 +1619,7 @@ class TestSearchDSLBools(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
def test_query_must_should(self, connect, collection):
|
||||
'''
|
||||
method: build query must, and with should.term
|
||||
|
@ -1648,12 +1674,14 @@ class TestSearchInvalid(object):
|
|||
# pytest.skip("sq8h not support in CPU mode")
|
||||
return request.param
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_search_with_invalid_collection(self, connect, get_collection_name):
|
||||
collection_name = get_collection_name
|
||||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection_name, default_query)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
|
||||
# tag = " "
|
||||
|
@ -1661,12 +1689,14 @@ class TestSearchInvalid(object):
|
|||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, default_query, partition_names=tag)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(2)
|
||||
def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
|
||||
fields = [get_invalid_field]
|
||||
with pytest.raises(Exception) as e:
|
||||
res = connect.search(collection, default_query, fields=fields)
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(1)
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_search_with_not_existed_field(self, connect, collection):
|
||||
|
@ -1685,6 +1715,7 @@ class TestSearchInvalid(object):
|
|||
def get_top_k(self, request):
|
||||
yield request.param
|
||||
|
||||
@pytest.mark.skip("bigsheep-search-without-load")
|
||||
@pytest.mark.level(1)
|
||||
@pytest.mark.tags(CaseLabel.tags_smoke)
|
||||
def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
|
||||
|
|
Loading…
Reference in New Issue