Remove not used QueryChannel in Proxy and Query Cluster (#16856)

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
pull/17279/head
congqixia 2022-05-30 19:50:04 +08:00 committed by GitHub
parent cecbd40a22
commit c88514bc49
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
58 changed files with 243 additions and 2368 deletions

View File

@ -168,12 +168,6 @@ queryNode:
flowGraph:
maxQueueLength: 1024 # Maximum length of task queue in flowgraph
maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
msgStream:
search:
recvBufSize: 512 # msgPack channel buffer size
pulsarBufSize: 512 # pulsar channel buffer size
searchResult:
recvBufSize: 64 # msgPack channel buffer size
# Segcore will divide a segment into multiple chunks.
segcore:
chunkRows: 32768 # The number of vectors in a chunk.

View File

@ -161,12 +161,6 @@ queryNode:
flowGraph:
maxQueueLength: 1024 # Maximum length of task queue in flowgraph
maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
msgStream:
search:
recvBufSize: 512 # msgPack channel buffer size
pulsarBufSize: 512 # pulsar channel buffer size
searchResult:
recvBufSize: 64 # msgPack channel buffer size
# Segcore will divide a segment into multiple chunks to enable small index
segcore:
chunkRows: 32768 # The number of vectors in a chunk.

View File

@ -384,10 +384,6 @@ func (m *MockQueryCoord) ReleasePartitions(ctx context.Context, req *querypb.Rel
return nil, nil
}
func (m *MockQueryCoord) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
return nil, nil
}
func (m *MockQueryCoord) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
return nil, nil
}

View File

@ -234,20 +234,6 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
return ret.(*commonpb.Status), err
}
// CreateQueryChannel creates the channels for querying in QueryCoord.
func (c *Client) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(querypb.QueryCoordClient).CreateQueryChannel(ctx, req)
})
if err != nil || ret == nil {
return nil, err
}
return ret.(*querypb.CreateQueryChannelResponse), err
}
// GetPartitionStates gets the states of the specified partition.
func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {

View File

@ -90,9 +90,6 @@ func Test_NewClient(t *testing.T) {
r10, err := client.ReleaseCollection(ctx, nil)
retCheck(retNotNil, r10, err)
r11, err := client.CreateQueryChannel(ctx, nil)
retCheck(retNotNil, r11, err)
r12, err := client.ShowPartitions(ctx, nil)
retCheck(retNotNil, r12, err)

View File

@ -371,11 +371,6 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
return s.queryCoord.ReleasePartitions(ctx, req)
}
// CreateQueryChannel creates the channels for querying in QueryCoord.
func (s *Server) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
return s.queryCoord.CreateQueryChannel(ctx, req)
}
// GetSegmentInfo gets the information of the specified segment from QueryCoord.
func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
return s.queryCoord.GetSegmentInfo(ctx, req)

View File

@ -45,7 +45,6 @@ type MockQueryCoord struct {
showcolResp *querypb.ShowCollectionsResponse
showpartResp *querypb.ShowPartitionsResponse
partResp *querypb.GetPartitionStatesResponse
channelResp *querypb.CreateQueryChannelResponse
infoResp *querypb.GetSegmentInfoResponse
metricResp *milvuspb.GetMetricsResponse
replicasResp *milvuspb.GetReplicasResponse
@ -128,10 +127,6 @@ func (m *MockQueryCoord) ReleasePartitions(ctx context.Context, req *querypb.Rel
return m.status, m.err
}
func (m *MockQueryCoord) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
return m.channelResp, m.err
}
func (m *MockQueryCoord) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
return m.infoResp, m.err
}
@ -269,7 +264,6 @@ func Test_NewServer(t *testing.T) {
showcolResp: &querypb.ShowCollectionsResponse{},
showpartResp: &querypb.ShowPartitionsResponse{},
partResp: &querypb.GetPartitionStatesResponse{},
channelResp: &querypb.CreateQueryChannelResponse{},
infoResp: &querypb.GetSegmentInfoResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}},
metricResp: &milvuspb.GetMetricsResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}},
}
@ -358,12 +352,6 @@ func Test_NewServer(t *testing.T) {
assert.NotNil(t, resp)
})
t.Run("CreateQueryChannel", func(t *testing.T) {
resp, err := server.CreateQueryChannel(ctx, nil)
assert.Nil(t, err)
assert.NotNil(t, resp)
})
t.Run("GetTimeTickChannel", func(t *testing.T) {
resp, err := server.GetTimeTickChannel(ctx, nil)
assert.Nil(t, err)

View File

@ -133,34 +133,6 @@ func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResp
return ret.(*milvuspb.StringResponse), err
}
// AddQueryChannel adds query channel for QueryNode component.
func (c *Client) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(querypb.QueryNodeClient).AddQueryChannel(ctx, req)
})
if err != nil || ret == nil {
return nil, err
}
return ret.(*commonpb.Status), err
}
// RemoveQueryChannel removes the query channel for QueryNode component.
func (c *Client) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(querypb.QueryNodeClient).RemoveQueryChannel(ctx, req)
})
if err != nil || ret == nil {
return nil, err
}
return ret.(*commonpb.Status), err
}
// WatchDmChannels watches the channels about data manipulation.
func (c *Client) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {

View File

@ -69,12 +69,6 @@ func Test_NewClient(t *testing.T) {
r3, err := client.GetStatisticsChannel(ctx)
retCheck(retNotNil, r3, err)
r4, err := client.AddQueryChannel(ctx, nil)
retCheck(retNotNil, r4, err)
r5, err := client.RemoveQueryChannel(ctx, nil)
retCheck(retNotNil, r5, err)
r6, err := client.WatchDmChannels(ctx, nil)
retCheck(retNotNil, r6, err)

View File

@ -252,18 +252,6 @@ func (s *Server) GetComponentStates(ctx context.Context, req *internalpb.GetComp
return s.querynode.GetComponentStates(ctx)
}
// AddQueryChannel adds query channel for QueryNode component.
func (s *Server) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
// ignore ctx
return s.querynode.AddQueryChannel(ctx, req)
}
// RemoveQueryChannel removes the query channel for QueryNode component.
func (s *Server) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
// ignore ctx
return s.querynode.RemoveQueryChannel(ctx, req)
}
// WatchDmChannels watches the channels about data manipulation.
func (s *Server) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
// ignore ctx

View File

@ -76,14 +76,6 @@ func (m *MockQueryNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.Strin
return m.strResp, m.err
}
func (m *MockQueryNode) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
return m.status, m.err
}
func (m *MockQueryNode) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
return m.status, m.err
}
func (m *MockQueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
return m.status, m.err
}
@ -257,20 +249,6 @@ func Test_NewServer(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("AddQueryChannel", func(t *testing.T) {
req := &querypb.AddQueryChannelRequest{}
resp, err := server.AddQueryChannel(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("RemoveQueryChannel", func(t *testing.T) {
req := &querypb.RemoveQueryChannelRequest{}
resp, err := server.RemoveQueryChannel(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("WatchDmChannels", func(t *testing.T) {
req := &querypb.WatchDmChannelsRequest{}
resp, err := server.WatchDmChannels(ctx, req)

View File

@ -23,7 +23,6 @@ service QueryCoord {
rpc LoadCollection(LoadCollectionRequest) returns (common.Status) {}
rpc ReleaseCollection(ReleaseCollectionRequest) returns (common.Status) {}
rpc CreateQueryChannel(CreateQueryChannelRequest) returns (CreateQueryChannelResponse) {}
rpc GetPartitionStates(GetPartitionStatesRequest) returns (GetPartitionStatesResponse) {}
rpc GetSegmentInfo(GetSegmentInfoRequest) returns (GetSegmentInfoResponse) {}
rpc LoadBalance(LoadBalanceRequest) returns (common.Status) {}
@ -41,8 +40,6 @@ service QueryNode {
rpc GetTimeTickChannel(internal.GetTimeTickChannelRequest) returns(milvus.StringResponse) {}
rpc GetStatisticsChannel(internal.GetStatisticsChannelRequest) returns(milvus.StringResponse){}
rpc AddQueryChannel(AddQueryChannelRequest) returns (common.Status) {}
rpc RemoveQueryChannel(RemoveQueryChannelRequest) returns (common.Status) {}
rpc WatchDmChannels(WatchDmChannelsRequest) returns (common.Status) {}
rpc WatchDeltaChannels(WatchDeltaChannelsRequest) returns (common.Status) {}
rpc LoadSegments(LoadSegmentsRequest) returns (common.Status) {}
@ -118,17 +115,6 @@ message ReleasePartitionsRequest {
int64 nodeID = 5;
}
message CreateQueryChannelRequest {
int64 collectionID = 1;
int64 proxyID = 2;
}
message CreateQueryChannelResponse {
common.Status status = 1;
string query_channel = 2;
string query_result_channel = 3;
}
message GetPartitionStatesRequest {
common.MsgBase base = 1;
int64 dbID = 2;
@ -169,24 +155,6 @@ message ShardLeadersList { // All leaders of all replicas of one shard
}
//-----------------query node grpc request and response proto----------------
message AddQueryChannelRequest {
common.MsgBase base = 1;
int64 nodeID = 2;
int64 collectionID = 3;
string query_channel = 4;
string query_result_channel = 5;
internal.MsgPosition seek_position = 6;
repeated SegmentInfo global_sealed_segments = 7;
}
message RemoveQueryChannelRequest {
common.MsgBase base = 1;
int64 nodeID = 2;
int64 collectionID = 3;
string query_channel = 4;
string query_result_channel = 5;
}
message LoadMetaInfo {
LoadType load_type = 1;
int64 collectionID = 2;

File diff suppressed because it is too large Load Diff

View File

@ -18,7 +18,6 @@ package proxy
import (
"context"
"errors"
"fmt"
"runtime"
"sort"
@ -30,7 +29,6 @@ import (
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/uniquegenerator"
@ -41,10 +39,6 @@ import (
type channelsMgr interface {
getChannels(collectionID UniqueID) ([]pChan, error)
getVChannels(collectionID UniqueID) ([]vChan, error)
createDQLStream(collectionID UniqueID) error
getDQLStream(collectionID UniqueID) (msgstream.MsgStream, error)
removeDQLStream(collectionID UniqueID) error
removeAllDQLStream() error
createDMLMsgStream(collectionID UniqueID) error
getDMLStream(collectionID UniqueID) (msgstream.MsgStream, error)
removeDMLStream(collectionID UniqueID) error
@ -108,28 +102,6 @@ func getDmlChannelsFunc(ctx context.Context, rc types.RootCoord) getChannelsFunc
}
}
// getDqlChannelsFunc returns a function about how to get query channels of a collection.
func getDqlChannelsFunc(ctx context.Context, proxyID int64, qc createQueryChannelInterface) getChannelsFuncType {
return func(collectionID UniqueID) (map[vChan]pChan, error) {
req := &querypb.CreateQueryChannelRequest{
CollectionID: collectionID,
ProxyID: proxyID,
}
resp, err := qc.CreateQueryChannel(ctx, req)
if err != nil {
return nil, err
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
return nil, errors.New(resp.Status.Reason)
}
m := make(map[vChan]pChan)
m[resp.QueryChannel] = resp.QueryChannel
return m, nil
}
}
// streamType indicates which type of message stream should be created.
type streamType int
@ -474,7 +446,6 @@ var _ channelsMgr = (*channelsMgrImpl)(nil)
// channelsMgrImpl implements channelsMgr.
type channelsMgrImpl struct {
dmlChannelsMgr *singleTypeChannelsMgr
dqlChannelsMgr *singleTypeChannelsMgr
}
func (mgr *channelsMgrImpl) getChannels(collectionID UniqueID) ([]pChan, error) {
@ -485,22 +456,6 @@ func (mgr *channelsMgrImpl) getVChannels(collectionID UniqueID) ([]vChan, error)
return mgr.dmlChannelsMgr.getVChannels(collectionID)
}
func (mgr *channelsMgrImpl) createDQLStream(collectionID UniqueID) error {
return mgr.dqlChannelsMgr.createMsgStream(collectionID)
}
func (mgr *channelsMgrImpl) getDQLStream(collectionID UniqueID) (msgstream.MsgStream, error) {
return mgr.dqlChannelsMgr.getStream(collectionID)
}
func (mgr *channelsMgrImpl) removeDQLStream(collectionID UniqueID) error {
return mgr.dqlChannelsMgr.removeStream(collectionID)
}
func (mgr *channelsMgrImpl) removeAllDQLStream() error {
return mgr.dqlChannelsMgr.removeAllStream()
}
func (mgr *channelsMgrImpl) createDMLMsgStream(collectionID UniqueID) error {
return mgr.dmlChannelsMgr.createMsgStream(collectionID)
}
@ -521,12 +476,10 @@ func (mgr *channelsMgrImpl) removeAllDMLStream() error {
func newChannelsMgrImpl(
getDmlChannelsFunc getChannelsFuncType,
dmlRepackFunc repackFuncType,
getDqlChannelsFunc getChannelsFuncType,
dqlRepackFunc repackFuncType,
msgStreamFactory msgstream.Factory,
) *channelsMgrImpl {
return &channelsMgrImpl{
dmlChannelsMgr: newSingleTypeChannelsMgr(getDmlChannelsFunc, msgStreamFactory, dmlRepackFunc, dmlStreamType),
dqlChannelsMgr: newSingleTypeChannelsMgr(getDqlChannelsFunc, msgStreamFactory, dqlRepackFunc, dqlStreamType),
}
}

View File

@ -27,9 +27,8 @@ import (
func TestChannelsMgrImpl_getChannels(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -45,9 +44,8 @@ func TestChannelsMgrImpl_getChannels(t *testing.T) {
func TestChannelsMgrImpl_getVChannels(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -63,9 +61,8 @@ func TestChannelsMgrImpl_getVChannels(t *testing.T) {
func TestChannelsMgrImpl_createDMLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -85,9 +82,8 @@ func TestChannelsMgrImpl_createDMLMsgStream(t *testing.T) {
func TestChannelsMgrImpl_getDMLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -103,9 +99,8 @@ func TestChannelsMgrImpl_getDMLMsgStream(t *testing.T) {
func TestChannelsMgrImpl_removeDMLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -130,9 +125,8 @@ func TestChannelsMgrImpl_removeDMLMsgStream(t *testing.T) {
func TestChannelsMgrImpl_removeAllDMLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
mgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer mgr.removeAllDMLStream()
num := 10
@ -143,79 +137,6 @@ func TestChannelsMgrImpl_removeAllDMLMsgStream(t *testing.T) {
}
}
func TestChannelsMgrImpl_createDQLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
err := mgr.createDQLStream(collID)
assert.Equal(t, nil, err)
}
func TestChannelsMgrImpl_getDQLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
_, err := mgr.getDQLStream(collID)
assert.NotEqual(t, nil, err)
err = mgr.createDQLStream(collID)
assert.Equal(t, nil, err)
_, err = mgr.getDQLStream(collID)
assert.Equal(t, nil, err)
}
func TestChannelsMgrImpl_removeDQLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
defer mgr.removeAllDMLStream()
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
_, err := mgr.getDQLStream(collID)
assert.NotEqual(t, nil, err)
err = mgr.removeDQLStream(collID)
assert.NotEqual(t, nil, err)
err = mgr.createDQLStream(collID)
assert.Equal(t, nil, err)
_, err = mgr.getDQLStream(collID)
assert.Equal(t, nil, err)
err = mgr.removeDQLStream(collID)
assert.Equal(t, nil, err)
_, err = mgr.getDQLStream(collID)
assert.NotEqual(t, nil, err)
}
func TestChannelsMgrImpl_removeAllDQLMsgStream(t *testing.T) {
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
mgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
defer mgr.removeAllDMLStream()
num := 10
for i := 0; i < num; i++ {
collID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
err := mgr.createDQLStream(collID)
assert.Equal(t, nil, err)
}
}
func TestGetAllKeysAndGetAllValues(t *testing.T) {
chanMapping := make(map[vChan]pChan)
chanMapping["v1"] = "p1"

View File

@ -138,8 +138,6 @@ func (node *Proxy) ReleaseDQLMessageStream(ctx context.Context, request *proxypb
return unhealthyStatus(), nil
}
_ = node.chMgr.removeDQLStream(request.CollectionID)
logutil.Logger(ctx).Debug("complete to release DQL message stream",
zap.Any("role", typeutil.ProxyRole),
zap.Any("db", request.DbID),

View File

@ -19,8 +19,6 @@ package proxy
import (
"context"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
)
@ -44,8 +42,3 @@ type timestampAllocatorInterface interface {
type getChannelsService interface {
GetChannels(collectionID UniqueID) (map[vChan]pChan, error)
}
// createQueryChannelInterface defines CreateQueryChannel
type createQueryChannelInterface interface {
CreateQueryChannel(ctx context.Context, request *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error)
}

View File

@ -35,7 +35,6 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/logutil"
@ -158,30 +157,6 @@ func (node *Proxy) Init() error {
}
log.Info("init session for Proxy done")
if node.queryCoord != nil {
log.Debug("create query channel for Proxy")
resp, err := node.queryCoord.CreateQueryChannel(node.ctx, &querypb.CreateQueryChannelRequest{})
if err != nil {
log.Warn("failed to create query channel for Proxy", zap.Error(err))
return err
}
if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
log.Warn("failed to create query channel for Proxy",
zap.String("error_code", resp.Status.ErrorCode.String()),
zap.String("reason", resp.Status.Reason))
return errors.New(resp.Status.Reason)
}
// TODO SearchResultChannelNames and RetrieveResultChannelNames should not be part in the Param table
// we should maintain a separate map for search result
Params.ProxyCfg.SearchResultChannelNames = []string{resp.QueryResultChannel}
Params.ProxyCfg.RetrieveResultChannelNames = []string{resp.QueryResultChannel}
log.Debug("Proxy CreateQueryChannel success", zap.Any("SearchResultChannelNames", Params.ProxyCfg.SearchResultChannelNames))
log.Debug("Proxy CreateQueryChannel success", zap.Any("RetrieveResultChannelNames", Params.ProxyCfg.RetrieveResultChannelNames))
log.Debug("create query channel for Proxy done", zap.String("QueryResultChannel", resp.QueryResultChannel))
}
node.factory.Init(&Params)
log.Debug("init parameters for factory", zap.String("role", typeutil.ProxyRole), zap.Any("parameters", Params.ServiceParam))
@ -221,8 +196,7 @@ func (node *Proxy) Init() error {
log.Debug("create channels manager", zap.String("role", typeutil.ProxyRole))
dmlChannelsFunc := getDmlChannelsFunc(node.ctx, node.rootCoord)
dqlChannelsFunc := getDqlChannelsFunc(node.ctx, node.session.ServerID, node.queryCoord)
chMgr := newChannelsMgrImpl(dmlChannelsFunc, defaultInsertRepackFunc, dqlChannelsFunc, nil, node.factory)
chMgr := newChannelsMgrImpl(dmlChannelsFunc, defaultInsertRepackFunc, nil, node.factory)
node.chMgr = chMgr
log.Debug("create channels manager done", zap.String("role", typeutil.ProxyRole))

View File

@ -3058,7 +3058,7 @@ func TestProxy_Import(t *testing.T) {
t.Run("test import get vChannel failed (the first one)", func(t *testing.T) {
defer wg.Done()
proxy.stateCode.Store(internalpb.StateCode_Healthy)
proxy.chMgr = newChannelsMgrImpl(master.GetChannels, nil, nil, nil, msgStreamFactory)
proxy.chMgr = newChannelsMgrImpl(master.GetChannels, nil, nil, msgStreamFactory)
resp, err := proxy.Import(context.TODO(),
&milvuspb.ImportRequest{
CollectionName: "import_collection",

View File

@ -281,19 +281,6 @@ func (coord *QueryCoordMock) ReleasePartitions(ctx context.Context, req *querypb
panic("implement me")
}
func (coord *QueryCoordMock) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
if !coord.healthy() {
return &querypb.CreateQueryChannelResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "unhealthy",
},
}, nil
}
panic("implement me")
}
func (coord *QueryCoordMock) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
if !coord.healthy() {
return &querypb.GetPartitionStatesResponse{

View File

@ -53,16 +53,6 @@ func (m *QueryNodeMock) SyncReplicaSegments(ctx context.Context, req *querypb.Sy
return &commonpb.Status{}, nil
}
// TODO
func (m *QueryNodeMock) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
return nil, nil
}
// TODO
func (m *QueryNodeMock) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
return nil, nil

View File

@ -764,7 +764,6 @@ func (dct *dropCollectionTask) Execute(ctx context.Context) error {
}
_ = dct.chMgr.removeDMLStream(collID)
_ = dct.chMgr.removeDQLStream(collID)
globalMetaCache.RemoveCollection(ctx, dct.CollectionName)
return nil
}
@ -2859,7 +2858,6 @@ func (rct *releaseCollectionTask) Execute(ctx context.Context) (err error) {
rct.result, err = rct.queryCoord.ReleaseCollection(ctx, request)
_ = rct.chMgr.removeDQLStream(collID)
globalMetaCache.RemoveCollection(ctx, rct.CollectionName)
return err

View File

@ -1097,9 +1097,8 @@ func TestDropCollectionTask(t *testing.T) {
InitMetaCache(rc, qc)
master := newMockGetChannelsService()
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
channelMgr := newChannelsMgrImpl(master.GetChannels, nil, query.GetChannels, nil, factory)
channelMgr := newChannelsMgrImpl(master.GetChannels, nil, nil, factory)
defer channelMgr.removeAllDMLStream()
prefix := "TestDropCollectionTask"
@ -1649,7 +1648,6 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
var err error
Params.Init()
Params.ProxyCfg.RetrieveResultChannelNames = []string{funcutil.GenRandomStr()}
rc := NewRootCoordMock()
rc.Start()
@ -1723,11 +1721,9 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
assert.NoError(t, err)
dmlChannelsFunc := getDmlChannelsFunc(ctx, rc)
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, query.GetChannels, nil, factory)
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, nil, factory)
defer chMgr.removeAllDMLStream()
defer chMgr.removeAllDQLStream()
err = chMgr.createDMLMsgStream(collectionID)
assert.NoError(t, err)
@ -1905,7 +1901,6 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
var err error
Params.Init()
Params.ProxyCfg.RetrieveResultChannelNames = []string{funcutil.GenRandomStr()}
rc := NewRootCoordMock()
rc.Start()
@ -1980,11 +1975,9 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
assert.NoError(t, err)
dmlChannelsFunc := getDmlChannelsFunc(ctx, rc)
query := newMockGetChannelsService()
factory := newSimpleMockMsgStreamFactory()
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, query.GetChannels, nil, factory)
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, nil, factory)
defer chMgr.removeAllDMLStream()
defer chMgr.removeAllDQLStream()
err = chMgr.createDMLMsgStream(collectionID)
assert.NoError(t, err)

View File

@ -55,10 +55,7 @@ type Cluster interface {
watchDmChannels(ctx context.Context, nodeID int64, in *querypb.WatchDmChannelsRequest) error
watchDeltaChannels(ctx context.Context, nodeID int64, in *querypb.WatchDeltaChannelsRequest) error
hasWatchedQueryChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool
hasWatchedDeltaChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool
addQueryChannel(ctx context.Context, nodeID int64, in *querypb.AddQueryChannelRequest) error
removeQueryChannel(ctx context.Context, nodeID int64, in *querypb.RemoveQueryChannelRequest) error
releaseCollection(ctx context.Context, nodeID int64, in *querypb.ReleaseCollectionRequest) error
releasePartitions(ctx context.Context, nodeID int64, in *querypb.ReleasePartitionsRequest) error
getSegmentInfo(ctx context.Context, in *querypb.GetSegmentInfoRequest) ([]*querypb.SegmentInfo, error)
@ -311,64 +308,6 @@ func (c *queryNodeCluster) hasWatchedDeltaChannel(ctx context.Context, nodeID in
return c.nodes[nodeID].hasWatchedDeltaChannel(collectionID)
}
func (c *queryNodeCluster) hasWatchedQueryChannel(ctx context.Context, nodeID int64, collectionID UniqueID) bool {
c.RLock()
defer c.RUnlock()
return c.nodes[nodeID].hasWatchedQueryChannel(collectionID)
}
func (c *queryNodeCluster) addQueryChannel(ctx context.Context, nodeID int64, in *querypb.AddQueryChannelRequest) error {
c.RLock()
var targetNode Node
if node, ok := c.nodes[nodeID]; ok {
targetNode = node
}
c.RUnlock()
if targetNode != nil {
emptyChangeInfo := &querypb.SealedSegmentsChangeInfo{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_SealedSegmentsChangeInfo,
},
}
msgPosition, err := c.clusterMeta.sendSealedSegmentChangeInfos(in.CollectionID, in.QueryChannel, emptyChangeInfo)
if err != nil {
return err
}
// update watch position to latest
in.SeekPosition = msgPosition
err = targetNode.addQueryChannel(ctx, in)
if err != nil {
return err
}
return nil
}
return fmt.Errorf("addQueryChannel: can't find QueryNode by nodeID, nodeID = %d", nodeID)
}
func (c *queryNodeCluster) removeQueryChannel(ctx context.Context, nodeID int64, in *querypb.RemoveQueryChannelRequest) error {
c.RLock()
var targetNode Node
if node, ok := c.nodes[nodeID]; ok {
targetNode = node
}
c.RUnlock()
if targetNode != nil {
err := targetNode.removeQueryChannel(ctx, in)
if err != nil {
log.Warn("removeQueryChannel: queryNode remove query channel error", zap.String("error", err.Error()))
return err
}
return nil
}
return fmt.Errorf("removeQueryChannel: can't find QueryNode by nodeID, nodeID = %d", nodeID)
}
func (c *queryNodeCluster) releaseCollection(ctx context.Context, nodeID int64, in *querypb.ReleaseCollectionRequest) error {
c.RLock()
var targetNode Node
@ -724,10 +663,6 @@ func (c *queryNodeCluster) isOnline(nodeID int64) (bool, error) {
// log.Debug("PrintMeta: query coordinator cluster info: collectionInfo", zap.Int64("nodeID", id), zap.Int64("collectionID", info.CollectionID), zap.Any("info", info))
// }
//
// queryChannelInfos := node.showWatchedQueryChannels()
// for _, info := range queryChannelInfos {
// log.Debug("PrintMeta: query coordinator cluster info: watchedQueryChannelInfo", zap.Int64("nodeID", id), zap.Int64("collectionID", info.CollectionID), zap.Any("info", info))
// }
// }
// }
//}

View File

@ -496,30 +496,6 @@ func TestGrpcRequest(t *testing.T) {
assert.Nil(t, err)
})
t.Run("Test AddQueryChannel", func(t *testing.T) {
info := cluster.clusterMeta.getQueryChannelInfoByID(defaultCollectionID)
addQueryChannelReq := &querypb.AddQueryChannelRequest{
NodeID: nodeID,
CollectionID: defaultCollectionID,
QueryChannel: info.QueryChannel,
QueryResultChannel: info.QueryResultChannel,
}
err = cluster.addQueryChannel(baseCtx, nodeID, addQueryChannelReq)
assert.Nil(t, err)
})
t.Run("Test RemoveQueryChannel", func(t *testing.T) {
info := cluster.clusterMeta.getQueryChannelInfoByID(defaultCollectionID)
removeQueryChannelReq := &querypb.RemoveQueryChannelRequest{
NodeID: nodeID,
CollectionID: defaultCollectionID,
QueryChannel: info.QueryChannel,
QueryResultChannel: info.QueryResultChannel,
}
err = cluster.removeQueryChannel(baseCtx, nodeID, removeQueryChannelReq)
assert.Nil(t, err)
})
t.Run("Test GetSegmentInfo", func(t *testing.T) {
getSegmentInfoReq := &querypb.GetSegmentInfoRequest{
Base: &commonpb.MsgBase{

View File

@ -47,32 +47,6 @@ func newGlobalMetaBroker(ctx context.Context, rootCoord types.RootCoord, dataCoo
return parser, nil
}
// releaseDQLMessageStream notifies RootCoord to release the DQL message stream
// (query channel resources) associated with the given collection. It returns an
// error when the RPC fails or when RootCoord reports a non-Success status.
func (broker *globalMetaBroker) releaseDQLMessageStream(ctx context.Context, collectionID UniqueID) error {
	// bound the RPC with the shared timeout so a stuck RootCoord cannot block the caller forever
	ctx2, cancel2 := context.WithTimeout(ctx, timeoutForRPC)
	defer cancel2()
	releaseDQLMessageStreamReq := &proxypb.ReleaseDQLMessageStreamRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_RemoveQueryChannels,
		},
		CollectionID: collectionID,
	}
	// TODO(yah01): check whether RootCoord returns error if QueryChannel not exists
	res, err := broker.rootCoord.ReleaseDQLMessageStream(ctx2, releaseDQLMessageStreamReq)
	if err != nil {
		log.Error("releaseDQLMessageStream occur error", zap.Int64("collectionID", collectionID), zap.Error(err))
		return err
	}
	if res.ErrorCode != commonpb.ErrorCode_Success {
		// RPC transport succeeded but RootCoord rejected the request; surface its reason as an error
		err = errors.New(res.Reason)
		log.Error("releaseDQLMessageStream occur error", zap.Int64("collectionID", collectionID), zap.Error(err))
		return err
	}
	log.Info("releaseDQLMessageStream successfully", zap.Int64("collectionID", collectionID))
	return nil
}
// invalidateCollectionMetaCache notifies RootCoord to remove all the collection meta cache with the specified collectionID in Proxies
func (broker *globalMetaBroker) invalidateCollectionMetaCache(ctx context.Context, collectionID UniqueID) error {
ctx1, cancel1 := context.WithTimeout(ctx, timeoutForRPC)

View File

@ -39,8 +39,6 @@ func TestGlobalMetaBroker_RootCoord(t *testing.T) {
assert.Nil(t, err)
t.Run("successCase", func(t *testing.T) {
err = handler.releaseDQLMessageStream(ctx, defaultCollectionID)
assert.Nil(t, err)
err = handler.invalidateCollectionMetaCache(ctx, defaultCollectionID)
assert.NoError(t, err)
enableIndex, _, err := handler.getIndexBuildID(ctx, defaultCollectionID, defaultSegmentID)
@ -52,8 +50,6 @@ func TestGlobalMetaBroker_RootCoord(t *testing.T) {
t.Run("returnError", func(t *testing.T) {
rootCoord.returnError = true
err = handler.releaseDQLMessageStream(ctx, defaultCollectionID)
assert.Error(t, err)
err = handler.invalidateCollectionMetaCache(ctx, defaultCollectionID)
assert.Error(t, err)
_, _, err = handler.getIndexBuildID(ctx, defaultCollectionID, defaultSegmentID)
@ -65,8 +61,6 @@ func TestGlobalMetaBroker_RootCoord(t *testing.T) {
t.Run("returnGrpcError", func(t *testing.T) {
rootCoord.returnGrpcError = true
err = handler.releaseDQLMessageStream(ctx, defaultCollectionID)
assert.Error(t, err)
err = handler.invalidateCollectionMetaCache(ctx, defaultCollectionID)
assert.Error(t, err)
_, _, err = handler.getIndexBuildID(ctx, defaultCollectionID, defaultSegmentID)

View File

@ -768,39 +768,6 @@ func (qc *QueryCoord) ReleasePartitions(ctx context.Context, req *querypb.Releas
return status, nil
}
// CreateQueryChannel assigns a unique query channel and result channel to the
// specified collection. The channel names are looked up from (and recorded in)
// the coordinator's meta by collection ID, so repeated calls for the same
// collection return the same pair. Errors are reported through the response
// Status; the returned error is always nil.
func (qc *QueryCoord) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
	log.Debug("createQueryChannelRequest received",
		zap.String("role", typeutil.QueryCoordRole),
		zap.Int64("collectionID", req.CollectionID))
	status := &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}
	// refuse to serve while the coordinator is not healthy; report via Status, not error
	if qc.stateCode.Load() != internalpb.StateCode_Healthy {
		status.ErrorCode = commonpb.ErrorCode_UnexpectedError
		err := errors.New("QueryCoord is not healthy")
		status.Reason = err.Error()
		log.Error("createQueryChannel failed", zap.String("role", typeutil.QueryCoordRole), zap.Error(err))
		return &querypb.CreateQueryChannelResponse{
			Status: status,
		}, nil
	}
	collectionID := req.CollectionID
	// meta resolves the query/result channel pair for this collection
	info := qc.meta.getQueryChannelInfoByID(collectionID)
	log.Debug("createQueryChannelRequest completed",
		zap.String("role", typeutil.QueryCoordRole),
		zap.Int64("collectionID", collectionID),
		zap.String("request channel", info.QueryChannel),
		zap.String("result channel", info.QueryResultChannel))
	return &querypb.CreateQueryChannelResponse{
		Status:             status,
		QueryChannel:       info.QueryChannel,
		QueryResultChannel: info.QueryResultChannel,
	}, nil
}
// GetPartitionStates returns state of the partition, including notExist, notPresent, onDisk, partitionInMemory, inMemory, partitionInGPU, InGPU
func (qc *QueryCoord) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
log.Info("getPartitionStatesRequest received",

View File

@ -366,14 +366,6 @@ func TestGrpcTask(t *testing.T) {
assert.Nil(t, err)
})
t.Run("Test CreateQueryChannel", func(t *testing.T) {
res, err := queryCoord.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{
CollectionID: defaultCollectionID,
})
assert.Equal(t, commonpb.ErrorCode_Success, res.Status.ErrorCode)
assert.Nil(t, err)
})
t.Run("Test LoadBalance", func(t *testing.T) {
res, err := queryCoord.LoadBalance(ctx, &querypb.LoadBalanceRequest{
Base: &commonpb.MsgBase{
@ -766,14 +758,6 @@ func TestGrpcTaskBeforeHealthy(t *testing.T) {
assert.Nil(t, err)
})
t.Run("Test CreateQueryChannel", func(t *testing.T) {
res, err := unHealthyCoord.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{
CollectionID: defaultCollectionID,
})
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, res.Status.ErrorCode)
assert.Nil(t, err)
})
t.Run("Test GetMetrics", func(t *testing.T) {
metricReq := make(map[string]string)
metricReq[metricsinfo.MetricTypeKey] = "system_info"

View File

@ -33,7 +33,6 @@ import (
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
@ -87,14 +86,12 @@ type Meta interface {
setDeltaChannel(collectionID UniqueID, info []*datapb.VchannelInfo) error
getQueryChannelInfoByID(collectionID UniqueID) *querypb.QueryChannelInfo
getQueryStreamByID(collectionID UniqueID, queryChannel string) (msgstream.MsgStream, error)
setLoadType(collectionID UniqueID, loadType querypb.LoadType) error
setLoadPercentage(collectionID UniqueID, partitionID UniqueID, percentage int64, loadType querypb.LoadType) error
//printMeta()
saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2SealedSegmentChangeInfos, error)
removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) (col2SealedSegmentChangeInfos, error)
sendSealedSegmentChangeInfos(collectionID UniqueID, queryChannel string, changeInfos *querypb.SealedSegmentsChangeInfo) (*internalpb.MsgPosition, error)
getWatchedChannelsByNodeID(nodeID int64) *querypb.UnsubscribeChannelInfo
@ -127,8 +124,6 @@ type MetaReplica struct {
deltaChannelMu sync.RWMutex
dmChannelInfos map[string]*querypb.DmChannelWatchInfo
dmChannelMu sync.RWMutex
queryStreams map[UniqueID]msgstream.MsgStream
streamMu sync.RWMutex
segmentsInfo *segmentsInfo
//partitionStates map[UniqueID]*querypb.PartitionStates
@ -141,7 +136,6 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAl
queryChannelInfos := make(map[UniqueID]*querypb.QueryChannelInfo)
deltaChannelInfos := make(map[UniqueID][]*datapb.VchannelInfo)
dmChannelInfos := make(map[string]*querypb.DmChannelWatchInfo)
queryMsgStream := make(map[UniqueID]msgstream.MsgStream)
m := &MetaReplica{
ctx: childCtx,
@ -153,7 +147,6 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAl
queryChannelInfos: queryChannelInfos,
deltaChannelInfos: deltaChannelInfos,
dmChannelInfos: dmChannelInfos,
queryStreams: queryMsgStream,
segmentsInfo: newSegmentsInfo(kv),
replicas: NewReplicaInfos(),
@ -632,36 +625,28 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
}
col2SegmentChangeInfos[collectionID] = segmentsChangeInfo
}
queryChannelInfosMap := make(map[UniqueID]*querypb.QueryChannelInfo)
for collectionID, segmentChangeInfos := range col2SegmentChangeInfos {
// get msgStream to produce sealedSegmentChangeInfos to query channel
for collectionID, infos := range saves {
// TODO silverxia change QueryChannelInfo struct to simplifed one
// queryChannelInfo contains the GlobalSealedSegment list
queryChannelInfo := m.getQueryChannelInfoByID(collectionID)
msgPosition, err := m.sendSealedSegmentChangeInfos(collectionID, queryChannelInfo.QueryChannel, segmentChangeInfos)
if err != nil {
return nil, err
}
queryChannelInfo.SeekPosition = msgPosition
// update segmentInfo, queryChannelInfo meta to cache and etcd
// merge save segment info and existing GlobalSealedSegments
seg2Info := make(map[UniqueID]*querypb.SegmentInfo)
for _, segmentInfo := range queryChannelInfo.GlobalSealedSegments {
segmentID := segmentInfo.SegmentID
seg2Info[segmentID] = segmentInfo
}
if infos, ok := saves[collectionID]; ok {
for _, segmentInfo := range infos {
segmentID := segmentInfo.SegmentID
seg2Info[segmentID] = segmentInfo
}
for _, segmentInfo := range infos {
segmentID := segmentInfo.SegmentID
seg2Info[segmentID] = segmentInfo
}
globalSealedSegmentInfos := make([]*querypb.SegmentInfo, 0)
globalSealedSegmentInfos := make([]*querypb.SegmentInfo, len(seg2Info))
for _, info := range seg2Info {
globalSealedSegmentInfos = append(globalSealedSegmentInfos, info)
}
queryChannelInfo.GlobalSealedSegments = globalSealedSegmentInfos
queryChannelInfosMap[collectionID] = queryChannelInfo
}
// save segmentInfo to etcd
@ -700,6 +685,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
panic(err)
}
// Write back to cache
m.channelMu.Lock()
for collectionID, channelInfo := range queryChannelInfosMap {
m.queryChannelInfos[collectionID] = channelInfo
@ -733,11 +719,6 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
// produce sealedSegmentChangeInfos to query channel
queryChannelInfo := m.getQueryChannelInfoByID(collectionID)
msgPosition, err := m.sendSealedSegmentChangeInfos(collectionID, queryChannelInfo.QueryChannel, segmentChangeInfos)
if err != nil {
return nil, err
}
queryChannelInfo.SeekPosition = msgPosition
// update segmentInfo, queryChannelInfo meta to cache and etcd
seg2Info := make(map[UniqueID]*querypb.SegmentInfo)
@ -759,7 +740,7 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
// remove meta from etcd
for _, info := range removes {
if err = m.segmentsInfo.removeSegment(info); err != nil {
if err := m.segmentsInfo.removeSegment(info); err != nil {
panic(err)
}
}
@ -788,55 +769,6 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
return col2SealedSegmentChangeInfos{collectionID: segmentChangeInfos}, nil
}
// sendSealedSegmentChangeInfos produces the sealed-segment change info message
// into the collection's query channel and returns the resulting message
// position (used by callers as the channel's seek position).
func (m *MetaReplica) sendSealedSegmentChangeInfos(collectionID UniqueID, queryChannel string, changeInfos *querypb.SealedSegmentsChangeInfo) (*internalpb.MsgPosition, error) {
	// get msgStream to produce sealedSegmentChangeInfos to query channel
	queryStream, err := m.getQueryStreamByID(collectionID, queryChannel)
	if err != nil {
		log.Error("sendSealedSegmentChangeInfos: get query stream failed", zap.Int64("collectionID", collectionID), zap.Error(err))
		return nil, err
	}
	var msgPack = &msgstream.MsgPack{
		Msgs: []msgstream.TsMsg{},
	}
	// allocate a fresh message ID for this change-info message
	id, err := m.idAllocator()
	if err != nil {
		log.Error("sendSealedSegmentChangeInfos: allocator trigger taskID failed", zap.Int64("collectionID", collectionID), zap.Error(err))
		return nil, err
	}
	changeInfos.Base.MsgID = id
	segmentChangeMsg := &msgstream.SealedSegmentsChangeInfoMsg{
		BaseMsg: msgstream.BaseMsg{
			HashValues: []uint32{0},
		},
		SealedSegmentsChangeInfo: *changeInfos,
	}
	msgPack.Msgs = append(msgPack.Msgs, segmentChangeMsg)
	// ProduceMark also returns the message IDs per channel, which we need to build the position
	messageIDInfos, err := queryStream.ProduceMark(msgPack)
	if err != nil {
		log.Error("sendSealedSegmentChangeInfos: send sealed segment change info failed", zap.Int64("collectionID", collectionID), zap.Error(err))
		return nil, err
	}
	messageIDs, ok := messageIDInfos[queryChannel]
	if !ok {
		return nil, fmt.Errorf("sendSealedSegmentChangeInfos: send sealed segment change info to wrong query channel, collectionID = %d, query channel = %s", collectionID, queryChannel)
	}
	// len(messageIDs) = 1: exactly one message was produced, so exactly one position is expected
	if len(messageIDs) != 1 {
		return nil, fmt.Errorf("sendSealedSegmentChangeInfos: length of the positions in stream is not correct, collectionID = %d, query channel = %s, len = %d", collectionID, queryChannel, len(messageIDs))
	}
	log.Info("updateGlobalSealedSegmentInfos: send sealed segment change info to queryChannel", zap.Any("msgPack", msgPack))
	return &internalpb.MsgPosition{
		ChannelName: queryChannel,
		MsgID:       messageIDs[0].Serialize(),
	}, nil
}
func (m *MetaReplica) showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo {
ignorePartitionCmp := len(partitionIDs) == 0
partitionFilter := make(map[int64]struct{})
@ -1023,29 +955,6 @@ func (m *MetaReplica) getQueryChannelInfoByID(collectionID UniqueID) *querypb.Qu
return proto.Clone(channelInfo).(*querypb.QueryChannelInfo)
}
// getQueryStreamByID returns the msgstream producer for the collection's query
// channel, lazily creating it on first use and caching it in m.queryStreams.
// Guarded by streamMu so concurrent callers share a single stream per collection.
func (m *MetaReplica) getQueryStreamByID(collectionID UniqueID, queryChannel string) (msgstream.MsgStream, error) {
	m.streamMu.Lock()
	defer m.streamMu.Unlock()
	var queryStream msgstream.MsgStream
	var err error
	if stream, ok := m.queryStreams[collectionID]; ok {
		// reuse the cached stream for this collection
		queryStream = stream
	} else {
		queryStream, err = m.factory.NewMsgStream(m.ctx)
		if err != nil {
			log.Error("updateGlobalSealedSegmentInfos: create msgStream failed", zap.Error(err))
			return nil, err
		}
		// register as producer on the query channel and cache for later calls
		queryStream.AsProducer([]string{queryChannel})
		m.queryStreams[collectionID] = queryStream
		log.Info("getQueryStreamByID: create query msgStream for collection", zap.Int64("collectionID", collectionID))
	}
	return queryStream, nil
}
func (m *MetaReplica) setLoadType(collectionID UniqueID, loadType querypb.LoadType) error {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()

View File

@ -43,7 +43,6 @@ type queryNodeClientMock struct {
}
func newQueryNodeTest(ctx context.Context, address string, id UniqueID, kv *etcdkv.EtcdKV) (Node, error) {
watchedChannels := make(map[UniqueID]*querypb.QueryChannelInfo)
watchedDeltaChannels := make(map[UniqueID][]*datapb.VchannelInfo)
childCtx, cancel := context.WithCancel(ctx)
client, err := newQueryNodeClientMock(childCtx, address)
@ -58,7 +57,6 @@ func newQueryNodeTest(ctx context.Context, address string, id UniqueID, kv *etcd
address: address,
client: client,
kvClient: kv,
watchedQueryChannels: watchedChannels,
watchedDeltaChannels: watchedDeltaChannels,
}
@ -118,14 +116,6 @@ func (client *queryNodeClientMock) GetStatisticsChannel(ctx context.Context) (*m
return client.grpcClient.GetStatisticsChannel(ctx, &internalpb.GetStatisticsChannelRequest{})
}
// AddQueryChannel forwards the request to the wrapped grpc client unchanged.
func (client *queryNodeClientMock) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
	status, err := client.grpcClient.AddQueryChannel(ctx, req)
	return status, err
}
// RemoveQueryChannel forwards the request to the wrapped grpc client unchanged.
func (client *queryNodeClientMock) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
	status, err := client.grpcClient.RemoveQueryChannel(ctx, req)
	return status, err
}
// WatchDmChannels forwards the request to the wrapped grpc client unchanged.
func (client *queryNodeClientMock) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
	status, err := client.grpcClient.WatchDmChannels(ctx, req)
	return status, err
}

View File

@ -63,8 +63,6 @@ type queryNodeServerMock struct {
queryNodeID int64
rwmutex sync.RWMutex // guard for all modification
addQueryChannels rpcHandler
removeQueryChannels rpcHandler
watchDmChannels rpcHandler
watchDeltaChannels rpcHandler
loadSegment rpcHandler
@ -88,8 +86,6 @@ func newQueryNodeServerMock(ctx context.Context) *queryNodeServerMock {
grpcErrChan: make(chan error),
rwmutex: sync.RWMutex{},
addQueryChannels: returnSuccessResult,
removeQueryChannels: returnSuccessResult,
watchDmChannels: returnSuccessResult,
watchDeltaChannels: returnSuccessResult,
loadSegment: returnSuccessResult,
@ -203,14 +199,6 @@ func (qs *queryNodeServerMock) GetComponentStates(ctx context.Context, req *inte
}, nil
}
// AddQueryChannel replies with whatever the configured addQueryChannels handler returns.
func (qs *queryNodeServerMock) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
	handler := qs.addQueryChannels
	return handler()
}
// RemoveQueryChannel replies with whatever the configured removeQueryChannels handler returns.
func (qs *queryNodeServerMock) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
	handler := qs.removeQueryChannels
	return handler()
}
// WatchDmChannels replies with whatever the configured watchDmChannels handler returns.
func (qs *queryNodeServerMock) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
	handler := qs.watchDmChannels
	return handler()
}

View File

@ -60,11 +60,6 @@ type UniqueID = typeutil.UniqueID
// Timestamp is an alias for the Int64 type
type Timestamp = typeutil.Timestamp
// queryChannelInfo pairs the two channel names used for querying a collection.
// NOTE(review): presumably requestChannel carries query requests and
// responseChannel carries results — confirm against the channel producers.
type queryChannelInfo struct {
	requestChannel  string // query request channel name
	responseChannel string // query result channel name
}
// Params is param table of query coordinator
var Params paramtable.ComponentParam

View File

@ -51,10 +51,6 @@ type Node interface {
//removeDmChannel(collectionID UniqueID, channels []string) error
hasWatchedDeltaChannel(collectionID UniqueID) bool
hasWatchedQueryChannel(collectionID UniqueID) bool
//showWatchedQueryChannels() []*querypb.QueryChannelInfo
addQueryChannel(ctx context.Context, in *querypb.AddQueryChannelRequest) error
removeQueryChannel(ctx context.Context, in *querypb.RemoveQueryChannelRequest) error
setState(state nodeState)
getState() nodeState
@ -78,7 +74,6 @@ type queryNode struct {
kvClient *etcdkv.EtcdKV
sync.RWMutex
watchedQueryChannels map[UniqueID]*querypb.QueryChannelInfo
watchedDeltaChannels map[UniqueID][]*datapb.VchannelInfo
state nodeState
stateLock sync.RWMutex
@ -90,7 +85,6 @@ type queryNode struct {
}
func newQueryNode(ctx context.Context, address string, id UniqueID, kv *etcdkv.EtcdKV) (Node, error) {
watchedChannels := make(map[UniqueID]*querypb.QueryChannelInfo)
watchedDeltaChannels := make(map[UniqueID][]*datapb.VchannelInfo)
childCtx, cancel := context.WithCancel(ctx)
client, err := nodeclient.NewClient(childCtx, address)
@ -105,7 +99,6 @@ func newQueryNode(ctx context.Context, address string, id UniqueID, kv *etcdkv.E
address: address,
client: client,
kvClient: kv,
watchedQueryChannels: watchedChannels,
watchedDeltaChannels: watchedDeltaChannels,
state: disConnect,
}
@ -142,17 +135,6 @@ func (qn *queryNode) stop() {
qn.cancel()
}
// hasWatchedQueryChannel reports whether this node has a watched query channel
// recorded for the given collection.
func (qn *queryNode) hasWatchedQueryChannel(collectionID UniqueID) bool {
	// read-lock while probing the watched-channel map
	qn.RLock()
	defer qn.RUnlock()

	_, watched := qn.watchedQueryChannels[collectionID]
	return watched
}
func (qn *queryNode) hasWatchedDeltaChannel(collectionID UniqueID) bool {
qn.RLock()
defer qn.RUnlock()
@ -161,18 +143,6 @@ func (qn *queryNode) hasWatchedDeltaChannel(collectionID UniqueID) bool {
return ok
}
//func (qn *queryNode) showWatchedQueryChannels() []*querypb.QueryChannelInfo {
// qn.RLock()
// defer qn.RUnlock()
//
// results := make([]*querypb.QueryChannelInfo, 0)
// for _, info := range qn.watchedQueryChannels {
// results = append(results, proto.Clone(info).(*querypb.QueryChannelInfo))
// }
//
// return results
//}
func (qn *queryNode) setDeltaChannelInfo(collectionID int64, infos []*datapb.VchannelInfo) {
qn.Lock()
defer qn.Unlock()
@ -180,20 +150,6 @@ func (qn *queryNode) setDeltaChannelInfo(collectionID int64, infos []*datapb.Vch
qn.watchedDeltaChannels[collectionID] = infos
}
// setQueryChannelInfo records the watched query channel info for the info's
// collection, replacing any previous entry. Takes the exclusive lock since it
// mutates the watchedQueryChannels map.
func (qn *queryNode) setQueryChannelInfo(info *querypb.QueryChannelInfo) {
	qn.Lock()
	defer qn.Unlock()
	qn.watchedQueryChannels[info.CollectionID] = info
}
// removeQueryChannelInfo drops the watched query channel entry for the
// collection; a no-op if no entry exists (map delete is safe on missing keys).
func (qn *queryNode) removeQueryChannelInfo(collectionID UniqueID) {
	qn.Lock()
	defer qn.Unlock()
	delete(qn.watchedQueryChannels, collectionID)
}
func (qn *queryNode) setState(state nodeState) {
qn.stateLock.Lock()
defer qn.stateLock.Unlock()
@ -255,45 +211,6 @@ func (qn *queryNode) watchDeltaChannels(ctx context.Context, in *querypb.WatchDe
return err
}
// addQueryChannel asks the QueryNode to subscribe the collection's query
// channel, and on success records the watched channel info locally. Returns an
// error if the node is offline, the RPC fails, or the node reports non-Success.
func (qn *queryNode) addQueryChannel(ctx context.Context, in *querypb.AddQueryChannelRequest) error {
	if !qn.isOnline() {
		return errors.New("AddQueryChannel: queryNode is offline")
	}

	// NOTE(review): the RPC uses qn.ctx rather than the ctx parameter, so the
	// caller's deadline/cancellation is not propagated — confirm this is intended
	status, err := qn.client.AddQueryChannel(qn.ctx, in)
	if err != nil {
		return err
	}
	if status.ErrorCode != commonpb.ErrorCode_Success {
		return errors.New(status.Reason)
	}

	// mirror the accepted channel assignment into local node state
	queryChannelInfo := &querypb.QueryChannelInfo{
		CollectionID:       in.CollectionID,
		QueryChannel:       in.QueryChannel,
		QueryResultChannel: in.QueryResultChannel,
	}
	qn.setQueryChannelInfo(queryChannelInfo)
	return nil
}
// removeQueryChannel asks the QueryNode to unsubscribe the collection's query
// channel and drops the local watched-channel record on success. An offline
// node is treated as success (nil): there is nothing to remove on it.
func (qn *queryNode) removeQueryChannel(ctx context.Context, in *querypb.RemoveQueryChannelRequest) error {
	if !qn.isOnline() {
		return nil
	}

	// NOTE(review): uses qn.ctx, not the ctx parameter — caller cancellation
	// does not propagate to this RPC; confirm this is intended
	status, err := qn.client.RemoveQueryChannel(qn.ctx, in)
	if err != nil {
		return err
	}
	if status.ErrorCode != commonpb.ErrorCode_Success {
		return errors.New(status.Reason)
	}

	qn.removeQueryChannelInfo(in.CollectionID)
	return nil
}
func (qn *queryNode) releaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) error {
if !qn.isOnline() {
log.Warn("ReleaseCollection: the QueryNode has been offline, the release request is no longer needed", zap.Int64("nodeID", qn.id))
@ -310,7 +227,6 @@ func (qn *queryNode) releaseCollection(ctx context.Context, in *querypb.ReleaseC
qn.Lock()
delete(qn.watchedDeltaChannels, in.CollectionID)
delete(qn.watchedQueryChannels, in.CollectionID)
qn.Unlock()
return nil

View File

@ -310,18 +310,6 @@ func TestGrpcRequestWithNodeOffline(t *testing.T) {
assert.NotNil(t, err)
})
t.Run("Test AddQueryChannel", func(t *testing.T) {
req := &querypb.AddQueryChannelRequest{}
err = node.addQueryChannel(baseCtx, req)
assert.NotNil(t, err)
})
t.Run("Test RemoveQueryChannel", func(t *testing.T) {
req := &querypb.RemoveQueryChannelRequest{}
err = node.removeQueryChannel(baseCtx, req)
assert.Nil(t, err)
})
t.Run("Test ReleaseCollection", func(t *testing.T) {
req := &querypb.ReleaseCollectionRequest{}
err = node.releaseCollection(baseCtx, req)

View File

@ -182,7 +182,7 @@ func (bt *baseTask) getParentTask() task {
}
// GetChildTask function returns all the child tasks of the trigger task
// Child task may be loadSegmentTask, watchDmChannelTask or watchQueryChannelTask
// Child task may be loadSegmentTask or watchDmChannelTask
func (bt *baseTask) getChildTask() []task {
bt.childTasksMu.RLock()
defer bt.childTasksMu.RUnlock()
@ -327,19 +327,10 @@ func (lct *loadCollectionTask) updateTaskProcess() {
break
}
// wait watchDeltaChannel and watchQueryChannel task done after loading segment
// wait watchDeltaChannel task done after loading segment
nodeID := getDstNodeIDByTask(t)
if t.msgType() == commonpb.MsgType_LoadSegments {
if !lct.cluster.hasWatchedDeltaChannel(lct.ctx, nodeID, collectionID) ||
!lct.cluster.hasWatchedQueryChannel(lct.ctx, nodeID, collectionID) {
allDone = false
break
}
}
// wait watchQueryChannel task done after watch dmChannel
if t.msgType() == commonpb.MsgType_WatchDmChannels {
if !lct.cluster.hasWatchedQueryChannel(lct.ctx, nodeID, collectionID) {
if !lct.cluster.hasWatchedDeltaChannel(lct.ctx, nodeID, collectionID) {
allDone = false
break
}
@ -680,17 +671,8 @@ func (rct *releaseCollectionTask) execute(ctx context.Context) error {
// if nodeID ==0, it means that the release request has not been assigned to the specified query node
if rct.NodeID <= 0 {
ctx2, cancel2 := context.WithTimeout(rct.ctx, timeoutForRPC)
defer cancel2()
err := rct.broker.releaseDQLMessageStream(ctx2, collectionID)
if err != nil {
log.Error("releaseCollectionTask: release collection end, releaseDQLMessageStream occur error", zap.Int64("collectionID", rct.CollectionID), zap.Int64("msgID", rct.Base.MsgID), zap.Error(err))
rct.setResultInfo(err)
return err
}
// invalidate all the collection meta cache with the specified collectionID
err = rct.broker.invalidateCollectionMetaCache(ctx, collectionID)
err := rct.broker.invalidateCollectionMetaCache(ctx, collectionID)
if err != nil {
log.Error("releaseCollectionTask: release collection end, invalidateCollectionMetaCache occur error", zap.Int64("collectionID", rct.CollectionID), zap.Int64("msgID", rct.Base.MsgID), zap.Error(err))
rct.setResultInfo(err)
@ -789,19 +771,10 @@ func (lpt *loadPartitionTask) updateTaskProcess() {
allDone = false
}
// wait watchDeltaChannel and watchQueryChannel task done after loading segment
// wait watchDeltaChannel task done after loading segment
nodeID := getDstNodeIDByTask(t)
if t.msgType() == commonpb.MsgType_LoadSegments {
if !lpt.cluster.hasWatchedDeltaChannel(lpt.ctx, nodeID, collectionID) ||
!lpt.cluster.hasWatchedQueryChannel(lpt.ctx, nodeID, collectionID) {
allDone = false
break
}
}
// wait watchQueryChannel task done after watching dmChannel
if t.msgType() == commonpb.MsgType_WatchDmChannels {
if !lpt.cluster.hasWatchedQueryChannel(lpt.ctx, nodeID, collectionID) {
if !lpt.cluster.hasWatchedDeltaChannel(lpt.ctx, nodeID, collectionID) {
allDone = false
break
}
@ -1567,87 +1540,6 @@ func (wdt *watchDeltaChannelTask) postExecute(context.Context) error {
return nil
}
// watchQueryChannelTask is an internal (child) task that instructs a QueryNode
// to subscribe a collection's query channel; the request payload is embedded
// and the cluster handle is used to dispatch it to the target node.
type watchQueryChannelTask struct {
	*baseTask
	*querypb.AddQueryChannelRequest // target node, collection, and channel names
	cluster                         Cluster
}
// msgBase returns the embedded request's MsgBase.
func (wqt *watchQueryChannelTask) msgBase() *commonpb.MsgBase {
	return wqt.Base
}
// marshal serializes the embedded AddQueryChannelRequest (used to persist the task).
func (wqt *watchQueryChannelTask) marshal() ([]byte, error) {
	return proto.Marshal(wqt.AddQueryChannelRequest)
}
// isValid reports whether the task can still execute: its target node must be
// resolvable and online, and the task must still carry a context.
func (wqt *watchQueryChannelTask) isValid() bool {
	online, err := wqt.cluster.isOnline(wqt.NodeID)
	if err != nil {
		return false
	}
	return online && wqt.ctx != nil
}
// msgType returns the message type from the request's MsgBase.
func (wqt *watchQueryChannelTask) msgType() commonpb.MsgType {
	return wqt.Base.MsgType
}
// timestamp returns the timestamp from the request's MsgBase.
func (wqt *watchQueryChannelTask) timestamp() Timestamp {
	return wqt.Base.Timestamp
}
// updateTaskProcess forwards progress tracking to the trigger (parent) task;
// a child task has no standalone progress of its own.
func (wqt *watchQueryChannelTask) updateTaskProcess() {
	parent := wqt.getParentTask()
	if parent != nil {
		parent.updateTaskProcess()
		return
	}
	log.Warn("watchQueryChannelTask: parentTask should not be nil")
}
// preExecute clears any previous result info and logs the task parameters
// before execution starts. Always returns nil.
func (wqt *watchQueryChannelTask) preExecute(context.Context) error {
	wqt.setResultInfo(nil)
	log.Info("start do watchQueryChannelTask",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("loaded nodeID", wqt.NodeID),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}
// execute dispatches the AddQueryChannel request to the target node via the
// cluster. The retry budget is decremented regardless of outcome (deferred);
// on failure the error is recorded in the task's result info and returned.
func (wqt *watchQueryChannelTask) execute(ctx context.Context) error {
	defer wqt.reduceRetryCount()
	// NOTE(review): the call uses wqt.ctx rather than the ctx parameter — confirm intended
	err := wqt.cluster.addQueryChannel(wqt.ctx, wqt.NodeID, wqt.AddQueryChannelRequest)
	if err != nil {
		log.Warn("watchQueryChannelTask: watchQueryChannel occur error",
			zap.Int64("taskID", wqt.getTaskID()),
			zap.String("channel", wqt.AddQueryChannelRequest.QueryChannel),
			zap.Error(err))
		wqt.setResultInfo(err)
		return err
	}
	log.Info("watchQueryChannelTask Execute done",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}
// postExecute only logs task completion; no cleanup is required. Always returns nil.
func (wqt *watchQueryChannelTask) postExecute(context.Context) error {
	log.Info("watchQueryChannelTask postExecute done",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}
//****************************handoff task********************************//
type handoffTask struct {
*baseTask

View File

@ -35,7 +35,6 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/trace"
oplog "github.com/opentracing/opentracing-go/log"
)
@ -381,18 +380,8 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task,
}
newTask = watchDeltaChannelTask
case commonpb.MsgType_WatchQueryChannels:
//TODO::trigger condition may be different
loadReq := querypb.AddQueryChannelRequest{}
err = proto.Unmarshal([]byte(t), &loadReq)
if err != nil {
return nil, err
}
watchQueryChannelTask := &watchQueryChannelTask{
baseTask: baseTask,
AddQueryChannelRequest: &loadReq,
cluster: scheduler.cluster,
}
newTask = watchQueryChannelTask
//Deprecated WatchQueryChannel
log.Warn("legacy WatchQueryChannels type found, ignore")
case commonpb.MsgType_LoadBalanceSegments:
//TODO::trigger condition may be different
loadReq := querypb.LoadBalanceRequest{}
@ -490,8 +479,6 @@ func (scheduler *TaskScheduler) processTask(t task) error {
protoSize = proto.Size(childTask.(*watchDmChannelTask).WatchDmChannelsRequest)
case commonpb.MsgType_WatchDeltaChannels:
protoSize = proto.Size(childTask.(*watchDeltaChannelTask).WatchDeltaChannelsRequest)
case commonpb.MsgType_WatchQueryChannels:
protoSize = proto.Size(childTask.(*watchQueryChannelTask).AddQueryChannelRequest)
default:
//TODO::
}
@ -771,7 +758,6 @@ func (scheduler *TaskScheduler) waitActivateTaskDone(wg *sync.WaitGroup, t task,
saves[stateKey] = strconv.Itoa(int(taskUndo))
}
}
//TODO::queryNode auto watch queryChannel, then update etcd use same id directly
err = scheduler.client.MultiSaveAndRemove(saves, removes)
if err != nil {
log.Error("waitActivateTaskDone: error when save and remove task from etcd", zap.Int64("triggerTaskID", triggerTask.getTaskID()))
@ -898,14 +884,14 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
segmentInfosToSave := make(map[UniqueID][]*querypb.SegmentInfo)
segmentInfosToRemove := make(map[UniqueID][]*querypb.SegmentInfo)
var sealedSegmentChangeInfos col2SealedSegmentChangeInfos
//var sealedSegmentChangeInfos col2SealedSegmentChangeInfos
var err error
switch triggerTask.msgType() {
case commonpb.MsgType_ReleaseCollection:
// release all segmentInfo of the collection when release collection
req := triggerTask.(*releaseCollectionTask).ReleaseCollectionRequest
collectionID := req.CollectionID
sealedSegmentChangeInfos, err = meta.removeGlobalSealedSegInfos(collectionID, nil)
_, err = meta.removeGlobalSealedSegInfos(collectionID, nil)
case commonpb.MsgType_ReleasePartitions:
// release all segmentInfo of the partitions when release partitions
@ -920,7 +906,7 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
segmentInfosToRemove[collectionID] = append(segmentInfosToRemove[collectionID], info)
}
}
sealedSegmentChangeInfos, err = meta.removeGlobalSealedSegInfos(collectionID, req.PartitionIDs)
_, err = meta.removeGlobalSealedSegInfos(collectionID, req.PartitionIDs)
default:
// save new segmentInfo when load segment
@ -975,28 +961,11 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
log.Info("update segment info",
zap.Int64("triggerTaskID", triggerTask.getTaskID()),
zap.Any("segment", segmentInfosToSave))
sealedSegmentChangeInfos, err = meta.saveGlobalSealedSegInfos(segmentInfosToSave)
_, err = meta.saveGlobalSealedSegInfos(segmentInfosToSave)
}
// no need to rollback since etcd meta is not changed
if err != nil {
log.Error("Failed to update global sealed seg infos, begin to rollback", zap.Error(err))
rollBackSegmentChangeInfoErr := retry.Do(ctx, func() error {
rollBackChangeInfos := reverseSealedSegmentChangeInfo(sealedSegmentChangeInfos)
for collectionID, infos := range rollBackChangeInfos {
channelInfo := meta.getQueryChannelInfoByID(collectionID)
_, sendErr := meta.sendSealedSegmentChangeInfos(collectionID, channelInfo.QueryChannel, infos)
if sendErr != nil {
return sendErr
}
}
return nil
}, retry.Attempts(20))
if rollBackSegmentChangeInfoErr != nil {
log.Error("scheduleLoop: Restore the information of global sealed segments in query node failed", zap.Error(rollBackSegmentChangeInfoErr))
panic(rollBackSegmentChangeInfoErr)
} else {
log.Info("Successfully roll back segment info change")
}
return err
}
@ -1030,7 +999,6 @@ func reverseSealedSegmentChangeInfo(changeInfosMap map[UniqueID]*querypb.SealedS
// generateDerivedInternalTasks generate watchDeltaChannel and watchQueryChannel tasks
func generateDerivedInternalTasks(triggerTask task, meta Meta, cluster Cluster) ([]task, error) {
var derivedInternalTasks []task
watchQueryChannelInfo := make(map[int64]map[UniqueID]UniqueID)
watchDeltaChannelInfo := make(map[int64]map[UniqueID]UniqueID)
addChannelWatchInfoFn := func(nodeID int64, collectionID UniqueID, replicaID UniqueID, watchInfo map[int64]map[UniqueID]UniqueID) {
if _, ok := watchInfo[nodeID]; !ok {
@ -1046,48 +1014,10 @@ func generateDerivedInternalTasks(triggerTask task, meta Meta, cluster Cluster)
collectionID := loadSegmentTask.CollectionID
replicaID := loadSegmentTask.GetReplicaID()
nodeID := loadSegmentTask.DstNodeID
if !cluster.hasWatchedQueryChannel(triggerTask.traceCtx(), nodeID, collectionID) {
addChannelWatchInfoFn(nodeID, collectionID, replicaID, watchQueryChannelInfo)
}
if !cluster.hasWatchedDeltaChannel(triggerTask.traceCtx(), nodeID, collectionID) {
addChannelWatchInfoFn(nodeID, collectionID, replicaID, watchDeltaChannelInfo)
}
}
if childTask.msgType() == commonpb.MsgType_WatchDmChannels {
watchDmChannelTask := childTask.(*watchDmChannelTask)
collectionID := watchDmChannelTask.CollectionID
nodeID := watchDmChannelTask.NodeID
replicaID := watchDmChannelTask.GetReplicaID()
if !cluster.hasWatchedQueryChannel(triggerTask.traceCtx(), nodeID, collectionID) {
addChannelWatchInfoFn(nodeID, collectionID, replicaID, watchQueryChannelInfo)
}
}
}
for nodeID, collectionIDs := range watchQueryChannelInfo {
for collectionID := range collectionIDs {
queryChannelInfo := meta.getQueryChannelInfoByID(collectionID)
msgBase := proto.Clone(triggerTask.msgBase()).(*commonpb.MsgBase)
msgBase.MsgType = commonpb.MsgType_WatchQueryChannels
addQueryChannelRequest := &querypb.AddQueryChannelRequest{
Base: msgBase,
NodeID: nodeID,
CollectionID: collectionID,
QueryChannel: queryChannelInfo.QueryChannel,
QueryResultChannel: queryChannelInfo.QueryResultChannel,
GlobalSealedSegments: queryChannelInfo.GlobalSealedSegments,
}
baseTask := newBaseTask(triggerTask.traceCtx(), triggerTask.getTriggerCondition())
baseTask.setParentTask(triggerTask)
watchQueryChannelTask := &watchQueryChannelTask{
baseTask: baseTask,
AddQueryChannelRequest: addQueryChannelRequest,
cluster: cluster,
}
derivedInternalTasks = append(derivedInternalTasks, watchQueryChannelTask)
}
}
for nodeID, collectionIDs := range watchDeltaChannelInfo {

View File

@ -139,22 +139,6 @@ func (tt *testTask) execute(ctx context.Context) error {
excludeNodeIDs: []int64{},
}
tt.addChildTask(childTask)
case commonpb.MsgType_WatchQueryChannels:
childTask := &watchQueryChannelTask{
baseTask: &baseTask{
ctx: tt.ctx,
condition: newTaskCondition(tt.ctx),
triggerCondition: tt.triggerCondition,
},
AddQueryChannelRequest: &querypb.AddQueryChannelRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_WatchQueryChannels,
},
NodeID: tt.nodeID,
},
cluster: tt.cluster,
}
tt.addChildTask(childTask)
}
return nil
@ -174,7 +158,6 @@ func TestWatchQueryChannel_ClearEtcdInfoAfterAssignedNodeDown(t *testing.T) {
assert.Nil(t, err)
queryNode, err := startQueryNodeServer(baseCtx)
assert.Nil(t, err)
queryNode.addQueryChannels = returnFailedResult
nodeID := queryNode.queryNodeID
waitQueryNodeOnline(queryCoord.cluster, nodeID)
@ -389,27 +372,6 @@ func TestUnMarshalTask(t *testing.T) {
assert.Equal(t, task.msgType(), commonpb.MsgType_WatchDeltaChannels)
})
t.Run("Test watchQueryChannelTask", func(t *testing.T) {
watchTask := &watchQueryChannelTask{
AddQueryChannelRequest: &querypb.AddQueryChannelRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_WatchQueryChannels,
},
},
}
blobs, err := watchTask.marshal()
assert.Nil(t, err)
err = kv.Save("testMarshalWatchQueryChannel", string(blobs))
assert.Nil(t, err)
defer kv.RemoveWithPrefix("testMarshalWatchQueryChannel")
value, err := kv.Load("testMarshalWatchQueryChannel")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(1008, value)
assert.Nil(t, err)
assert.Equal(t, task.msgType(), commonpb.MsgType_WatchQueryChannels)
})
t.Run("Test loadBalanceTask", func(t *testing.T) {
loadBalanceTask := &loadBalanceTask{
LoadBalanceRequest: &querypb.LoadBalanceRequest{
@ -568,16 +530,13 @@ func Test_generateDerivedInternalTasks(t *testing.T) {
derivedTasks, err := generateDerivedInternalTasks(loadCollectionTask, queryCoord.meta, queryCoord.cluster)
assert.Nil(t, err)
assert.Equal(t, 2, len(derivedTasks))
assert.Equal(t, 1, len(derivedTasks))
for _, internalTask := range derivedTasks {
matchType := internalTask.msgType() == commonpb.MsgType_WatchDeltaChannels || internalTask.msgType() == commonpb.MsgType_WatchQueryChannels
matchType := internalTask.msgType() == commonpb.MsgType_WatchDeltaChannels
assert.Equal(t, true, matchType)
if internalTask.msgType() == commonpb.MsgType_WatchDeltaChannels {
assert.Equal(t, node1.queryNodeID, internalTask.(*watchDeltaChannelTask).NodeID)
}
if internalTask.msgType() == commonpb.MsgType_WatchQueryChannels {
assert.Equal(t, node1.queryNodeID, internalTask.(*watchQueryChannelTask).NodeID)
}
}
queryCoord.Stop()

View File

@ -249,26 +249,6 @@ func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int6
return loadSegmentTask
}
// genWatchQueryChannelTask builds a watchQueryChannelTask aimed at the given
// node, using the query channel info registered in meta for the default
// collection. The task ID is fixed to 200 for test determinism.
func genWatchQueryChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *watchQueryChannelTask {
	info := queryCoord.meta.getQueryChannelInfoByID(defaultCollectionID)
	base := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	base.taskID = 200
	return &watchQueryChannelTask{
		baseTask: base,
		AddQueryChannelRequest: &querypb.AddQueryChannelRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_WatchQueryChannels,
			},
			NodeID:             nodeID,
			CollectionID:       defaultCollectionID,
			QueryChannel:       info.QueryChannel,
			QueryResultChannel: info.QueryResultChannel,
		},
		cluster: queryCoord.cluster,
	}
}
func genWatchDeltaChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *watchDeltaChannelTask {
req := &querypb.WatchDeltaChannelsRequest{
Base: &commonpb.MsgBase{
@ -1361,11 +1341,8 @@ func TestUpdateTaskProcessWhenLoadSegment(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, int64(0), collectionInfo.InMemoryPercentage)
watchQueryChannel := genWatchQueryChannelTask(ctx, queryCoord, node1.queryNodeID)
watchQueryChannel.setParentTask(loadCollectionTask)
watchDeltaChannel := genWatchDeltaChannelTask(ctx, queryCoord, node1.queryNodeID)
watchDeltaChannel.setParentTask(loadCollectionTask)
queryCoord.scheduler.processTask(watchQueryChannel)
queryCoord.scheduler.processTask(watchDeltaChannel)
collectionInfo, err = queryCoord.meta.getCollectionInfoByID(defaultCollectionID)
assert.Nil(t, err)
@ -1387,16 +1364,12 @@ func TestUpdateTaskProcessWhenWatchDmChannel(t *testing.T) {
queryCoord.meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
watchDmChannel := genWatchDmChannelTask(ctx, queryCoord, node1.queryNodeID)
loadCollectionTask := watchDmChannel.getParentTask()
queryCoord.scheduler.processTask(watchDmChannel)
collectionInfo, err := queryCoord.meta.getCollectionInfoByID(defaultCollectionID)
assert.Nil(t, err)
assert.Equal(t, int64(0), collectionInfo.InMemoryPercentage)
queryCoord.scheduler.processTask(watchDmChannel)
watchQueryChannel := genWatchQueryChannelTask(ctx, queryCoord, node1.queryNodeID)
watchQueryChannel.setParentTask(loadCollectionTask)
queryCoord.scheduler.processTask(watchQueryChannel)
collectionInfo, err = queryCoord.meta.getCollectionInfoByID(defaultCollectionID)
assert.Nil(t, err)
assert.Equal(t, int64(100), collectionInfo.InMemoryPercentage)

View File

@ -90,9 +90,6 @@ func getDstNodeIDByTask(t task) int64 {
case commonpb.MsgType_WatchDeltaChannels:
watchDeltaChannel := t.(*watchDeltaChannelTask)
nodeID = watchDeltaChannel.NodeID
case commonpb.MsgType_WatchQueryChannels:
watchQueryChannel := t.(*watchQueryChannelTask)
nodeID = watchQueryChannel.NodeID
case commonpb.MsgType_ReleaseCollection:
releaseCollection := t.(*releaseCollectionTask)
nodeID = releaseCollection.NodeID

View File

@ -91,116 +91,6 @@ func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Stri
}, nil
}
// AddQueryChannel watch queryChannel of the collection to receive query message
//
// The request is wrapped into an addQueryChannelTask and pushed onto the
// node's task scheduler; the call blocks until the task finishes. All
// failures are reported through the returned Status (UnexpectedError), and
// the error return itself stays nil, matching the RPC convention used here.
func (node *QueryNode) AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelRequest) (*commonpb.Status, error) {
	// Reject the request unless the node is in the Healthy state.
	if code := node.stateCode.Load().(internalpb.StateCode); code != internalpb.StateCode_Healthy {
		err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    err.Error(),
		}, nil
	}
	task := &addQueryChannelTask{
		baseTask: baseTask{
			ctx:  ctx,
			done: make(chan error),
		},
		req:  in,
		node: node,
	}
	// Queue the task; scheduling failures surface in the status, not err.
	if err := node.scheduler.queue.Enqueue(task); err != nil {
		log.Warn(err.Error())
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    err.Error(),
		}, nil
	}
	log.Info("addQueryChannelTask Enqueue done",
		zap.Int64("collectionID", in.CollectionID),
		zap.String("queryChannel", in.QueryChannel),
		zap.String("queryResultChannel", in.QueryResultChannel),
	)
	// Block until the scheduler has executed the task.
	if err := task.WaitToFinish(); err != nil {
		log.Warn(err.Error())
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    err.Error(),
		}, nil
	}
	log.Info("addQueryChannelTask WaitToFinish done",
		zap.Int64("collectionID", in.CollectionID),
		zap.String("queryChannel", in.QueryChannel),
		zap.String("queryResultChannel", in.QueryResultChannel),
	)
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}
// RemoveQueryChannel remove queryChannel of the collection to stop receiving query message
//
// NOTE: the legacy msgstream-based removal logic has been disabled, so this
// endpoint is currently a no-op that always reports success to the caller.
func (node *QueryNode) RemoveQueryChannel(ctx context.Context, in *queryPb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}
// WatchDmChannels create consumers on dmChannels to receive Incremental datawhich is the important part of real-time query
func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
code := node.stateCode.Load().(internalpb.StateCode)

View File

@ -84,44 +84,6 @@ func TestImpl_GetStatisticsChannel(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
}
// TestImpl_AddQueryChannel verifies that AddQueryChannel succeeds on a
// healthy node and reports UnexpectedError (with a nil error return) once
// the node is marked Abnormal.
func TestImpl_AddQueryChannel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	node, err := genSimpleQueryNode(ctx)
	assert.NoError(t, err)
	req := &queryPb.AddQueryChannelRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadCollection,
			MsgID:   rand.Int63(),
		},
		NodeID:             0,
		CollectionID:       defaultCollectionID,
		QueryChannel:       genQueryChannel(),
		QueryResultChannel: genQueryResultChannel(),
	}
	// Healthy node: the request should be accepted.
	status, err := node.AddQueryChannel(ctx, req)
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
	// Abnormal node: rejection is reported via the status error code only.
	node.UpdateStateCode(internalpb.StateCode_Abnormal)
	status, err = node.AddQueryChannel(ctx, req)
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
}
// TestImpl_RemoveQueryChannel checks that RemoveQueryChannel — a no-op —
// returns a Success status even for a nil request.
func TestImpl_RemoveQueryChannel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	node, err := genSimpleQueryNode(ctx)
	assert.NoError(t, err)
	status, err := node.RemoveQueryChannel(ctx, nil)
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
}
func TestImpl_WatchDmChannels(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@ -48,13 +48,6 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
ID: node.session.ServerID,
},
SystemConfigurations: metricsinfo.QueryNodeConfiguration{
SearchReceiveBufSize: Params.QueryNodeCfg.SearchReceiveBufSize,
SearchPulsarBufSize: Params.QueryNodeCfg.SearchPulsarBufSize,
SearchResultReceiveBufSize: Params.QueryNodeCfg.SearchResultReceiveBufSize,
RetrieveReceiveBufSize: Params.QueryNodeCfg.RetrieveReceiveBufSize,
RetrievePulsarBufSize: Params.QueryNodeCfg.RetrievePulsarBufSize,
RetrieveResultReceiveBufSize: Params.QueryNodeCfg.RetrieveResultReceiveBufSize,
SimdType: Params.CommonCfg.SimdType,
},
}

View File

@ -1537,11 +1537,6 @@ func genRetrieveMsg(schema *schemapb.CollectionSchema) (*msgstream.RetrieveMsg,
return msg, nil
}
// genQueryChannel produces a pseudo-random query channel name for unit tests.
func genQueryChannel() Channel {
	const queryChannelPrefix = "query-node-unittest-query-channel-"
	suffix := strconv.Itoa(rand.Int())
	return queryChannelPrefix + suffix
}
func genQueryResultChannel() Channel {
const queryResultChannelPrefix = "query-node-unittest-query-result-channel-"
return queryResultChannelPrefix + strconv.Itoa(rand.Int())

View File

@ -1,148 +0,0 @@
package querynode
import (
"fmt"
"sync"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
// queryChannel simple query channel wrapper in query shard service
type queryChannel struct {
	closeCh      chan struct{} // closed exactly once in Stop to end consumeQuery
	collectionID int64         // collection this channel serves
	streaming      ReplicaInterface    // replica used to register excluded segments
	queryMsgStream msgstream.MsgStream // underlying msgstream drained by the worker
	shardCluster   *ShardClusterService // receives segment handoff notifications
	asConsumeOnce sync.Once // ensures AsConsumer subscribes/seeks only once
	closeOnce     sync.Once // makes Stop idempotent
}
// NewQueryChannel create a query channel with provided shardCluster, query msgstream and collection id
func NewQueryChannel(collectionID int64, scs *ShardClusterService, qms msgstream.MsgStream, streaming ReplicaInterface) *queryChannel {
	qc := &queryChannel{
		closeCh:        make(chan struct{}),
		collectionID:   collectionID,
		streaming:      streaming,
		queryMsgStream: qms,
		shardCluster:   scs,
	}
	return qc
}
// AsConsumer do AsConsumer for query msgstream and seek if position is not nil
//
// The subscription (and optional Seek) runs at most once per queryChannel via
// asConsumeOnce. NOTE(review): on second and later calls the Do body is
// skipped, so nil is returned even if the first call's Seek failed — confirm
// callers do not rely on the error being re-reported.
func (qc *queryChannel) AsConsumer(channelName string, subName string, position *internalpb.MsgPosition) error {
	var err error
	qc.asConsumeOnce.Do(func() {
		qc.queryMsgStream.AsConsumer([]string{channelName}, subName)
		// Track the consumer count for monitoring.
		metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
		if position == nil || len(position.MsgID) == 0 {
			// No usable position supplied: consume from the default position.
			log.Info("QueryNode AsConsumer", zap.String("channel", channelName), zap.String("sub name", subName))
		} else {
			err = qc.queryMsgStream.Seek([]*internalpb.MsgPosition{position})
			if err == nil {
				log.Info("querynode seek query channel: ", zap.Any("consumeChannel", channelName),
					zap.String("seek position", string(position.MsgID)))
			}
		}
	})
	return err
}
// Start start a goroutine for consume msg
//
// Both the msgstream and the consume loop run in background goroutines;
// they terminate when Stop is called (or the msgstream channel closes).
func (qc *queryChannel) Start() {
	go qc.queryMsgStream.Start()
	go qc.consumeQuery()
}
// Stop all workers and msgstream
//
// Idempotent via closeOnce: closes the msgstream and then closeCh, which
// makes the consumeQuery goroutine exit.
func (qc *queryChannel) Stop() {
	qc.closeOnce.Do(func() {
		qc.queryMsgStream.Close()
		close(qc.closeCh)
	})
}
// consumeQuery is the background worker started by Start. It drains the
// query msgstream until the channel is stopped (closeCh closed) or the
// msgstream channel itself closes, dispatching SealedSegmentsChangeInfo
// messages to adjustByChangeInfo and ignoring every other message type.
func (qc *queryChannel) consumeQuery() {
	for {
		select {
		case <-qc.closeCh:
			log.Info("query channel worker quit", zap.Int64("collection id", qc.collectionID))
			return
		case msgPack, ok := <-qc.queryMsgStream.Chan():
			if !ok {
				log.Warn("Receive Query Msg from chan failed", zap.Int64("collectionID", qc.collectionID))
				return
			}
			// Fix: dropped the redundant `!ok` re-check the original had here —
			// ok is provably true past the guard above. Empty packs are skipped.
			if msgPack == nil || len(msgPack.Msgs) == 0 {
				continue
			}
			for _, msg := range msgPack.Msgs {
				switch sm := msg.(type) {
				case *msgstream.SealedSegmentsChangeInfoMsg:
					qc.adjustByChangeInfo(sm)
				default:
					log.Warn("ignore msgs other than SegmentChangeInfo", zap.Any("msgType", msg.Type().String()))
				}
			}
		}
	}
}
// adjustByChangeInfo applies a sealed-segments change-info message: it hands
// the change to the shard cluster and registers every online segment as an
// excluded segment on the streaming replica so growing data is filtered out.
func (qc *queryChannel) adjustByChangeInfo(msg *msgstream.SealedSegmentsChangeInfoMsg) {
	for _, info := range msg.Infos {
		// precheck collection id, if not the same collection, skip
		// NOTE(review): a mismatch returns from the whole method, which also
		// skips any *remaining* infos in msg.Infos — confirm this is intended
		// (a `continue` would skip only the current info).
		for _, segment := range info.OnlineSegments {
			if segment.CollectionID != qc.collectionID {
				return
			}
		}
		for _, segment := range info.OfflineSegments {
			if segment.CollectionID != qc.collectionID {
				return
			}
		}
		// process change in shard cluster
		qc.shardCluster.HandoffSegments(qc.collectionID, info)
		// for OnlineSegments:
		for _, segment := range info.OnlineSegments {
			/*
				// 1. update global sealed segments
				q.globalSegmentManager.addGlobalSegmentInfo(segment)
				// 2. update excluded segment, cluster have been loaded sealed segments,
				// so we need to avoid getting growing segment from flow graph.*/
			qc.streaming.addExcludedSegments(segment.CollectionID, []*datapb.SegmentInfo{
				{
					ID:            segment.SegmentID,
					CollectionID:  segment.CollectionID,
					PartitionID:   segment.PartitionID,
					InsertChannel: segment.DmChannel,
					NumOfRows:     segment.NumRows,
					// TODO: add status, remove query pb segment status, use common pb segment status?
					DmlPosition: &internalpb.MsgPosition{
						// use max timestamp to filter out dm messages
						Timestamp: typeutil.MaxTimestamp,
					},
				},
			})
		}
		log.Info("Successfully changed global sealed segment info ",
			zap.Int64("collection ", qc.collectionID),
			zap.Any("online segments ", info.OnlineSegments),
			zap.Any("offline segments ", info.OfflineSegments))
	}
}

View File

@ -1,157 +0,0 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querynode
import (
"math/rand"
"testing"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/mq/msgstream/mqwrapper"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/stretchr/testify/mock"
)
// mockQueryMsgStream is a testify-based mock of msgstream.MsgStream. Only the
// methods exercised by queryChannel (Start, Close, AsConsumer, Chan, Seek)
// record calls; every other method panics to flag unexpected use.
type mockQueryMsgStream struct {
	mock.Mock
}

func (m *mockQueryMsgStream) Start() {
	m.Called()
}

func (m *mockQueryMsgStream) Close() {
	m.Called()
}

func (m *mockQueryMsgStream) AsProducer(channels []string) {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) Produce(_ *msgstream.MsgPack) error {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) SetRepackFunc(repackFunc msgstream.RepackFunc) {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) ComputeProduceChannelIndexes(tsMsgs []msgstream.TsMsg) [][]int32 {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) GetProduceChannels() []string {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) ProduceMark(_ *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) Broadcast(_ *msgstream.MsgPack) error {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) BroadcastMark(_ *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) AsConsumer(channels []string, subName string) {
	m.Called(channels, subName)
}

func (m *mockQueryMsgStream) AsConsumerWithPosition(channels []string, subName string, position mqwrapper.SubscriptionInitialPosition) {
	panic("not implemented") // TODO: Implement
}

func (m *mockQueryMsgStream) Chan() <-chan *msgstream.MsgPack {
	args := m.Called()
	return args.Get(0).(<-chan *msgstream.MsgPack)
}

func (m *mockQueryMsgStream) Seek(offset []*msgstream.MsgPosition) error {
	args := m.Called(offset)
	return args.Error(0)
}

func (m *mockQueryMsgStream) GetLatestMsgID(channel string) (msgstream.MessageID, error) {
	panic("not implemented") // TODO: Implement
}
// TestQueryChannel_AsConsumer covers the three AsConsumer paths: no seek
// position, an empty (invalid) position, and a real position that triggers
// a Seek on the underlying msgstream.
func TestQueryChannel_AsConsumer(t *testing.T) {
	t.Run("AsConsumer with no seek", func(t *testing.T) {
		mqs := &mockQueryMsgStream{}
		mqs.On("Close").Return()
		qc := NewQueryChannel(defaultCollectionID, nil, mqs, nil)
		mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
		qc.AsConsumer(defaultDMLChannel, defaultSubName, nil)
		qc.Stop()
		mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
		mqs.AssertNotCalled(t, "Seek")
		mqs.AssertExpectations(t)
		// Second Stop exercises the closeOnce idempotence guard.
		qc.Stop()
	})
	t.Run("AsConsumer with bad position", func(t *testing.T) {
		mqs := &mockQueryMsgStream{}
		mqs.On("Close").Return()
		qc := NewQueryChannel(defaultCollectionID, nil, mqs, nil)
		mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
		// Empty MsgID: treated like a nil position, so no Seek happens.
		qc.AsConsumer(defaultDMLChannel, defaultSubName, &internalpb.MsgPosition{})
		qc.Stop()
		mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
		mqs.AssertNotCalled(t, "Seek")
		mqs.AssertExpectations(t)
	})
	t.Run("AsConsumer with position", func(t *testing.T) {
		mqs := &mockQueryMsgStream{}
		mqs.On("Close").Return()
		qc := NewQueryChannel(defaultCollectionID, nil, mqs, nil)
		// A non-empty MsgID must trigger a Seek to that position.
		msgID := make([]byte, 8)
		rand.Read(msgID)
		pos := &internalpb.MsgPosition{MsgID: msgID}
		mqs.On("AsConsumer", []string{defaultDMLChannel}, defaultSubName).Return()
		mqs.On("Seek", []*internalpb.MsgPosition{pos}).Return(nil)
		qc.AsConsumer(defaultDMLChannel, defaultSubName, pos)
		qc.Stop()
		mqs.AssertCalled(t, "AsConsumer", []string{defaultDMLChannel}, defaultSubName)
		mqs.AssertCalled(t, "Seek", []*internalpb.MsgPosition{pos})
		mqs.AssertExpectations(t)
	})
}

View File

@ -166,17 +166,6 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
assert.NoError(t, err)
}
// initSearchChannel registers the given search/result channel pair on the
// node via AddQueryChannel; test helper that panics on failure.
func initSearchChannel(ctx context.Context, searchChan string, resultChan string, node *QueryNode) {
	searchReq := &querypb.AddQueryChannelRequest{
		QueryChannel:       searchChan,
		QueryResultChannel: resultChan,
	}
	_, err := node.AddQueryChannel(ctx, searchReq)
	if err != nil {
		panic(err)
	}
}
func newQueryNodeMock() *QueryNode {
var ctx context.Context

View File

@ -35,9 +35,6 @@ type queryShardService struct {
queryShardsMu sync.Mutex // guards queryShards
queryShards map[Channel]*queryShard // Virtual Channel -> *queryShard
queryChannelMu sync.Mutex // guards queryChannels
queryChannels map[int64]*queryChannel // Collection ID -> query channel
factory dependency.Factory
historical ReplicaInterface
@ -63,7 +60,6 @@ func newQueryShardService(ctx context.Context, historical ReplicaInterface, stre
ctx: queryShardServiceCtx,
cancel: queryShardServiceCancel,
queryShards: make(map[Channel]*queryShard),
queryChannels: make(map[int64]*queryChannel),
historical: historical,
streaming: streaming,
tSafeReplica: tSafeReplica,
@ -142,29 +138,7 @@ func (q *queryShardService) close() {
}
}
// getQueryChannel returns the collection's query channel, lazily creating
// (and caching) one under queryChannelMu on first access.
func (q *queryShardService) getQueryChannel(collectionID int64) *queryChannel {
	q.queryChannelMu.Lock()
	defer q.queryChannelMu.Unlock()
	qc, ok := q.queryChannels[collectionID]
	if !ok {
		// NOTE(review): the NewQueryMsgStream error is silently discarded;
		// a nil stream would surface later in queryChannel — confirm the
		// factory cannot fail here, or the signature needs an error return.
		queryStream, _ := q.factory.NewQueryMsgStream(q.ctx)
		qc = NewQueryChannel(collectionID, q.shardClusterService, queryStream, q.streaming)
		q.queryChannels[collectionID] = qc
	}
	return qc
}
func (q *queryShardService) releaseCollection(collectionID int64) {
q.queryChannelMu.Lock()
qc, ok := q.queryChannels[collectionID]
if ok && qc != nil {
qc.Stop()
delete(q.queryChannels, collectionID)
}
q.queryChannelMu.Unlock()
q.queryShardsMu.Lock()
for channel, queryShard := range q.queryShards {
if queryShard.collectionID == collectionID {

View File

@ -84,12 +84,6 @@ func (b *baseTask) Notify(err error) {
b.done <- err
}
// addQueryChannelTask wraps an AddQueryChannelRequest for execution on the
// query node's task scheduler.
type addQueryChannelTask struct {
	baseTask
	req  *queryPb.AddQueryChannelRequest // request carrying channel names and seek position
	node *QueryNode                      // node whose queryShardService is updated
}
type watchDmChannelsTask struct {
baseTask
req *queryPb.WatchDmChannelsRequest
@ -120,40 +114,6 @@ type releasePartitionsTask struct {
node *QueryNode
}
// addQueryChannelTask
//
// Execute obtains (or lazily creates) the collection's query channel from
// the query shard service, subscribes it to the requested query channel
// (seeking to req.SeekPosition when provided), and starts its consume loop.
// Returns an error when the shard service is nil or the subscribe fails.
func (r *addQueryChannelTask) Execute(ctx context.Context) error {
	log.Info("Execute addQueryChannelTask",
		zap.Any("collectionID", r.req.CollectionID))
	collectionID := r.req.CollectionID
	if r.node.queryShardService == nil {
		return fmt.Errorf("null query shard service, collectionID %d", collectionID)
	}
	qc := r.node.queryShardService.getQueryChannel(collectionID)
	log.Info("add query channel for collection", zap.Int64("collectionID", collectionID))
	// Subscription name is derived from node + collection so each node gets
	// its own consumer group.
	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.GetNodeID())
	err := qc.AsConsumer(r.req.QueryChannel, consumeSubName, r.req.SeekPosition)
	if err != nil {
		log.Warn("query channel as consumer failed", zap.Int64("collectionID", collectionID), zap.String("channel", r.req.QueryChannel), zap.Error(err))
		return err
	}
	// init global sealed segments
	/*
		for _, segment := range r.req.GlobalSealedSegments {
			sc.globalSegmentManager.addGlobalSegmentInfo(segment)
		}*/
	qc.Start()
	log.Info("addQueryChannelTask done",
		zap.Any("collectionID", r.req.CollectionID),
	)
	return nil
}
// watchDmChannelsTask
func (w *watchDmChannelsTask) Execute(ctx context.Context) (err error) {
collectionID := w.req.CollectionID

View File

@ -33,146 +33,6 @@ import (
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func TestTask_AddQueryChannel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
genAddQueryChanelRequest := func() *querypb.AddQueryChannelRequest {
return &querypb.AddQueryChannelRequest{
Base: genCommonMsgBase(commonpb.MsgType_LoadCollection),
NodeID: 0,
CollectionID: defaultCollectionID,
QueryChannel: genQueryChannel(),
QueryResultChannel: genQueryResultChannel(),
}
}
t.Run("test timestamp", func(t *testing.T) {
timestamp := Timestamp(1000)
task := addQueryChannelTask{
baseTask: baseTask{
ts: timestamp,
},
req: genAddQueryChanelRequest(),
}
resT := task.Timestamp()
assert.Equal(t, timestamp, resT)
})
t.Run("test OnEnqueue", func(t *testing.T) {
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
}
err := task.OnEnqueue()
assert.NoError(t, err)
task.req.Base = nil
err = task.OnEnqueue()
assert.NoError(t, err)
})
t.Run("test execute", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
err = task.Execute(ctx)
assert.NoError(t, err)
})
t.Run("test execute has queryCollection", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
err = task.Execute(ctx)
assert.NoError(t, err)
})
t.Run("test execute nil query service", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
node.queryShardService = nil
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
err = task.Execute(ctx)
assert.Error(t, err)
})
t.Run("test execute init global sealed segments", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task.req.GlobalSealedSegments = []*querypb.SegmentInfo{{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
}}
err = task.Execute(ctx)
assert.NoError(t, err)
})
t.Run("test execute not init global sealed segments", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task.req.GlobalSealedSegments = []*querypb.SegmentInfo{{
SegmentID: defaultSegmentID,
CollectionID: 1000,
PartitionID: defaultPartitionID,
}}
err = task.Execute(ctx)
assert.NoError(t, err)
})
/*
t.Run("test execute seek error", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
t.Run("test execute seek error", func(t *testing.T) {
node, err := genSimpleQueryNode(ctx)
assert.NoError(t, err)
position := &internalpb.MsgPosition{
ChannelName: genQueryChannel(),
MsgID: []byte{1, 2, 3, 4, 5, 6, 7, 8},
MsgGroup: defaultSubName,
Timestamp: 0,
}
task := addQueryChannelTask{
req: genAddQueryChanelRequest(),
node: node,
}
task.req.SeekPosition = position
err = task.Execute(ctx)
assert.Error(t, err)
})*/
}
func TestTask_watchDmChannelsTask(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@ -1187,16 +1187,6 @@ type QueryNode interface {
Component
TimeTickProvider
// AddQueryChannel notifies QueryNode to subscribe a query channel and be a producer of a query result channel.
// `ctx` is the context to control request deadline and cancellation.
// `req` contains the request params, which are collection id, query channel and query result channel.
//
// Return UnexpectedError code in status:
// If QueryNode isn't in HEALTHY: states not HEALTHY or dynamic checks not HEALTHY.
// Return Success code in status:
// Subscribe a query channel and be a producer of a query result channel.
AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error)
RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error)
WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error)
WatchDeltaChannels(ctx context.Context, req *querypb.WatchDeltaChannelsRequest) (*commonpb.Status, error)
// LoadSegments notifies QueryNode to load the sealed segments from storage. The load tasks are sync to this
@ -1244,7 +1234,6 @@ type QueryCoord interface {
ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error)
LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error)
ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error)
CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error)
GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error)
GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error)
LoadBalance(ctx context.Context, req *querypb.LoadBalanceRequest) (*commonpb.Status, error)

View File

@ -92,14 +92,6 @@ type BaseComponentInfos struct {
// QueryNodeConfiguration records the configuration of QueryNode.
type QueryNodeConfiguration struct {
SearchReceiveBufSize int64 `json:"search_receive_buf_size"`
SearchPulsarBufSize int64 `json:"search_pulsar_buf_size"`
SearchResultReceiveBufSize int64 `json:"search_result_receive_buf_size"`
RetrieveReceiveBufSize int64 `json:"retrieve_receive_buf_size"`
RetrievePulsarBufSize int64 `json:"retrieve_pulsar_buf_size"`
RetrieveResultReceiveBufSize int64 `json:"retrieve_result_receive_buf_size"`
SimdType string `json:"simd_type"`
}

View File

@ -87,13 +87,6 @@ func TestQueryNodeInfos_Codec(t *testing.T) {
ID: 1,
},
SystemConfigurations: QueryNodeConfiguration{
SearchReceiveBufSize: 1024,
SearchPulsarBufSize: 1024,
SearchResultReceiveBufSize: 1024,
RetrieveReceiveBufSize: 1024,
RetrievePulsarBufSize: 1024,
RetrieveResultReceiveBufSize: 1024,
SimdType: "avx2",
},
}

View File

@ -70,10 +70,6 @@ func (m *GrpcQueryCoordClient) ReleaseCollection(ctx context.Context, in *queryp
return &commonpb.Status{}, m.Err
}
func (m *GrpcQueryCoordClient) CreateQueryChannel(ctx context.Context, in *querypb.CreateQueryChannelRequest, opts ...grpc.CallOption) (*querypb.CreateQueryChannelResponse, error) {
return &querypb.CreateQueryChannelResponse{}, m.Err
}
func (m *GrpcQueryCoordClient) GetPartitionStates(ctx context.Context, in *querypb.GetPartitionStatesRequest, opts ...grpc.CallOption) (*querypb.GetPartitionStatesResponse, error) {
return &querypb.GetPartitionStatesResponse{}, m.Err
}

View File

@ -44,14 +44,6 @@ func (m *GrpcQueryNodeClient) GetStatisticsChannel(ctx context.Context, in *inte
return &milvuspb.StringResponse{}, m.Err
}
func (m *GrpcQueryNodeClient) AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return &commonpb.Status{}, m.Err
}
func (m *GrpcQueryNodeClient) RemoveQueryChannel(ctx context.Context, in *querypb.RemoveQueryChannelRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return &commonpb.Status{}, m.Err
}
func (m *GrpcQueryNodeClient) WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return &commonpb.Status{}, m.Err
}

View File

@ -61,14 +61,6 @@ func (q QueryNodeClient) GetTimeTickChannel(ctx context.Context) (*milvuspb.Stri
return q.grpcClient.GetTimeTickChannel(ctx, &internalpb.GetTimeTickChannelRequest{})
}
func (q QueryNodeClient) AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
return q.grpcClient.AddQueryChannel(ctx, req)
}
func (q QueryNodeClient) RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
return q.grpcClient.RemoveQueryChannel(ctx, req)
}
func (q QueryNodeClient) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
return q.grpcClient.WatchDmChannels(ctx, req)
}

View File

@ -679,20 +679,6 @@ type queryNodeConfig struct {
FlowGraphMaxQueueLength int32
FlowGraphMaxParallelism int32
// search
SearchChannelNames []string
SearchResultChannelNames []string
SearchReceiveBufSize int64
SearchPulsarBufSize int64
SearchResultReceiveBufSize int64
// Retrieve
RetrieveChannelNames []string
RetrieveResultChannelNames []string
RetrieveReceiveBufSize int64
RetrievePulsarBufSize int64
RetrieveResultReceiveBufSize int64
// stats
StatsPublishInterval int
@ -728,10 +714,6 @@ func (p *queryNodeConfig) init(base *BaseTable) {
p.initFlowGraphMaxQueueLength()
p.initFlowGraphMaxParallelism()
p.initSearchReceiveBufSize()
p.initSearchPulsarBufSize()
p.initSearchResultReceiveBufSize()
p.initStatsPublishInterval()
p.initSmallIndexParams()
@ -789,19 +771,6 @@ func (p *queryNodeConfig) initFlowGraphMaxParallelism() {
p.FlowGraphMaxParallelism = p.Base.ParseInt32WithDefault("queryNode.dataSync.flowGraph.maxParallelism", 1024)
}
// msgStream
func (p *queryNodeConfig) initSearchReceiveBufSize() {
p.SearchReceiveBufSize = p.Base.ParseInt64WithDefault("queryNode.msgStream.search.recvBufSize", 512)
}
func (p *queryNodeConfig) initSearchPulsarBufSize() {
p.SearchPulsarBufSize = p.Base.ParseInt64WithDefault("queryNode.msgStream.search.pulsarBufSize", 512)
}
func (p *queryNodeConfig) initSearchResultReceiveBufSize() {
p.SearchResultReceiveBufSize = p.Base.ParseInt64WithDefault("queryNode.msgStream.searchResult.recvBufSize", 64)
}
func (p *queryNodeConfig) initSmallIndexParams() {
p.ChunkRows = p.Base.ParseInt64WithDefault("queryNode.segcore.chunkRows", 32768)
if p.ChunkRows < 1024 {

View File

@ -225,15 +225,6 @@ func TestComponentParam(t *testing.T) {
interval := Params.StatsPublishInterval
assert.Equal(t, 1000, interval)
bufSize := Params.SearchReceiveBufSize
assert.Equal(t, int64(512), bufSize)
bufSize = Params.SearchResultReceiveBufSize
assert.Equal(t, int64(64), bufSize)
bufSize = Params.SearchPulsarBufSize
assert.Equal(t, int64(512), bufSize)
length := Params.FlowGraphMaxQueueLength
assert.Equal(t, int32(1024), length)