mirror of https://github.com/milvus-io/milvus.git
commit 7b9fdd7f29 (parent 149d92b6f1)

--- go.mod (1 insertion) ---
@@ -22,6 +22,7 @@ require (
 	github.com/mitchellh/mapstructure v1.1.2
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/onsi/gomega v1.10.5 // indirect
+	github.com/opentracing-contrib/go-grpc v0.0.0-20200813121455-4a6760c71486
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pierrec/lz4 v2.5.2+incompatible // indirect
 	github.com/sirupsen/logrus v1.6.0 // indirect

--- go.sum (5 insertions) ---
@@ -181,6 +181,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -297,6 +298,9 @@ github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
 github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
+github.com/opentracing-contrib/go-grpc v0.0.0-20200813121455-4a6760c71486 h1:K35HCWaOTJIPW6cDHK4yj3QfRY/NhE0pBbfoc0M2NMQ=
+github.com/opentracing-contrib/go-grpc v0.0.0-20200813121455-4a6760c71486/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/ozonru/etcd v3.3.20-grpc1.27-origmodule+incompatible h1:CAG0PUvo1fen+ZEfxKJjFIc8GuuN5RuaBuCAuaP2Hno=
@@ -472,6 +476,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
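Aside (illustrative, not part of the diff): the one genuinely new module above is github.com/opentracing-contrib/go-grpc, imported below as otgrpc. It supplies the interceptor constructors this commit wires into every gRPC client and server. A minimal client-side sketch, with a placeholder address:

	package main

	import (
		"context"
		"log"

		otgrpc "github.com/opentracing-contrib/go-grpc"
		"github.com/opentracing/opentracing-go"
		"google.golang.org/grpc"
	)

	func main() {
		// GlobalTracer is a no-op until a real tracer (e.g. Jaeger) is installed.
		tracer := opentracing.GlobalTracer()
		conn, err := grpc.DialContext(context.Background(), "localhost:21124", // placeholder
			grpc.WithInsecure(), grpc.WithBlock(),
			grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)),
			grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}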

--- datanode: allocator ---
@@ -1,6 +1,8 @@
 package datanode
 
 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
 )
@@ -22,7 +24,8 @@ func newAllocatorImpl(s MasterServiceInterface) *allocatorImpl {
 }
 
 func (alloc *allocatorImpl) allocID() (UniqueID, error) {
-	resp, err := alloc.masterService.AllocID(&masterpb.IDRequest{
+	ctx := context.TODO()
+	resp, err := alloc.masterService.AllocID(ctx, &masterpb.IDRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kRequestID,
 			MsgID:   1, // GOOSE TODO
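Note (illustrative, not in the commit): context.TODO() above is a stopgap until allocID itself grows a context parameter. A hypothetical variant, written against the same package-level types, showing what threading a real context buys:

	// Hypothetical sketch: a bounded ID allocation.
	func allocIDWithTimeout(ms MasterServiceInterface) (UniqueID, error) {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		resp, err := ms.AllocID(ctx, &masterpb.IDRequest{
			Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_kRequestID},
		})
		if err != nil {
			return 0, err
		}
		return resp.ID, nil
	}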

--- datanode: DataNode and its service interfaces ---
@@ -25,34 +25,34 @@ const (
 )
 
 type (
-	Inteface interface {
+	Interface interface {
 		// Service
 		Init() error
 		Start() error
 		Stop() error
 
 		// Component
-		GetComponentStates() (*internalpb2.ComponentStates, error)
-		GetTimeTickChannel() (*milvuspb.StringResponse, error)   // This function has no effect
-		GetStatisticsChannel() (*milvuspb.StringResponse, error) // This function has no effect
+		GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+		GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)   // This function has no effect
+		GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) // This function has no effect
 
-		WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error)
-		FlushSegments(in *datapb.FlushSegRequest) (*commonpb.Status, error)
+		WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error)
+		FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) error
 
-		SetMasterServiceInterface(ms MasterServiceInterface) error
-		SetDataServiceInterface(ds DataServiceInterface) error
+		SetMasterServiceInterface(ctx context.Context, ms MasterServiceInterface) error
+		SetDataServiceInterface(ctx context.Context, ds DataServiceInterface) error
 	}
 
 	DataServiceInterface interface {
-		GetComponentStates() (*internalpb2.ComponentStates, error)
-		RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
+		GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+		RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
 	}
 
 	MasterServiceInterface interface {
-		GetComponentStates() (*internalpb2.ComponentStates, error)
-		AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error)
-		ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
-		DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+		GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+		AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error)
+		ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+		DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
 	}
 
 	DataNode struct {
@@ -98,7 +98,7 @@ func NewDataNode(ctx context.Context, factory msgstream.Factory) *DataNode {
 	return node
 }
 
-func (node *DataNode) SetMasterServiceInterface(ms MasterServiceInterface) error {
+func (node *DataNode) SetMasterServiceInterface(ctx context.Context, ms MasterServiceInterface) error {
 	switch {
 	case ms == nil, node.masterService != nil:
 		return errors.New("Nil parameter or repeatly set")
@@ -108,7 +108,7 @@ func (node *DataNode) SetMasterServiceInterface(ms MasterServiceInterface) error
 	}
 }
 
-func (node *DataNode) SetDataServiceInterface(ds DataServiceInterface) error {
+func (node *DataNode) SetDataServiceInterface(ctx context.Context, ds DataServiceInterface) error {
 	switch {
 	case ds == nil, node.dataService != nil:
 		return errors.New("Nil parameter or repeatly set")
@@ -120,6 +120,7 @@ func (node *DataNode) SetDataServiceInterface(ds DataServiceInterface) error {
 
 // Suppose dataservice is in INITIALIZING
 func (node *DataNode) Init() error {
+	ctx := context.Background()
 
 	req := &datapb.RegisterNodeRequest{
 		Base: &commonpb.MsgBase{
@@ -132,7 +133,7 @@ func (node *DataNode) Init() error {
 		},
 	}
 
-	resp, err := node.dataService.RegisterNode(req)
+	resp, err := node.dataService.RegisterNode(ctx, req)
 	if err != nil {
 		return errors.Errorf("Register node failed: %v", err)
 	}
@@ -187,7 +188,7 @@ func (node *DataNode) UpdateStateCode(code internalpb2.StateCode) {
 	node.State.Store(code)
 }
 
-func (node *DataNode) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
+func (node *DataNode) WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
 	status := &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 	}
@@ -210,7 +211,7 @@ func (node *DataNode) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*common
 	}
 }
 
-func (node *DataNode) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (node *DataNode) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	log.Debug("DataNode current state", zap.Any("State", node.State.Load()))
 	states := &internalpb2.ComponentStates{
 		State: &internalpb2.ComponentInfo{
@@ -224,7 +225,7 @@ func (node *DataNode) GetComponentStates() (*internalpb2.ComponentStates, error)
 	return states, nil
 }
 
-func (node *DataNode) FlushSegments(in *datapb.FlushSegRequest) error {
+func (node *DataNode) FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) error {
 	ids := make([]UniqueID, 0)
 	ids = append(ids, in.SegmentIDs...)
 
@@ -253,10 +254,22 @@ func (node *DataNode) Stop() error {
 	return nil
 }
 
-func (node *DataNode) GetTimeTickChannel() (string, error) {
-	return "Nothing happened", nil
+func (node *DataNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: "",
+	}, nil
 }
 
-func (node *DataNode) GetStatisticsChannel() (string, error) {
-	return "Nothing happened", nil
+func (node *DataNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: "",
+	}, nil
 }
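Note (illustrative, not in the commit): both channel getters now return *milvuspb.StringResponse instead of a bare string, so callers are expected to check the embedded status before trusting Value. A hypothetical caller against the Interface defined above:

	// Hypothetical sketch, same package scope assumed.
	func timeTickChannelName(ctx context.Context, node Interface) (string, error) {
		resp, err := node.GetTimeTickChannel(ctx)
		if err != nil {
			return "", err
		}
		if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return "", errors.New(resp.Status.Reason)
		}
		return resp.Value, nil
	}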

--- datanode: test mocks ---
@@ -2,6 +2,7 @@ package datanode
 
 import (
 	"bytes"
+	"context"
 	"encoding/binary"
 	"math"
 	"math/rand"
@@ -416,7 +417,7 @@ func (m *MasterServiceFactory) setCollectionName(name string) {
 	m.collectionName = name
 }
 
-func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
+func (m *MasterServiceFactory) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
 	resp := &masterpb.IDResponse{
 		Status: &commonpb.Status{},
 		ID:     m.ID,
@@ -424,7 +425,7 @@ func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResp
 	return resp, nil
 }
 
-func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
+func (m *MasterServiceFactory) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
 	resp := &milvuspb.ShowCollectionResponse{
 		Status:          &commonpb.Status{},
 		CollectionNames: []string{m.collectionName},
@@ -432,7 +433,7 @@ func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionReques
 	return resp, nil
 
 }
-func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
+func (m *MasterServiceFactory) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
 	f := MetaFactory{}
 	meta := f.CollectionMetaFactory(m.collectionID, m.collectionName)
 	resp := &milvuspb.DescribeCollectionResponse{
@@ -443,7 +444,7 @@ func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectio
 	return resp, nil
 }
 
-func (m *MasterServiceFactory) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (m *MasterServiceFactory) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	return &internalpb2.ComponentStates{
 		State:              &internalpb2.ComponentInfo{},
 		SubcomponentStates: make([]*internalpb2.ComponentInfo, 0),

--- datanode: metaService ---
@@ -30,20 +30,21 @@ func newMetaService(ctx context.Context, replica Replica, m MasterServiceInterfa
 
 func (mService *metaService) init() {
 	log.Debug("Initing meta ...")
-	err := mService.loadCollections()
+	ctx := context.Background()
+	err := mService.loadCollections(ctx)
 	if err != nil {
 		log.Error("metaService init failed", zap.Error(err))
 	}
 }
 
-func (mService *metaService) loadCollections() error {
-	names, err := mService.getCollectionNames()
+func (mService *metaService) loadCollections(ctx context.Context) error {
+	names, err := mService.getCollectionNames(ctx)
 	if err != nil {
 		return err
 	}
 
 	for _, name := range names {
-		err := mService.createCollection(name)
+		err := mService.createCollection(ctx, name)
 		if err != nil {
 			return err
 		}
@@ -51,7 +52,7 @@ func (mService *metaService) loadCollections() error {
 	return nil
 }
 
-func (mService *metaService) getCollectionNames() ([]string, error) {
+func (mService *metaService) getCollectionNames(ctx context.Context) ([]string, error) {
 	req := &milvuspb.ShowCollectionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kShowCollections,
@@ -62,14 +63,14 @@ func (mService *metaService) getCollectionNames() ([]string, error) {
 		DbName: "default", // GOOSE TODO
 	}
 
-	response, err := mService.masterClient.ShowCollections(req)
+	response, err := mService.masterClient.ShowCollections(ctx, req)
 	if err != nil {
 		return nil, errors.Errorf("Get collection names from master service wrong: %v", err)
 	}
 	return response.GetCollectionNames(), nil
 }
 
-func (mService *metaService) createCollection(name string) error {
+func (mService *metaService) createCollection(ctx context.Context, name string) error {
 	log.Debug("Describing collections")
 	req := &milvuspb.DescribeCollectionRequest{
 		Base: &commonpb.MsgBase{
@@ -82,7 +83,7 @@ func (mService *metaService) createCollection(name string) error {
 		CollectionName: name,
 	}
 
-	response, err := mService.masterClient.DescribeCollection(req)
+	response, err := mService.masterClient.DescribeCollection(ctx, req)
 	if err != nil {
 		return errors.Errorf("Describe collection %v from master service wrong: %v", name, err)
 	}
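Note (illustrative, not in the commit): init still roots its context in context.Background(); the new parameter pays off once a caller supplies a bounded one. A hypothetical variant, same package scope assumed:

	// Hypothetical sketch: bound the whole metadata load so a slow
	// master service cannot block startup indefinitely.
	func (mService *metaService) initWithTimeout(d time.Duration) {
		ctx, cancel := context.WithTimeout(context.Background(), d)
		defer cancel()
		if err := mService.loadCollections(ctx); err != nil {
			log.Error("metaService init failed", zap.Error(err))
		}
	}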

--- datanode: metaService test ---
@@ -18,7 +18,7 @@ func TestMetaService_All(t *testing.T) {
 	metaService := newMetaService(ctx, replica, mFactory)
 
 	t.Run("Test getCollectionNames", func(t *testing.T) {
-		names, err := metaService.getCollectionNames()
+		names, err := metaService.getCollectionNames(ctx)
 		assert.NoError(t, err)
 		assert.Equal(t, 1, len(names))
 		assert.Equal(t, "a-collection", names[0])
@@ -28,7 +28,7 @@ func TestMetaService_All(t *testing.T) {
 		hasColletion := metaService.replica.hasCollection(0)
 		assert.False(t, hasColletion)
 
-		err := metaService.createCollection("a-collection")
+		err := metaService.createCollection(ctx, "a-collection")
 		assert.NoError(t, err)
 		hasColletion = metaService.replica.hasCollection(0)
 		assert.True(t, hasColletion)
@@ -40,7 +40,7 @@ func TestMetaService_All(t *testing.T) {
 
 		mFactory.setCollectionID(1)
 		mFactory.setCollectionName("a-collection-1")
-		err := metaService.loadCollections()
+		err := metaService.loadCollections(ctx)
 		assert.NoError(t, err)
 
 		hasColletion = metaService.replica.hasCollection(1)

--- dataservice: allocator ---
@@ -1,6 +1,8 @@
 package dataservice
 
 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
 )
@@ -21,7 +23,8 @@ func newAllocatorImpl(masterClient MasterClient) *allocatorImpl {
 }
 
 func (allocator *allocatorImpl) allocTimestamp() (Timestamp, error) {
-	resp, err := allocator.masterClient.AllocTimestamp(&masterpb.TsoRequest{
+	ctx := context.TODO()
+	resp, err := allocator.masterClient.AllocTimestamp(ctx, &masterpb.TsoRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kShowCollections,
 			MsgID:   -1, // todo add msg id
@@ -37,7 +40,8 @@ func (allocator *allocatorImpl) allocTimestamp() (Timestamp, error) {
 }
 
 func (allocator *allocatorImpl) allocID() (UniqueID, error) {
-	resp, err := allocator.masterClient.AllocID(&masterpb.IDRequest{
+	ctx := context.TODO()
+	resp, err := allocator.masterClient.AllocID(ctx, &masterpb.IDRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kShowCollections,
 			MsgID:   -1, // todo add msg id

--- dataservice: dataNodeCluster ---
@@ -1,6 +1,7 @@
 package dataservice
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
@@ -77,6 +78,7 @@ func (c *dataNodeCluster) GetNodeIDs() []int64 {
 }
 
 func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
+	ctx := context.TODO()
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	var groups [][]string
@@ -90,7 +92,7 @@ func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
 		groups[i%length] = append(groups[i%length], channel)
 	}
 	for i, group := range groups {
-		resp, err := c.nodes[i].client.WatchDmChannels(&datapb.WatchDmChannelRequest{
+		resp, err := c.nodes[i].client.WatchDmChannels(ctx, &datapb.WatchDmChannelRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_kDescribeCollection,
 				MsgID:   -1, // todo
@@ -107,12 +109,12 @@ func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
 	}
 }
 
-func (c *dataNodeCluster) GetDataNodeStates() ([]*internalpb2.ComponentInfo, error) {
+func (c *dataNodeCluster) GetDataNodeStates(ctx context.Context) ([]*internalpb2.ComponentInfo, error) {
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 	ret := make([]*internalpb2.ComponentInfo, 0)
 	for _, node := range c.nodes {
-		states, err := node.client.GetComponentStates(&commonpb.Empty{})
+		states, err := node.client.GetComponentStates(ctx, &commonpb.Empty{})
 		if err != nil {
 			log.Error("get component states error", zap.Stringer("dataNode", node), zap.Error(err))
 			continue
@@ -123,10 +125,11 @@ func (c *dataNodeCluster) GetDataNodeStates() ([]*internalpb2.ComponentInfo, err
 }
 
 func (c *dataNodeCluster) FlushSegment(request *datapb.FlushSegRequest) {
+	ctx := context.TODO()
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 	for _, node := range c.nodes {
-		if _, err := node.client.FlushSegments(request); err != nil {
+		if _, err := node.client.FlushSegments(ctx, request); err != nil {
 			log.Error("flush segment err", zap.Stringer("dataNode", node), zap.Error(err))
 			continue
 		}
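Note (illustrative, not in the commit): WatchInsertChannels and FlushSegment still mint context.TODO() internally, so every fan-out RPC shares one unbounded context. A hypothetical end state once the exported methods accept ctx:

	// Hypothetical sketch: a single caller-supplied deadline covers the whole fan-out.
	func (c *dataNodeCluster) flushSegmentsWithCtx(ctx context.Context, request *datapb.FlushSegRequest) {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		c.mu.RLock()
		defer c.mu.RUnlock()
		for _, node := range c.nodes {
			if _, err := node.client.FlushSegments(ctx, request); err != nil {
				log.Error("flush segment err", zap.Stringer("dataNode", node), zap.Error(err))
			}
		}
	}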

--- dataservice: mock DataNode client ---
@@ -1,6 +1,7 @@
 package dataservice
 
 import (
+	"context"
 	"sync/atomic"
 	"time"
 
@@ -58,16 +59,16 @@ func newMockDataNodeClient() *mockDataNodeClient {
 	return &mockDataNodeClient{}
 }
 
-func (c *mockDataNodeClient) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
+func (c *mockDataNodeClient) WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
 	return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
 }
 
-func (c *mockDataNodeClient) GetComponentStates(empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
+func (c *mockDataNodeClient) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
 	// todo
 	return nil, nil
 }
 
-func (c *mockDataNodeClient) FlushSegments(in *datapb.FlushSegRequest) (*commonpb.Status, error) {
+func (c *mockDataNodeClient) FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) (*commonpb.Status, error) {
 	return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
 }

--- dataservice: Server ---
@@ -31,36 +31,35 @@ const role = "dataservice"
 type DataService interface {
 	typeutil.Service
 	typeutil.Component
-	RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
-	Flush(req *datapb.FlushRequest) (*commonpb.Status, error)
+	RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
+	Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error)
 
-	AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
-	ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error)
-	GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
-	GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
-	GetSegmentInfoChannel() (*milvuspb.StringResponse, error)
-	GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
-	GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
-	GetPartitionStatistics(req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error)
-	GetComponentStates() (*internalpb2.ComponentStates, error)
-	GetCount(req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error)
-	GetSegmentInfo(req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error)
+	AssignSegmentID(ctx context.Context, req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
+	ShowSegments(ctx context.Context, req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error)
+	GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
+	GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
+	GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+	GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
+	GetCollectionStatistics(ctx context.Context, req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
+	GetPartitionStatistics(ctx context.Context, req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error)
+	GetCount(ctx context.Context, req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error)
+	GetSegmentInfo(ctx context.Context, req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error)
 }
 
 type MasterClient interface {
-	ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
-	DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
-	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
-	GetDdChannel() (string, error)
-	AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
-	AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error)
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+	DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+	AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
+	AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 }
 
 type DataNodeClient interface {
-	WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error)
-	GetComponentStates(empty *commonpb.Empty) (*internalpb2.ComponentStates, error)
-	FlushSegments(in *datapb.FlushSegRequest) (*commonpb.Status, error)
+	WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error)
+	GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error)
+	FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) (*commonpb.Status, error)
 	Stop() error
 }
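Note (illustrative, not in the commit): GetDdChannel now answers with *milvuspb.StringResponse, which is why the call sites below unwrap .Value. A hypothetical defensive helper around the new MasterClient signature:

	// Hypothetical sketch, same package scope assumed.
	func ddChannelName(ctx context.Context, mc MasterClient) (string, error) {
		resp, err := mc.GetDdChannel(ctx)
		if err != nil {
			return "", err
		}
		if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return "", fmt.Errorf("GetDdChannel failed: %s", resp.Status.Reason)
		}
		return resp.Value, nil
	}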
@@ -215,19 +214,20 @@ func (s *Server) initMsgProducer() error {
 }
 
 func (s *Server) loadMetaFromMaster() error {
+	ctx := context.Background()
 	log.Debug("loading collection meta from master")
 	var err error
 	if err = s.checkMasterIsHealthy(); err != nil {
 		return err
 	}
 	if s.ddChannelName == "" {
-		channel, err := s.masterClient.GetDdChannel()
+		channel, err := s.masterClient.GetDdChannel(ctx)
 		if err != nil {
 			return err
 		}
-		s.ddChannelName = channel
+		s.ddChannelName = channel.Value
 	}
-	collections, err := s.masterClient.ShowCollections(&milvuspb.ShowCollectionRequest{
+	collections, err := s.masterClient.ShowCollections(ctx, &milvuspb.ShowCollectionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kShowCollections,
 			MsgID:   -1, // todo add msg id
@@ -240,7 +240,7 @@ func (s *Server) loadMetaFromMaster() error {
 		return err
 	}
 	for _, collectionName := range collections.CollectionNames {
-		collection, err := s.masterClient.DescribeCollection(&milvuspb.DescribeCollectionRequest{
+		collection, err := s.masterClient.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_kDescribeCollection,
 				MsgID:   -1, // todo
@@ -254,7 +254,7 @@ func (s *Server) loadMetaFromMaster() error {
 			log.Error("describe collection error", zap.String("collectionName", collectionName), zap.Error(err))
 			continue
 		}
-		partitions, err := s.masterClient.ShowPartitions(&milvuspb.ShowPartitionRequest{
+		partitions, err := s.masterClient.ShowPartitions(ctx, &milvuspb.ShowPartitionRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_kShowPartitions,
 				MsgID:   -1, // todo
@@ -297,7 +297,7 @@ func (s *Server) checkMasterIsHealthy() error {
 		case <-ctx.Done():
 			return fmt.Errorf("master is not healthy")
 		case <-ticker.C:
-			resp, err = s.masterClient.GetComponentStates()
+			resp, err = s.masterClient.GetComponentStates(ctx)
 			if err = VerifyResponse(resp, err); err != nil {
 				return err
 			}
@@ -424,7 +424,7 @@ func (s *Server) stopServerLoop() {
 	s.serverLoopWg.Wait()
 }
 
-func (s *Server) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (s *Server) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	resp := &internalpb2.ComponentStates{
 		State: &internalpb2.ComponentInfo{
 			NodeID: Params.NodeID,
@@ -435,7 +435,7 @@ func (s *Server) GetComponentStates() (*internalpb2.ComponentStates, error) {
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 		},
 	}
-	dataNodeStates, err := s.cluster.GetDataNodeStates()
+	dataNodeStates, err := s.cluster.GetDataNodeStates(ctx)
 	if err != nil {
 		resp.Status.Reason = err.Error()
 		return resp, nil
@@ -445,7 +445,7 @@ func (s *Server) GetComponentStates() (*internalpb2.ComponentStates, error) {
 	return resp, nil
 }
 
-func (s *Server) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
+func (s *Server) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -454,7 +454,7 @@ func (s *Server) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
 	}, nil
 }
 
-func (s *Server) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
+func (s *Server) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -463,7 +463,7 @@ func (s *Server) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
 	}, nil
 }
 
-func (s *Server) RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
+func (s *Server) RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
 	ret := &datapb.RegisterNodeResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -478,12 +478,12 @@ func (s *Server) RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.Register
 	s.cluster.Register(node)
 
 	if s.ddChannelName == "" {
-		resp, err := s.masterClient.GetDdChannel()
+		resp, err := s.masterClient.GetDdChannel(ctx)
 		if err = VerifyResponse(resp, err); err != nil {
 			ret.Status.Reason = err.Error()
 			return ret, err
 		}
-		s.ddChannelName = resp
+		s.ddChannelName = resp.Value
 	}
 	ret.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
 	ret.InitParams = &internalpb2.InitParams{
@@ -518,7 +518,7 @@ func (s *Server) newDataNode(ip string, port int64, id UniqueID) (*dataNode, err
 	}, nil
 }
 
-func (s *Server) Flush(req *datapb.FlushRequest) (*commonpb.Status, error) {
+func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error) {
 	if !s.checkStateIsHealthy() {
 		return &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -531,7 +531,7 @@ func (s *Server) Flush(req *datapb.FlushRequest) (*commonpb.Status, error) {
 	}, nil
 }
 
-func (s *Server) AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
+func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
 	resp := &datapb.AssignSegIDResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -593,7 +593,8 @@ func (s *Server) AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.Assign
 }
 
 func (s *Server) loadCollectionFromMaster(collectionID int64) error {
-	resp, err := s.masterClient.DescribeCollection(&milvuspb.DescribeCollectionRequest{
+	ctx := context.TODO()
+	resp, err := s.masterClient.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType:  commonpb.MsgType_kDescribeCollection,
 			SourceID: Params.NodeID,
@@ -649,7 +650,7 @@ func (s *Server) openNewSegment(collectionID UniqueID, partitionID UniqueID, cha
 	return nil
 }
 
-func (s *Server) ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
+func (s *Server) ShowSegments(ctx context.Context, req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
 	resp := &datapb.ShowSegmentResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -665,7 +666,7 @@ func (s *Server) ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegme
 	return resp, nil
 }
 
-func (s *Server) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
+func (s *Server) GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
 	resp := &datapb.SegmentStatesResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -701,7 +702,7 @@ func (s *Server) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.Seg
 	return resp, nil
 }
 
-func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
+func (s *Server) GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
 	resp := &datapb.InsertBinlogPathsResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -731,7 +732,7 @@ func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*dat
 	return resp, nil
 }
 
-func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
+func (s *Server) GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
 	return &internalpb2.StringList{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -740,7 +741,7 @@ func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) (*internalp
 	}, nil
 }
 
-func (s *Server) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
+func (s *Server) GetCollectionStatistics(ctx context.Context, req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
 	resp := &datapb.CollectionStatsResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -756,12 +757,12 @@ func (s *Server) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*d
 	return resp, nil
 }
 
-func (s *Server) GetPartitionStatistics(req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
+func (s *Server) GetPartitionStatistics(ctx context.Context, req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
 	// todo implement
 	return nil, nil
 }
 
-func (s *Server) GetSegmentInfoChannel() (*milvuspb.StringResponse, error) {
+func (s *Server) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -770,7 +771,7 @@ func (s *Server) GetSegmentInfoChannel() (*milvuspb.StringResponse, error) {
 	}, nil
 }
 
-func (s *Server) GetCount(req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error) {
+func (s *Server) GetCount(ctx context.Context, req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error) {
 	resp := &datapb.CollectionCountResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -790,7 +791,7 @@ func (s *Server) GetCount(req *datapb.CollectionCountRequest) (*datapb.Collectio
 	return resp, nil
 }
 
-func (s *Server) GetSegmentInfo(req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error) {
+func (s *Server) GetSegmentInfo(ctx context.Context, req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error) {
 	resp := &datapb.SegmentInfoResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
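Note (illustrative, not in the commit): each Server method now receives the ctx that the gRPC layer (and the tracing interceptors below) populated, so a caller's deadline and trace span reach every nested call. A hypothetical caller with a deadline:

	// Hypothetical sketch against the DataService interface above.
	func flushWithDeadline(ds DataService, req *datapb.FlushRequest) (*commonpb.Status, error) {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		return ds.Flush(ctx, req)
	}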

--- distributed DataNode: grpc client wrapper ---
@@ -4,6 +4,8 @@ import (
 	"context"
 	"time"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
@@ -29,10 +31,14 @@ func NewClient(address string) *Client {
 }
 
 func (c *Client) Init() error {
-
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
 		log.Debug("DataNode connect czs::", zap.String("address", c.address))
-		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -56,14 +62,14 @@ func (c *Client) Stop() error {
 	return c.conn.Close()
 }
 
-func (c *Client) GetComponentStates(empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return c.grpc.GetComponentStates(context.Background(), empty)
+func (c *Client) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
+	return c.grpc.GetComponentStates(ctx, empty)
 }
 
-func (c *Client) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
-	return c.grpc.WatchDmChannels(context.Background(), in)
+func (c *Client) WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
+	return c.grpc.WatchDmChannels(ctx, in)
 }
 
-func (c *Client) FlushSegments(in *datapb.FlushSegRequest) (*commonpb.Status, error) {
-	return c.grpc.FlushSegments(context.Background(), in)
+func (c *Client) FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) (*commonpb.Status, error) {
+	return c.grpc.FlushSegments(ctx, in)
 }
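Note (illustrative, not in the commit): the wrapper now forwards the caller's ctx instead of minting context.Background(), so cancellation and any active span cross the client boundary. Hypothetical usage:

	// Hypothetical sketch: the caller owns cancellation.
	func watchWithCancel(c *Client, req *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		return c.WatchDmChannels(ctx, req)
	}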

--- distributed DataNode: grpc server wrapper ---
@@ -9,6 +9,7 @@ import (
 	"sync"
 	"time"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	"github.com/opentracing/opentracing-go"
 	"github.com/uber/jaeger-client-go/config"
 	"go.uber.org/zap"
@@ -70,7 +71,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	}
 	log.Debug("DataNode address", zap.String("address", addr))
 
-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	datapb.RegisterDataNodeServer(s.grpcServer, s)
 
 	ctx, cancel := context.WithCancel(s.ctx)
@@ -84,12 +89,12 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 
 }
 
-func (s *Server) SetMasterServiceInterface(ms dn.MasterServiceInterface) error {
-	return s.impl.SetMasterServiceInterface(ms)
+func (s *Server) SetMasterServiceInterface(ctx context.Context, ms dn.MasterServiceInterface) error {
+	return s.impl.SetMasterServiceInterface(ctx, ms)
 }
 
-func (s *Server) SetDataServiceInterface(ds dn.DataServiceInterface) error {
-	return s.impl.SetDataServiceInterface(ds)
+func (s *Server) SetDataServiceInterface(ctx context.Context, ds dn.DataServiceInterface) error {
+	return s.impl.SetDataServiceInterface(ctx, ds)
 }
 
 func (s *Server) Run() error {
@@ -124,6 +129,7 @@ func (s *Server) Stop() error {
 }
 
 func (s *Server) init() error {
+	ctx := context.Background()
 	Params.Init()
 	if !funcutil.CheckPortAvailable(Params.Port) {
 		Params.Port = funcutil.GetAvailablePort()
@@ -132,10 +138,25 @@ func (s *Server) init() error {
 	Params.LoadFromArgs()
 
 	log.Debug("DataNode port", zap.Int("port", Params.Port))
+	// TODO
+	cfg := &config.Configuration{
+		ServiceName: fmt.Sprintf("data_node ip: %s, port: %d", Params.IP, Params.Port),
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	s.closer = closer
 
 	s.wg.Add(1)
 	go s.startGrpcLoop(Params.Port)
 	// wait for grpc server loop start
-	err := <-s.grpcErrChan
+	err = <-s.grpcErrChan
 	if err != nil {
 		return err
 	}
@@ -155,13 +176,13 @@ func (s *Server) init() error {
 	if err = masterClient.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentHealthy(masterClient, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, masterClient, "MasterService", 100, time.Millisecond*200)
 
 	if err != nil {
 		panic(err)
 	}
 
-	if err := s.SetMasterServiceInterface(masterClient); err != nil {
+	if err := s.SetMasterServiceInterface(ctx, masterClient); err != nil {
 		panic(err)
 	}
 
@@ -175,11 +196,11 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
-	if err := s.SetDataServiceInterface(dataService); err != nil {
+	if err := s.SetDataServiceInterface(ctx, dataService); err != nil {
 		panic(err)
 	}
 
@@ -190,21 +211,6 @@ func (s *Server) init() error {
 	s.impl.NodeID = dn.Params.NodeID
 	s.impl.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
 
-	// TODO
-	cfg := &config.Configuration{
-		ServiceName: fmt.Sprintf("data_node_%d", s.impl.NodeID),
-		Sampler: &config.SamplerConfig{
-			Type:  "const",
-			Param: 1,
-		},
-	}
-	tracer, closer, err := cfg.NewTracer()
-	if err != nil {
-		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
-	}
-	opentracing.SetGlobalTracer(tracer)
-	s.closer = closer
-
 	if err := s.impl.Init(); err != nil {
 		log.Warn("impl init error: ", zap.Error(err))
 		return err
@@ -217,11 +223,11 @@ func (s *Server) start() error {
 }
 
 func (s *Server) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return s.impl.GetComponentStates()
+	return s.impl.GetComponentStates(ctx)
 }
 
 func (s *Server) WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
-	return s.impl.WatchDmChannels(in)
+	return s.impl.WatchDmChannels(ctx, in)
 }
 
 func (s *Server) FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) (*commonpb.Status, error) {
@@ -233,5 +239,5 @@ func (s *Server) FlushSegments(ctx context.Context, in *datapb.FlushSegRequest)
 	}
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_SUCCESS,
-	}, s.impl.FlushSegments(in)
+	}, s.impl.FlushSegments(ctx, in)
 }
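Note (illustrative, not in the commit): the Jaeger block moved ahead of startGrpcLoop because the interceptors capture opentracing.GlobalTracer() when the server is built; installing the tracer afterwards would leave them with the no-op tracer. A standalone sketch of that lifecycle, assuming jaeger-client-go:

	package main

	import (
		"fmt"
		"io"

		"github.com/opentracing/opentracing-go"
		"github.com/uber/jaeger-client-go/config"
	)

	// Install the global tracer before any grpc.NewServer / grpc.DialContext
	// call reads it; keep the closer so buffered spans are flushed on shutdown.
	func initTracer(service string) (io.Closer, error) {
		cfg := &config.Configuration{
			ServiceName: service,
			Sampler:     &config.SamplerConfig{Type: "const", Param: 1}, // sample everything
		}
		tracer, closer, err := cfg.NewTracer()
		if err != nil {
			return nil, fmt.Errorf("cannot init Jaeger: %v", err)
		}
		opentracing.SetGlobalTracer(tracer)
		return closer, nil
	}

	func main() {
		closer, err := initTracer("data_node_demo") // placeholder service name
		if err != nil {
			panic(err)
		}
		defer closer.Close()
	}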

--- distributed DataService: grpc client wrapper ---
@@ -4,6 +4,8 @@ import (
 	"context"
 	"time"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 
@@ -30,8 +32,13 @@ func NewClient(addr string) *Client {
 }
 
 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
-		conn, err := grpc.DialContext(c.ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -56,62 +63,62 @@ func (c *Client) Stop() error {
 	return c.conn.Close()
 }
 
-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return c.grpcClient.GetComponentStates(context.Background(), &commonpb.Empty{})
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return c.grpcClient.GetComponentStates(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
-	return c.grpcClient.GetTimeTickChannel(context.Background(), &commonpb.Empty{})
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetTimeTickChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
-	return c.grpcClient.GetStatisticsChannel(context.Background(), &commonpb.Empty{})
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetStatisticsChannel(ctx, &commonpb.Empty{})
}
 
-func (c *Client) RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
-	return c.grpcClient.RegisterNode(context.Background(), req)
+func (c *Client) RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
+	return c.grpcClient.RegisterNode(ctx, req)
 }
 
-func (c *Client) Flush(req *datapb.FlushRequest) (*commonpb.Status, error) {
-	return c.grpcClient.Flush(context.Background(), req)
+func (c *Client) Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error) {
+	return c.grpcClient.Flush(ctx, req)
 }
 
-func (c *Client) AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
-	return c.grpcClient.AssignSegmentID(context.Background(), req)
+func (c *Client) AssignSegmentID(ctx context.Context, req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
+	return c.grpcClient.AssignSegmentID(ctx, req)
 }
 
-func (c *Client) ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
-	return c.grpcClient.ShowSegments(context.Background(), req)
+func (c *Client) ShowSegments(ctx context.Context, req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
+	return c.grpcClient.ShowSegments(ctx, req)
 }
 
-func (c *Client) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
-	return c.grpcClient.GetSegmentStates(context.Background(), req)
+func (c *Client) GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
+	return c.grpcClient.GetSegmentStates(ctx, req)
 }
 
-func (c *Client) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
-	return c.grpcClient.GetInsertBinlogPaths(context.Background(), req)
+func (c *Client) GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
+	return c.grpcClient.GetInsertBinlogPaths(ctx, req)
 }
 
-func (c *Client) GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
-	return c.grpcClient.GetInsertChannels(context.Background(), req)
+func (c *Client) GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
+	return c.grpcClient.GetInsertChannels(ctx, req)
 }
 
-func (c *Client) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
-	return c.grpcClient.GetCollectionStatistics(context.Background(), req)
+func (c *Client) GetCollectionStatistics(ctx context.Context, req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
	return c.grpcClient.GetCollectionStatistics(ctx, req)
 }
 
-func (c *Client) GetPartitionStatistics(req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
-	return c.grpcClient.GetPartitionStatistics(context.Background(), req)
+func (c *Client) GetPartitionStatistics(ctx context.Context, req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
+	return c.grpcClient.GetPartitionStatistics(ctx, req)
 }
 
-func (c *Client) GetSegmentInfoChannel() (*milvuspb.StringResponse, error) {
-	return c.grpcClient.GetSegmentInfoChannel(context.Background(), &commonpb.Empty{})
+func (c *Client) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetSegmentInfoChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetCount(req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error) {
-	return c.grpcClient.GetCount(context.Background(), req)
+func (c *Client) GetCount(ctx context.Context, req *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error) {
+	return c.grpcClient.GetCount(ctx, req)
 }
 
-func (c *Client) GetSegmentInfo(req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error) {
-	return c.grpcClient.GetSegmentInfo(context.Background(), req)
+func (c *Client) GetSegmentInfo(ctx context.Context, req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error) {
+	return c.grpcClient.GetSegmentInfo(ctx, req)
 }
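Note (illustrative, not in the commit): all of these wrappers follow one mechanical rewrite: accept ctx, drop context.Background(), forward. A hypothetical health poll built on the new signature (the repo's funcutil helpers, now also ctx-aware, do something similar; the exact ComponentInfo field names are assumptions):

	// Hypothetical sketch over the context-aware client.
	func waitHealthy(ctx context.Context, c *Client) error {
		ticker := time.NewTicker(200 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-ticker.C:
				states, err := c.GetComponentStates(ctx)
				if err == nil && states.State.StateCode == internalpb2.StateCode_HEALTHY {
					return nil
				}
			}
		}
	}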

--- distributed DataService: grpc server wrapper ---
@@ -12,6 +12,7 @@ import (
 
 	"google.golang.org/grpc"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	msc "github.com/zilliztech/milvus-distributed/internal/distributed/masterservice/client"
 
 	"github.com/opentracing/opentracing-go"
@@ -96,7 +97,8 @@ func (s *Server) init() error {
 	}
 	s.impl.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
 
-	err = funcutil.WaitForComponentInitOrHealthy(client, "MasterService", 100, time.Millisecond*200)
+	ctx := context.Background()
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, client, "MasterService", 100, time.Millisecond*200)
 
 	if err != nil {
 		panic(err)
@@ -126,7 +128,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.ctx)
 	defer cancel()
 
-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	datapb.RegisterDataServiceServer(s.grpcServer, s)
 
 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
@@ -174,61 +180,61 @@ func (s *Server) Run() error {
 }
 
 func (s *Server) GetSegmentInfo(ctx context.Context, request *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error) {
-	return s.impl.GetSegmentInfo(request)
+	return s.impl.GetSegmentInfo(ctx, request)
 }
 
 func (s *Server) RegisterNode(ctx context.Context, request *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
-	return s.impl.RegisterNode(request)
+	return s.impl.RegisterNode(ctx, request)
 }
 
 func (s *Server) Flush(ctx context.Context, request *datapb.FlushRequest) (*commonpb.Status, error) {
-	return s.impl.Flush(request)
+	return s.impl.Flush(ctx, request)
 }
 
 func (s *Server) AssignSegmentID(ctx context.Context, request *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
-	return s.impl.AssignSegmentID(request)
+	return s.impl.AssignSegmentID(ctx, request)
 }
 
 func (s *Server) ShowSegments(ctx context.Context, request *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
-	return s.impl.ShowSegments(request)
+	return s.impl.ShowSegments(ctx, request)
 }
 
 func (s *Server) GetSegmentStates(ctx context.Context, request *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
-	return s.impl.GetSegmentStates(request)
+	return s.impl.GetSegmentStates(ctx, request)
 }
 
 func (s *Server) GetInsertBinlogPaths(ctx context.Context, request *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
-	return s.impl.GetInsertBinlogPaths(request)
+	return s.impl.GetInsertBinlogPaths(ctx, request)
 }
 
 func (s *Server) GetInsertChannels(ctx context.Context, request *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
-	return s.impl.GetInsertChannels(request)
+	return s.impl.GetInsertChannels(ctx, request)
 }
 
 func (s *Server) GetCollectionStatistics(ctx context.Context, request *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
-	return s.impl.GetCollectionStatistics(request)
+	return s.impl.GetCollectionStatistics(ctx, request)
 }
 
 func (s *Server) GetPartitionStatistics(ctx context.Context, request *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
-	return s.impl.GetPartitionStatistics(request)
+	return s.impl.GetPartitionStatistics(ctx, request)
 }
 
 func (s *Server) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return s.impl.GetComponentStates()
+	return s.impl.GetComponentStates(ctx)
 }
 
 func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	return s.impl.GetTimeTickChannel()
+	return s.impl.GetTimeTickChannel(ctx)
 }
 
 func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	return s.impl.GetStatisticsChannel()
+	return s.impl.GetStatisticsChannel(ctx)
 }
 
 func (s *Server) GetSegmentInfoChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	return s.impl.GetSegmentInfoChannel()
+	return s.impl.GetSegmentInfoChannel(ctx)
 }
 
 func (s *Server) GetCount(ctx context.Context, request *datapb.CollectionCountRequest) (*datapb.CollectionCountResponse, error) {
-	return s.impl.GetCount(request)
+	return s.impl.GetCount(ctx, request)
 }

--- distributed IndexNode: grpc client wrapper ---
@@ -8,6 +8,8 @@ import (
 
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@@ -21,8 +23,13 @@ type Client struct {
 }
 
 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
-		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -44,25 +51,23 @@ func (c *Client) Stop() error {
 	return nil
 }
 
-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return c.grpcClient.GetComponentStates(context.Background(), &commonpb.Empty{})
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return c.grpcClient.GetComponentStates(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
-	return c.grpcClient.GetTimeTickChannel(context.Background(), &commonpb.Empty{})
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetTimeTickChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
-	return c.grpcClient.GetStatisticsChannel(context.Background(), &commonpb.Empty{})
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetStatisticsChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) BuildIndex(req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
-	ctx := context.TODO()
+func (c *Client) BuildIndex(ctx context.Context, req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
 	return c.grpcClient.BuildIndex(ctx, req)
 }
 
-func (c *Client) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
-	ctx := context.TODO()
+func (c *Client) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
 	return c.grpcClient.DropIndex(ctx, req)
 }
|
||||
|
||||
|
|
|
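The client half of the tracing change is the Init() hunk above: every DialContext gains OpenTracing unary and stream interceptors. A self-contained sketch of the same wiring (the address is whatever endpoint the client targets):

    package main

    import (
    	"context"

    	otgrpc "github.com/opentracing-contrib/go-grpc"
    	"github.com/opentracing/opentracing-go"
    	"google.golang.org/grpc"
    )

    // dialTraced dials a gRPC endpoint with interceptors that inject the
    // active span context into outgoing metadata, so the server side can
    // join the same trace.
    func dialTraced(ctx context.Context, address string) (*grpc.ClientConn, error) {
    	tracer := opentracing.GlobalTracer()
    	return grpc.DialContext(ctx, address,
    		grpc.WithInsecure(), grpc.WithBlock(),
    		grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)),
    		grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
    }

Note that GlobalTracer() is resolved once when Init() runs, so a real tracer (for example the Jaeger tracer set up later in this commit) should be registered before the clients connect; otherwise the interceptors capture the default no-op tracer.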
@@ -7,6 +7,8 @@ import (
 	"strconv"
 	"sync"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	grpcindexserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
 	"github.com/zilliztech/milvus-distributed/internal/indexnode"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@@ -56,7 +58,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.loopCtx)
 	defer cancel()

-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	indexpb.RegisterIndexNodeServer(s.grpcServer, s)
 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
 	if err := s.grpcServer.Serve(lis); err != nil {
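On the server side, startGrpcLoop mirrors the client: grpc.NewServer is built with OpenTracing server interceptors that extract the span context injected by the client interceptors and open a child span around each handler. A sketch of the construction, with listener setup and service registration omitted:

    // Sketch: build a traced gRPC server; registering concrete services
    // would follow, as in the hunk above.
    func newTracedServer() *grpc.Server {
    	tracer := opentracing.GlobalTracer()
    	return grpc.NewServer(
    		grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)),
    		grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)))
    }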
@@ -9,6 +9,8 @@ import (
 	"google.golang.org/grpc"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@@ -25,8 +27,13 @@ type Client struct {
 }

 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
-		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -47,12 +54,11 @@ func (c *Client) Stop() error {
 	return nil
 }

-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	ctx := context.TODO()
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	return c.grpcClient.GetComponentStates(ctx, &commonpb.Empty{})
 }

-func (c *Client) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -60,7 +66,7 @@ func (c *Client) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
 	}, nil
 }

-func (c *Client) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -68,32 +74,26 @@ func (c *Client) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
 	}, nil
 }

-func (c *Client) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
-	ctx := context.TODO()
+func (c *Client) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
 	return c.grpcClient.RegisterNode(ctx, req)
 }

-func (c *Client) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
-	ctx := context.TODO()
+func (c *Client) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
 	return c.grpcClient.BuildIndex(ctx, req)
 }

-func (c *Client) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
-	ctx := context.TODO()
+func (c *Client) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
 	return c.grpcClient.DropIndex(ctx, req)
 }

-func (c *Client) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
-	ctx := context.TODO()
+func (c *Client) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
 	return c.grpcClient.GetIndexStates(ctx, req)
 }
-func (c *Client) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
-	ctx := context.TODO()
+func (c *Client) GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
 	return c.grpcClient.GetIndexFilePaths(ctx, req)
 }

-func (c *Client) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
-	ctx := context.TODO()
+func (c *Client) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
 	return c.grpcClient.NotifyBuildIndex(ctx, nty)
 }
@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"sync"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	"github.com/opentracing/opentracing-go"
 	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/indexservice"
@@ -93,33 +94,27 @@ func (s *Server) Stop() error {
 }

 func (s *Server) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
-
-	return s.impl.RegisterNode(req)
+	return s.impl.RegisterNode(ctx, req)
 }

 func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
-
-	return s.impl.BuildIndex(req)
+	return s.impl.BuildIndex(ctx, req)
 }

 func (s *Server) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
-
-	return s.impl.GetIndexStates(req)
+	return s.impl.GetIndexStates(ctx, req)
 }

 func (s *Server) DropIndex(ctx context.Context, request *indexpb.DropIndexRequest) (*commonpb.Status, error) {
-
-	return s.impl.DropIndex(request)
+	return s.impl.DropIndex(ctx, request)
 }

 func (s *Server) GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
-
-	return s.impl.GetIndexFilePaths(req)
+	return s.impl.GetIndexFilePaths(ctx, req)
 }

 func (s *Server) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
-
-	return s.impl.NotifyBuildIndex(nty)
+	return s.impl.NotifyBuildIndex(ctx, nty)
 }

 func (s *Server) startGrpcLoop(grpcPort int) {
@@ -137,7 +132,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.loopCtx)
 	defer cancel()

-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	indexpb.RegisterIndexServiceServer(s.grpcServer, s)

 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
@@ -148,40 +147,15 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 }

 func (s *Server) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return s.impl.GetComponentStates()
+	return s.impl.GetComponentStates(ctx)
 }

 func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	resp := &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-		},
-	}
-	channel, err := s.impl.GetTimeTickChannel()
-	if err != nil {
-		resp.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
-		resp.Status.Reason = err.Error()
-		return resp, nil
-	}
-
-	resp.Value = channel
-	return resp, nil
+	return s.impl.GetTimeTickChannel(ctx)
 }

 func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	resp := &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-		},
-	}
-	channel, err := s.impl.GetStatisticsChannel()
-	if err != nil {
-		resp.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
-		resp.Status.Reason = err.Error()
-		return resp, nil
-	}
-	resp.Value = channel
-	return resp, nil
+	return s.impl.GetStatisticsChannel(ctx)
 }

 func NewServer(ctx context.Context) (*Server, error) {
@@ -4,7 +4,8 @@ import (
 	"context"
 	"time"

-	"github.com/zilliztech/milvus-distributed/internal/errors"
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
@@ -36,11 +37,16 @@ func NewClient(addr string, timeout time.Duration) (*GrpcClient, error) {
 }

 func (c *GrpcClient) Init() error {
+	tracer := opentracing.GlobalTracer()
 	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
 	defer cancel()
 	var err error
 	for i := 0; i < c.retry; i++ {
-		if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock()); err == nil {
+		if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer))); err == nil {
 			break
 		}
 	}
@@ -58,146 +64,86 @@ func (c *GrpcClient) Stop() error {
 	return c.conn.Close()
 }

-func (c *GrpcClient) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+// TODO: timeout need to be propagated through ctx
+func (c *GrpcClient) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	return c.grpcClient.GetComponentStatesRPC(ctx, &commonpb.Empty{})
 }

 //DDL request
-func (c *GrpcClient) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
 	return c.grpcClient.CreateCollection(ctx, in)
 }

-func (c *GrpcClient) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
 	return c.grpcClient.DropCollection(ctx, in)
 }
-func (c *GrpcClient) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
 	return c.grpcClient.HasCollection(ctx, in)
 }
-func (c *GrpcClient) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
 	return c.grpcClient.DescribeCollection(ctx, in)
 }

-func (c *GrpcClient) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
 	return c.grpcClient.ShowCollections(ctx, in)
 }

-func (c *GrpcClient) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
 	return c.grpcClient.CreatePartition(ctx, in)
 }

-func (c *GrpcClient) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
 	return c.grpcClient.DropPartition(ctx, in)
 }

-func (c *GrpcClient) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
 	return c.grpcClient.HasPartition(ctx, in)
 }

-func (c *GrpcClient) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	return c.grpcClient.ShowPartitions(ctx, in)
 }

 //index builder service
-func (c *GrpcClient) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
 	return c.grpcClient.CreateIndex(ctx, in)
 }

-func (c *GrpcClient) DropIndex(in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
 	return c.grpcClient.DropIndex(ctx, in)
 }

-func (c *GrpcClient) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
 	return c.grpcClient.DescribeIndex(ctx, in)
 }

 //global timestamp allocator
-func (c *GrpcClient) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
 	return c.grpcClient.AllocTimestamp(ctx, in)
 }
-func (c *GrpcClient) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
 	return c.grpcClient.AllocID(ctx, in)
 }

 //receiver time tick from proxy service, and put it into this channel
-func (c *GrpcClient) GetTimeTickChannel() (string, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
-	rsp, err := c.grpcClient.GetTimeTickChannelRPC(ctx, &commonpb.Empty{})
-	if err != nil {
-		return "", err
-	}
-	if rsp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
-		return "", errors.Errorf("%s", rsp.Status.Reason)
-	}
-	return rsp.Value, nil
+func (c *GrpcClient) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetTimeTickChannelRPC(ctx, &commonpb.Empty{})
 }

 //receive ddl from rpc and time tick from proxy service, and put them into this channel
-func (c *GrpcClient) GetDdChannel() (string, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
-	rsp, err := c.grpcClient.GetDdChannelRPC(ctx, &commonpb.Empty{})
-	if err != nil {
-		return "", err
-	}
-	if rsp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
-		return "", errors.Errorf("%s", rsp.Status.Reason)
-	}
-	return rsp.Value, nil
+func (c *GrpcClient) GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetDdChannelRPC(ctx, &commonpb.Empty{})
 }

 //just define a channel, not used currently
-func (c *GrpcClient) GetStatisticsChannel() (string, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
-	rsp, err := c.grpcClient.GetStatisticsChannelRPC(ctx, &commonpb.Empty{})
-	if err != nil {
-		return "", err
-	}
-	if rsp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
-		return "", errors.Errorf("%s", rsp.Status.Reason)
-	}
-	return rsp.Value, nil
+func (c *GrpcClient) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetStatisticsChannelRPC(ctx, &commonpb.Empty{})
 }

-func (c *GrpcClient) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
 	return c.grpcClient.DescribeSegment(ctx, in)
 }

-func (c *GrpcClient) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), c.grpcTimeout)
-	defer cancel()
+func (c *GrpcClient) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
 	return c.grpcClient.ShowSegments(ctx, in)
 }
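Worth noting in the master-service client hunk above: every method used to open its own context.WithTimeout(context.Background(), c.grpcTimeout), which both capped each call at a fixed deadline and severed it from any caller trace or cancellation. After the change the caller owns the context, and, per the TODO the diff adds, the timeout is expected to travel with it. An illustrative call site (names stand in for the refactored GrpcClient):

    // The deadline and any active span now ride along with the RPC.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    status, err := client.CreateCollection(ctx, req)
    if err != nil {
    	log.Fatal(err)
    }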
@@ -37,8 +37,9 @@ func TestGrpcService(t *testing.T) {
 		t.Log("newParams.Address:", Params.Address)
 	}

+	ctx := context.Background()
 	msFactory := pulsarms.NewFactory()
-	svr, err := NewServer(context.Background(), msFactory)
+	svr, err := NewServer(ctx, msFactory)
 	assert.Nil(t, err)
 	svr.connectQueryService = false
 	svr.connectProxyService = false
@@ -192,7 +193,7 @@ func TestGrpcService(t *testing.T) {
 		Schema: sbf,
 	}

-	status, err := cli.CreateCollection(req)
+	status, err := cli.CreateCollection(ctx, req)
 	assert.Nil(t, err)

 	assert.Equal(t, len(createCollectionArray), 1)
@@ -203,7 +204,7 @@ func TestGrpcService(t *testing.T) {
 	req.Base.MsgID = 101
 	req.Base.Timestamp = 101
 	req.Base.SourceID = 101
-	status, err = cli.CreateCollection(req)
+	status, err = cli.CreateCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)

@@ -211,7 +212,7 @@ func TestGrpcService(t *testing.T) {
 	req.Base.Timestamp = 102
 	req.Base.SourceID = 102
 	req.CollectionName = "testColl-again"
-	status, err = cli.CreateCollection(req)
+	status, err = cli.CreateCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)

@@ -222,7 +223,7 @@ func TestGrpcService(t *testing.T) {
 	req.Base.MsgID = 103
 	req.Base.Timestamp = 103
 	req.Base.SourceID = 103
-	status, err = cli.CreateCollection(req)
+	status, err = cli.CreateCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, len(createCollectionArray), 2)
@@ -238,7 +239,7 @@ func TestGrpcService(t *testing.T) {
 	req.Base.MsgID = 103
 	req.Base.Timestamp = 103
 	req.Base.SourceID = 103
-	status, err = cli.CreateCollection(req)
+	status, err = cli.CreateCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
 	matched, err := regexp.MatchString("input timestamp = [0-9]+, last dd time stamp = [0-9]+", status.Reason)
@@ -257,7 +258,7 @@ func TestGrpcService(t *testing.T) {
 		DbName:         "testDb",
 		CollectionName: "testColl",
 	}
-	rsp, err := cli.HasCollection(req)
+	rsp, err := cli.HasCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.Value, true)
@@ -272,7 +273,7 @@ func TestGrpcService(t *testing.T) {
 		DbName:         "testDb",
 		CollectionName: "testColl2",
 	}
-	rsp, err = cli.HasCollection(req)
+	rsp, err = cli.HasCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.Value, false)
@@ -288,7 +289,7 @@ func TestGrpcService(t *testing.T) {
 		DbName:         "testDb",
 		CollectionName: "testColl2",
 	}
-	rsp, err = cli.HasCollection(req)
+	rsp, err = cli.HasCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.Value, false)
@@ -307,7 +308,7 @@ func TestGrpcService(t *testing.T) {
 		DbName:         "testDb",
 		CollectionName: "testColl",
 	}
-	rsp, err := cli.DescribeCollection(req)
+	rsp, err := cli.DescribeCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.Schema.Name, "testColl")
@@ -324,7 +325,7 @@ func TestGrpcService(t *testing.T) {
 		},
 		DbName: "testDb",
 	}
-	rsp, err := cli.ShowCollections(req)
+	rsp, err := cli.ShowCollections(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.ElementsMatch(t, rsp.CollectionNames, []string{"testColl", "testColl-again"})
@@ -343,7 +344,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionName: "testColl",
 		PartitionName:  "testPartition",
 	}
-	status, err := cli.CreatePartition(req)
+	status, err := cli.CreatePartition(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	collMeta, err := core.MetaTable.GetCollectionByName("testColl")
@@ -367,7 +368,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionName: "testColl",
 		PartitionName:  "testPartition",
 	}
-	rsp, err := cli.HasPartition(req)
+	rsp, err := cli.HasPartition(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.Value, true)
@@ -387,7 +388,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionName: "testColl",
 		CollectionID:   coll.ID,
 	}
-	rsp, err := cli.ShowPartitions(req)
+	rsp, err := cli.ShowPartitions(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, len(rsp.PartitionNames), 2)
@@ -422,7 +423,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionID: coll.ID,
 		PartitionID:  partID,
 	}
-	rsp, err := cli.ShowSegments(req)
+	rsp, err := cli.ShowSegments(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	assert.Equal(t, rsp.SegmentIDs[0], int64(1000))
@@ -450,7 +451,7 @@ func TestGrpcService(t *testing.T) {
 		collMeta, err := core.MetaTable.GetCollectionByName("testColl")
 		assert.Nil(t, err)
 		assert.Equal(t, len(collMeta.FieldIndexes), 0)
-		rsp, err := cli.CreateIndex(req)
+		rsp, err := cli.CreateIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		collMeta, err = core.MetaTable.GetCollectionByName("testColl")
@@ -463,7 +464,7 @@ func TestGrpcService(t *testing.T) {
 		assert.ElementsMatch(t, binlogPathArray, []string{"file1", "file2", "file3"})

 		req.FieldName = "no field"
-		rsp, err = cli.CreateIndex(req)
+		rsp, err = cli.CreateIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.NotEqual(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	})
@@ -482,7 +483,7 @@ func TestGrpcService(t *testing.T) {
 			CollectionID: coll.ID,
 			SegmentID:    1000,
 		}
-		rsp, err := cli.DescribeSegment(req)
+		rsp, err := cli.DescribeSegment(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		t.Logf("index id = %d", rsp.IndexID)
@@ -501,7 +502,7 @@ func TestGrpcService(t *testing.T) {
 			FieldName:      "vector",
 			IndexName:      "",
 		}
-		rsp, err := cli.DescribeIndex(req)
+		rsp, err := cli.DescribeIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, len(rsp.IndexDescriptions), 1)
@@ -540,7 +541,7 @@ func TestGrpcService(t *testing.T) {
 			FieldName:      "vector",
 			IndexName:      "",
 		}
-		rsp, err := cli.DescribeIndex(req)
+		rsp, err := cli.DescribeIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, len(rsp.IndexDescriptions), 1)
@@ -564,7 +565,7 @@ func TestGrpcService(t *testing.T) {
 		idx, err := core.MetaTable.GetIndexByName("testColl", "vector", cms.Params.DefaultIndexName)
 		assert.Nil(t, err)
 		assert.Equal(t, len(idx), 1)
-		rsp, err := cli.DropIndex(req)
+		rsp, err := cli.DropIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)

@@ -587,7 +588,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionName: "testColl",
 		PartitionName:  "testPartition",
 	}
-	status, err := cli.DropPartition(req)
+	status, err := cli.DropPartition(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	collMeta, err := core.MetaTable.GetCollectionByName("testColl")
@@ -610,7 +611,7 @@ func TestGrpcService(t *testing.T) {
 		CollectionName: "testColl",
 	}

-	status, err := cli.DropCollection(req)
+	status, err := cli.DropCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, len(dropCollectionArray), 1)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
@@ -629,7 +630,7 @@ func TestGrpcService(t *testing.T) {
 		DbName:         "testDb",
 		CollectionName: "testColl",
 	}
-	status, err = cli.DropCollection(req)
+	status, err = cli.DropCollection(ctx, req)
 	assert.Nil(t, err)
 	assert.Equal(t, len(dropCollectionArray), 1)
 	assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
@@ -10,6 +10,7 @@ import (
 	"net"
 	"sync"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	"github.com/opentracing/opentracing-go"
 	"github.com/uber/jaeger-client-go/config"
 	dsc "github.com/zilliztech/milvus-distributed/internal/distributed/dataservice/client"
@@ -101,6 +102,7 @@ func (s *Server) Run() error {

 func (s *Server) init() error {
 	Params.Init()
+	ctx := context.Background()

 	log.Info("init params done")

@@ -118,12 +120,12 @@ func (s *Server) init() error {
 			panic(err)
 		}

-		err := funcutil.WaitForComponentInitOrHealthy(proxyService, "ProxyService", 100, 200*time.Millisecond)
+		err := funcutil.WaitForComponentInitOrHealthy(ctx, proxyService, "ProxyService", 100, 200*time.Millisecond)
 		if err != nil {
 			panic(err)
 		}

-		if err = s.core.SetProxyService(proxyService); err != nil {
+		if err = s.core.SetProxyService(ctx, proxyService); err != nil {
 			panic(err)
 		}
 	}
@@ -136,12 +138,12 @@ func (s *Server) init() error {
 		if err := dataService.Start(); err != nil {
 			panic(err)
 		}
-		err := funcutil.WaitForComponentInitOrHealthy(dataService, "DataService", 100, 200*time.Millisecond)
+		err := funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, 200*time.Millisecond)
 		if err != nil {
 			panic(err)
 		}

-		if err = s.core.SetDataService(dataService); err != nil {
+		if err = s.core.SetDataService(ctx, dataService); err != nil {
 			panic(err)
 		}
 	}
@@ -152,7 +154,7 @@ func (s *Server) init() error {
 			panic(err)
 		}

-		if err := s.core.SetIndexService(indexService); err != nil {
+		if err := s.core.SetIndexService(ctx, indexService); err != nil {
 			panic(err)

 		}
@@ -168,7 +170,7 @@ func (s *Server) init() error {
 		if err = queryService.Start(); err != nil {
 			panic(err)
 		}
-		if err = s.core.SetQueryService(queryService); err != nil {
+		if err = s.core.SetQueryService(ctx, queryService); err != nil {
 			panic(err)
 		}
 	}
@@ -204,7 +206,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.ctx)
 	defer cancel()

-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	masterpb.RegisterMasterServiceServer(s.grpcServer, s)

 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
@@ -250,135 +256,87 @@ func (s *Server) Stop() error {
 }

 func (s *Server) GetComponentStatesRPC(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return s.core.GetComponentStates()
+	return s.core.GetComponentStates(ctx)
 }

 //DDL request
 func (s *Server) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
-	return s.core.CreateCollection(in)
+	return s.core.CreateCollection(ctx, in)
 }

 func (s *Server) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
-	return s.core.DropCollection(in)
+	return s.core.DropCollection(ctx, in)
 }

 func (s *Server) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
-	return s.core.HasCollection(in)
+	return s.core.HasCollection(ctx, in)
 }

 func (s *Server) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
-	return s.core.DescribeCollection(in)
+	return s.core.DescribeCollection(ctx, in)
 }

 func (s *Server) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
-	return s.core.ShowCollections(in)
+	return s.core.ShowCollections(ctx, in)
 }

 func (s *Server) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
-	return s.core.CreatePartition(in)
+	return s.core.CreatePartition(ctx, in)
 }

 func (s *Server) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
-	return s.core.DropPartition(in)
+	return s.core.DropPartition(ctx, in)
 }

 func (s *Server) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
-	return s.core.HasPartition(in)
+	return s.core.HasPartition(ctx, in)
 }

 func (s *Server) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
-	return s.core.ShowPartitions(in)
+	return s.core.ShowPartitions(ctx, in)
 }

 //index builder service
 func (s *Server) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
-	return s.core.CreateIndex(in)
+	return s.core.CreateIndex(ctx, in)
 }

 func (s *Server) DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
-	return s.core.DropIndex(in)
+	return s.core.DropIndex(ctx, in)
 }

 func (s *Server) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
-	return s.core.DescribeIndex(in)
+	return s.core.DescribeIndex(ctx, in)
 }

 //global timestamp allocator
 func (s *Server) AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
-	return s.core.AllocTimestamp(in)
+	return s.core.AllocTimestamp(ctx, in)
 }

 func (s *Server) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
-	return s.core.AllocID(in)
+	return s.core.AllocID(ctx, in)
 }

 //receiver time tick from proxy service, and put it into this channel
 func (s *Server) GetTimeTickChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	rsp, err := s.core.GetTimeTickChannel()
-	if err != nil {
-		return &milvuspb.StringResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-			Value: "",
-		}, nil
-	}
-	return &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-			Reason:    "",
-		},
-		Value: rsp,
-	}, nil
+	return s.core.GetTimeTickChannel(ctx)
 }

 //receive ddl from rpc and time tick from proxy service, and put them into this channel
 func (s *Server) GetDdChannelRPC(ctx context.Context, in *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	rsp, err := s.core.GetDdChannel()
-	if err != nil {
-		return &milvuspb.StringResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-			Value: "",
-		}, nil
-	}
-	return &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-			Reason:    "",
-		},
-		Value: rsp,
-	}, nil
+	return s.core.GetDdChannel(ctx)
 }

 //just define a channel, not used currently
 func (s *Server) GetStatisticsChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	rsp, err := s.core.GetStatisticsChannel()
-	if err != nil {
-		return &milvuspb.StringResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-			Value: "",
-		}, nil
-	}
-	return &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-			Reason:    "",
-		},
-		Value: rsp,
-	}, nil
+	return s.core.GetStatisticsChannel(ctx)
 }

 func (s *Server) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
-	return s.core.DescribeSegment(in)
+	return s.core.DescribeSegment(ctx, in)
 }

 func (s *Server) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
-	return s.core.ShowSegments(in)
+	return s.core.ShowSegments(ctx, in)
 }
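The three channel RPCs above shrink because the (string, error) to StringResponse conversion they each duplicated now happens behind the core interface, which returns a ready-made *milvuspb.StringResponse. The deleted boilerplate had this generic shape (wrapString is a hypothetical helper, shown only to name the pattern):

    // Hypothetical helper equivalent to what each handler inlined:
    // map (value, err) onto a StringResponse with an explicit error code.
    func wrapString(value string, err error) *milvuspb.StringResponse {
    	if err != nil {
    		return &milvuspb.StringResponse{
    			Status: &commonpb.Status{
    				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
    				Reason:    err.Error(),
    			},
    		}
    	}
    	return &milvuspb.StringResponse{
    		Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
    		Value:  value,
    	}
    }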
@@ -4,8 +4,9 @@ import (
 	"context"
 	"time"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-
 	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 	"google.golang.org/grpc"
@@ -18,8 +19,13 @@ type Client struct {
 }

 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
-		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -41,8 +47,8 @@ func (c *Client) Stop() error {
 	return nil
 }

-func (c *Client) InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
-	return c.grpcClient.InvalidateCollectionMetaCache(c.ctx, request)
+func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
+	return c.grpcClient.InvalidateCollectionMetaCache(ctx, request)
 }

 func NewClient(ctx context.Context, address string) *Client {
@@ -2,6 +2,7 @@ package grpcproxynode

 import (
+	"context"
 	"fmt"
 	"io"
 	"log"
 	"net"
@@ -11,6 +12,7 @@ import (

 	"google.golang.org/grpc"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	grpcdataserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/dataservice/client"
 	grpcindexserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
 	grpcmasterserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/masterservice/client"
@@ -18,6 +20,7 @@ import (
 	grpcqueryserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/queryservice/client"

 	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@@ -75,7 +78,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.ctx)
 	defer cancel()

-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	proxypb.RegisterProxyNodeServiceServer(s.grpcServer, s)
 	milvuspb.RegisterMilvusServiceServer(s.grpcServer, s)

@@ -115,6 +122,21 @@ func (s *Server) init() error {
 	log.Println("proxy port: ", Params.Port)
 	log.Println("proxy address: ", Params.Address)

+	// TODO
+	cfg := &config.Configuration{
+		ServiceName: fmt.Sprintf("proxy_node ip: %s, port: %d", Params.IP, Params.Port),
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	s.closer = closer
+
 	defer func() {
 		if err != nil {
 			err2 := s.Stop()
@@ -233,102 +255,102 @@ func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, request *pro
 }

 func (s *Server) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.CreateCollection(request)
+	return s.impl.CreateCollection(ctx, request)
 }

 func (s *Server) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.DropCollection(request)
+	return s.impl.DropCollection(ctx, request)
 }

 func (s *Server) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
-	return s.impl.HasCollection(request)
+	return s.impl.HasCollection(ctx, request)
 }

 func (s *Server) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.LoadCollection(request)
+	return s.impl.LoadCollection(ctx, request)
 }

 func (s *Server) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.ReleaseCollection(request)
+	return s.impl.ReleaseCollection(ctx, request)
 }

 func (s *Server) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
-	return s.impl.DescribeCollection(request)
+	return s.impl.DescribeCollection(ctx, request)
 }

 func (s *Server) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
-	return s.impl.GetCollectionStatistics(request)
+	return s.impl.GetCollectionStatistics(ctx, request)
 }

 func (s *Server) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
-	return s.impl.ShowCollections(request)
+	return s.impl.ShowCollections(ctx, request)
 }

 func (s *Server) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
-	return s.impl.CreatePartition(request)
+	return s.impl.CreatePartition(ctx, request)
 }

 func (s *Server) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
-	return s.impl.DropPartition(request)
+	return s.impl.DropPartition(ctx, request)
 }

 func (s *Server) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
-	return s.impl.HasPartition(request)
+	return s.impl.HasPartition(ctx, request)
 }

 func (s *Server) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) {
-	return s.impl.LoadPartitions(request)
+	return s.impl.LoadPartitions(ctx, request)
 }

 func (s *Server) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) {
-	return s.impl.ReleasePartitions(request)
+	return s.impl.ReleasePartitions(ctx, request)
 }

 func (s *Server) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
-	return s.impl.GetPartitionStatistics(request)
+	return s.impl.GetPartitionStatistics(ctx, request)
 }

 func (s *Server) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
-	return s.impl.ShowPartitions(request)
+	return s.impl.ShowPartitions(ctx, request)
 }

 func (s *Server) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
-	return s.impl.CreateIndex(request)
+	return s.impl.CreateIndex(ctx, request)
 }

 func (s *Server) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
-	return s.impl.DropIndex(request)
+	return s.impl.DropIndex(ctx, request)
 }

 func (s *Server) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
-	return s.impl.DescribeIndex(request)
+	return s.impl.DescribeIndex(ctx, request)
 }

 func (s *Server) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
-	return s.impl.GetIndexState(request)
+	return s.impl.GetIndexState(ctx, request)
 }

 func (s *Server) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
-	return s.impl.Insert(request)
+	return s.impl.Insert(ctx, request)
 }

 func (s *Server) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
-	return s.impl.Search(request)
+	return s.impl.Search(ctx, request)
 }

 func (s *Server) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
-	return s.impl.Flush(request)
+	return s.impl.Flush(ctx, request)
 }

 func (s *Server) GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	return s.impl.GetDdChannel(request)
+	return s.impl.GetDdChannel(ctx, request)
 }

 func (s *Server) GetPersistentSegmentInfo(ctx context.Context, request *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error) {
-	return s.impl.GetPersistentSegmentInfo(request)
+	return s.impl.GetPersistentSegmentInfo(ctx, request)
 }

 func (s *Server) GetQuerySegmentInfo(ctx context.Context, request *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error) {
-	return s.impl.GetQuerySegmentInfo(request)
+	return s.impl.GetQuerySegmentInfo(ctx, request)

 }
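The init() hunk above is where the global tracer that every opentracing.GlobalTracer() call depends on actually gets installed. A self-contained sketch of the same Jaeger setup (the service name and the error return are illustrative; the code in the diff panics instead):

    package main

    import (
    	"fmt"
    	"io"

    	"github.com/opentracing/opentracing-go"
    	"github.com/uber/jaeger-client-go/config"
    )

    func initTracer(service string) (io.Closer, error) {
    	cfg := &config.Configuration{
    		ServiceName: service,
    		// A "const" sampler with Param 1 reports every span: handy in
    		// development, usually too verbose for production.
    		Sampler: &config.SamplerConfig{Type: "const", Param: 1},
    	}
    	tracer, closer, err := cfg.NewTracer()
    	if err != nil {
    		return nil, fmt.Errorf("cannot init Jaeger: %v", err)
    	}
    	opentracing.SetGlobalTracer(tracer)
    	// Keep the closer and Close() it on shutdown to flush buffered
    	// spans, as the server does via s.closer.
    	return closer, nil
    }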
@@ -6,12 +6,13 @@ import (

 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"

-	"google.golang.org/grpc"
-
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
+	"google.golang.org/grpc"
 )

 type Client struct {
@@ -21,8 +22,13 @@ type Client struct {
 }

 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	connectGrpcFunc := func() error {
-		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer)))
 		if err != nil {
 			return err
 		}
@@ -44,23 +50,23 @@ func (c *Client) Stop() error {
 	return nil
 }

-func (c *Client) RegisterNode(request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error) {
-	return c.proxyServiceClient.RegisterNode(c.ctx, request)
+func (c *Client) RegisterNode(ctx context.Context, request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error) {
+	return c.proxyServiceClient.RegisterNode(ctx, request)
 }

-func (c *Client) InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
-	return c.proxyServiceClient.InvalidateCollectionMetaCache(c.ctx, request)
+func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
+	return c.proxyServiceClient.InvalidateCollectionMetaCache(ctx, request)
 }

-func (c *Client) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
-	return c.proxyServiceClient.GetTimeTickChannel(c.ctx, &commonpb.Empty{})
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.proxyServiceClient.GetTimeTickChannel(ctx, &commonpb.Empty{})
 }

-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return c.proxyServiceClient.GetComponentStates(c.ctx, &commonpb.Empty{})
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return c.proxyServiceClient.GetComponentStates(ctx, &commonpb.Empty{})
 }

-func (c *Client) GetStatisticsChannel() (*milvuspb.StringResponse, error) {
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"sync"

+	otgrpc "github.com/opentracing-contrib/go-grpc"
 	"github.com/opentracing/opentracing-go"
 	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
@@ -114,7 +115,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.ctx)
 	defer cancel()

-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	proxypb.RegisterProxyServiceServer(s.grpcServer, s)
 	milvuspb.RegisterProxyServiceServer(s.grpcServer, s)
@ -6,8 +6,11 @@ import (
|
|||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
otgrpc "github.com/opentracing-contrib/go-grpc"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/querypb"
|
||||
)
|
||||
|
||||
|
@ -30,11 +33,16 @@ func NewClient(address string) *Client {
|
|||
}
|
||||
|
||||
func (c *Client) Init() error {
|
||||
tracer := opentracing.GlobalTracer()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), RPCConnectionTimeout)
|
||||
defer cancel()
|
||||
var err error
|
||||
for i := 0; i < Retry; i++ {
|
||||
if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock()); err == nil {
|
||||
if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock(),
|
||||
grpc.WithUnaryInterceptor(
|
||||
otgrpc.OpenTracingClientInterceptor(tracer)),
|
||||
grpc.WithStreamInterceptor(
|
||||
otgrpc.OpenTracingStreamClientInterceptor(tracer))); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@@ -53,54 +61,46 @@ func (c *Client) Stop() error {
 	return c.conn.Close()
 }
 
-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return c.grpcClient.GetComponentStates(context.TODO(), nil)
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return c.grpcClient.GetComponentStates(ctx, nil)
 }
 
-func (c *Client) GetTimeTickChannel() (string, error) {
-	response, err := c.grpcClient.GetTimeTickChannel(context.TODO(), nil)
-	if err != nil {
-		return "", err
-	}
-	return response.Value, nil
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetTimeTickChannel(ctx, nil)
 }
 
-func (c *Client) GetStatisticsChannel() (string, error) {
-	response, err := c.grpcClient.GetStatsChannel(context.TODO(), nil)
-	if err != nil {
-		return "", err
-	}
-	return response.Value, nil
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetStatsChannel(ctx, nil)
 }
 
-func (c *Client) AddQueryChannel(in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error) {
-	return c.grpcClient.AddQueryChannel(context.TODO(), in)
+func (c *Client) AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error) {
+	return c.grpcClient.AddQueryChannel(ctx, in)
 }
 
-func (c *Client) RemoveQueryChannel(in *querypb.RemoveQueryChannelsRequest) (*commonpb.Status, error) {
-	return c.grpcClient.RemoveQueryChannel(context.TODO(), in)
+func (c *Client) RemoveQueryChannel(ctx context.Context, in *querypb.RemoveQueryChannelsRequest) (*commonpb.Status, error) {
+	return c.grpcClient.RemoveQueryChannel(ctx, in)
 }
 
-func (c *Client) WatchDmChannels(in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
-	return c.grpcClient.WatchDmChannels(context.TODO(), in)
+func (c *Client) WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
+	return c.grpcClient.WatchDmChannels(ctx, in)
 }
 
-func (c *Client) LoadSegments(in *querypb.LoadSegmentRequest) (*commonpb.Status, error) {
-	return c.grpcClient.LoadSegments(context.TODO(), in)
+func (c *Client) LoadSegments(ctx context.Context, in *querypb.LoadSegmentRequest) (*commonpb.Status, error) {
+	return c.grpcClient.LoadSegments(ctx, in)
 }
 
-func (c *Client) ReleaseCollection(in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.ReleaseCollection(context.TODO(), in)
+func (c *Client) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.ReleaseCollection(ctx, in)
 }
 
-func (c *Client) ReleasePartitions(in *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.ReleasePartitions(context.TODO(), in)
+func (c *Client) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.ReleasePartitions(ctx, in)
 }
 
-func (c *Client) ReleaseSegments(in *querypb.ReleaseSegmentRequest) (*commonpb.Status, error) {
-	return c.grpcClient.ReleaseSegments(context.TODO(), in)
+func (c *Client) ReleaseSegments(ctx context.Context, in *querypb.ReleaseSegmentRequest) (*commonpb.Status, error) {
+	return c.grpcClient.ReleaseSegments(ctx, in)
 }
 
-func (c *Client) GetSegmentInfo(in *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
-	return c.grpcClient.GetSegmentInfo(context.TODO(), in)
+func (c *Client) GetSegmentInfo(ctx context.Context, in *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
+	return c.grpcClient.GetSegmentInfo(ctx, in)
 }
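The other recurring edit in these wrappers is mechanical but important: context.TODO() disappears and the caller's context.Context becomes the first parameter, so deadlines, cancellation, and the spans created by the interceptors above propagate through the wrapper instead of dying at it. A toy demonstration of what the old pattern lost (doRPC is a stand-in for a generated gRPC call, not a function from the diff):

package main

import (
	"context"
	"fmt"
	"time"
)

// doRPC stands in for a generated gRPC call; it honors ctx cancellation.
func doRPC(ctx context.Context) error {
	select {
	case <-time.After(time.Second):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// badWrapper mints its own context, so the caller's deadline is ignored.
func badWrapper() error { return doRPC(context.TODO()) }

// goodWrapper threads the caller's context through, as this diff now does.
func goodWrapper(ctx context.Context) error { return doRPC(ctx) }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(badWrapper())     // <nil>: ran to completion despite the caller's deadline
	fmt.Println(goodWrapper(ctx)) // context deadline exceeded
}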
@@ -2,6 +2,8 @@ package grpcquerynode
 
 import (
+	"context"
 	"fmt"
+	"io"
 	"log"
 	"net"
 	"strconv"
@@ -15,8 +17,11 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"google.golang.org/grpc"
 
+	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
@@ -40,6 +45,8 @@ type Server struct {
 	masterService *msc.GrpcClient
 	indexService  *isc.Client
 	queryService  *qsc.Client
+
+	closer io.Closer
 }
 
 func NewServer(ctx context.Context, factory msgstream.Factory) (*Server, error) {
@@ -55,7 +62,7 @@ func NewServer(ctx context.Context, factory msgstream.Factory) (*Server, error)
 }
 
 func (s *Server) init() error {
-
+	ctx := context.Background()
 	Params.Init()
 	if !funcutil.CheckPortAvailable(Params.QueryNodePort) {
 		Params.QueryNodePort = funcutil.GetAvailablePort()
@@ -63,11 +70,26 @@ func (s *Server) init() error {
 	Params.LoadFromEnv()
 	Params.LoadFromArgs()
 
+	// TODO
+	cfg := &config.Configuration{
+		ServiceName: fmt.Sprintf("query_node ip: %s, port: %d", Params.QueryNodeIP, Params.QueryNodePort),
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	s.closer = closer
+
 	log.Println("QueryNode, port:", Params.QueryNodePort)
 	s.wg.Add(1)
 	go s.startGrpcLoop(Params.QueryNodePort)
 	// wait for grpc server loop start
-	err := <-s.grpcErrChan
+	err = <-s.grpcErrChan
 	if err != nil {
 		return err
 	}
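init() now owns the tracer lifecycle: a Jaeger configuration with a const sampler (Param 1 samples every span) is turned into a tracer, installed as the global OpenTracing tracer, and its io.Closer is kept on the server so Stop() can flush buffered spans. The same bootstrap, sketched in isolation (the service name and error handling are illustrative):

// Sketch: Jaeger tracer bootstrap matching the init() code above.
package main

import (
	"fmt"
	"io"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

func initTracing(serviceName string) (io.Closer, error) {
	cfg := &config.Configuration{
		ServiceName: serviceName,
		Sampler: &config.SamplerConfig{
			Type:  "const", // sample everything
			Param: 1,
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		return nil, fmt.Errorf("cannot init Jaeger: %w", err)
	}
	opentracing.SetGlobalTracer(tracer)
	return closer, nil // caller must Close() at shutdown to flush spans
}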
@@ -87,7 +109,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 
-	err = funcutil.WaitForComponentInitOrHealthy(queryService, "QueryService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, queryService, "QueryService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -115,7 +137,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 
-	err = funcutil.WaitForComponentHealthy(masterService, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, masterService, "MasterService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -136,7 +158,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 	// wait indexservice healthy
-	err = funcutil.WaitForComponentHealthy(indexService, "IndexService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, indexService, "IndexService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -156,7 +178,7 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -196,7 +218,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	}
 	log.Println("QueryNode:: addr:", addr)
 
-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	querypb.RegisterQueryNodeServer(s.grpcServer, s)
 
 	ctx, cancel := context.WithCancel(s.ctx)
@@ -225,13 +251,16 @@ func (s *Server) Run() error {
 }
 
 func (s *Server) Stop() error {
+	if err := s.closer.Close(); err != nil {
+		return err
+	}
+
 	s.cancel()
-	var err error
 	if s.grpcServer != nil {
 		s.grpcServer.GracefulStop()
 	}
 
-	err = s.impl.Stop()
+	err := s.impl.Stop()
 	if err != nil {
 		return err
 	}
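Stop() closes the tracer's closer before anything else, flushing buffered spans while the process is still healthy, then cancels the server-scoped context and drains in-flight RPCs. That ordering, extracted as a sketch (the parameters generalize the server fields used above):

// Sketch: the shutdown ordering used by Stop() above.
package main

import (
	"context"
	"io"

	"google.golang.org/grpc"
)

func shutdown(closer io.Closer, cancel context.CancelFunc, srv *grpc.Server) error {
	if err := closer.Close(); err != nil { // flush buffered spans first
		return err
	}
	cancel() // stop server-scoped goroutines
	if srv != nil {
		srv.GracefulStop() // drain in-flight RPCs
	}
	return nil
}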
@@ -2,14 +2,16 @@ package grpcqueryserviceclient
 
 import (
 	"context"
 	"errors"
 	"log"
 	"time"
 
 	"google.golang.org/grpc"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/querypb"
 )
@@ -34,11 +36,16 @@ func NewClient(address string, timeout time.Duration) (*Client, error) {
 }
 
 func (c *Client) Init() error {
+	tracer := opentracing.GlobalTracer()
 	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
 	defer cancel()
 	var err error
 	for i := 0; i < c.retry; i++ {
-		if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock()); err == nil {
+		if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock(),
+			grpc.WithUnaryInterceptor(
+				otgrpc.OpenTracingClientInterceptor(tracer)),
+			grpc.WithStreamInterceptor(
+				otgrpc.OpenTracingStreamClientInterceptor(tracer))); err == nil {
 			break
 		}
 	}
@@ -60,68 +67,54 @@ func (c *Client) Stop() error {
 	return c.conn.Close()
 }
 
-func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return c.grpcClient.GetComponentStates(context.Background(), &commonpb.Empty{})
+func (c *Client) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return c.grpcClient.GetComponentStates(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetTimeTickChannel() (string, error) {
-	resp, err := c.grpcClient.GetTimeTickChannel(context.Background(), &commonpb.Empty{})
-	if err != nil {
-		return "", err
-	}
-	if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
-		return "", errors.New(resp.Status.Reason)
-	}
-	return resp.Value, nil
+func (c *Client) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetTimeTickChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetStatisticsChannel() (string, error) {
-	resp, err := c.grpcClient.GetStatisticsChannel(context.Background(), &commonpb.Empty{})
-	if err != nil {
-		return "", err
-	}
-	if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
-		return "", errors.New(resp.Status.Reason)
-	}
-	return resp.Value, nil
+func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return c.grpcClient.GetStatisticsChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
-	return c.grpcClient.RegisterNode(context.TODO(), req)
+func (c *Client) RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
+	return c.grpcClient.RegisterNode(ctx, req)
 }
 
-func (c *Client) ShowCollections(req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
-	return c.grpcClient.ShowCollections(context.TODO(), req)
+func (c *Client) ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
+	return c.grpcClient.ShowCollections(ctx, req)
 }
 
-func (c *Client) LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.LoadCollection(context.TODO(), req)
+func (c *Client) LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.LoadCollection(ctx, req)
 }
 
-func (c *Client) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.ReleaseCollection(context.TODO(), req)
+func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.ReleaseCollection(ctx, req)
 }
 
-func (c *Client) ShowPartitions(req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
-	return c.grpcClient.ShowPartitions(context.TODO(), req)
+func (c *Client) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
+	return c.grpcClient.ShowPartitions(ctx, req)
 }
 
-func (c *Client) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.LoadPartitions(context.TODO(), req)
+func (c *Client) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.LoadPartitions(ctx, req)
 }
 
-func (c *Client) ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
-	return c.grpcClient.ReleasePartitions(context.TODO(), req)
+func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
+	return c.grpcClient.ReleasePartitions(ctx, req)
 }
 
-func (c *Client) CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error) {
-	return c.grpcClient.CreateQueryChannel(context.TODO(), &commonpb.Empty{})
+func (c *Client) CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error) {
+	return c.grpcClient.CreateQueryChannel(ctx, &commonpb.Empty{})
 }
 
-func (c *Client) GetPartitionStates(req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
-	return c.grpcClient.GetPartitionStates(context.TODO(), req)
+func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
+	return c.grpcClient.GetPartitionStates(ctx, req)
 }
 
-func (c *Client) GetSegmentInfo(req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
-	return c.grpcClient.GetSegmentInfo(context.TODO(), req)
+func (c *Client) GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
+	return c.grpcClient.GetSegmentInfo(ctx, req)
 }
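Alongside the ctx threading, the channel getters change shape: instead of (string, error) with the client unwrapping Status and converting failures into Go errors, the wrappers now return the raw *milvuspb.StringResponse and leave Status inspection to the caller. A small helper of the kind a caller might write to recover the old shape (hypothetical, not part of the diff):

package example

import (
	"errors"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)

// channelName converts the StringResponse convention back into (string, error).
func channelName(resp *milvuspb.StringResponse, err error) (string, error) {
	if err != nil {
		return "", err
	}
	if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
		return "", errors.New(resp.Status.Reason)
	}
	return resp.Value, nil
}

Usage would look like: name, err := channelName(client.GetTimeTickChannel(ctx)).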
@@ -8,6 +8,8 @@ import (
 	"sync"
 	"time"
 
+	otgrpc "github.com/opentracing-contrib/go-grpc"
+	"github.com/opentracing/opentracing-go"
 	dsc "github.com/zilliztech/milvus-distributed/internal/distributed/dataservice/client"
 	msc "github.com/zilliztech/milvus-distributed/internal/distributed/masterservice/client"
 	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
@@ -69,6 +71,7 @@ func (s *Server) Run() error {
 }
 
 func (s *Server) init() error {
+	ctx := context.Background()
 	Params.Init()
 
 	s.wg.Add(1)
@@ -96,7 +99,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 	// wait for master init or healthy
-	err = funcutil.WaitForComponentInitOrHealthy(masterService, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, masterService, "MasterService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -116,7 +119,7 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -148,7 +151,11 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 	ctx, cancel := context.WithCancel(s.loopCtx)
 	defer cancel()
 
-	s.grpcServer = grpc.NewServer()
+	tracer := opentracing.GlobalTracer()
+	s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(
+		otgrpc.OpenTracingServerInterceptor(tracer)),
+		grpc.StreamInterceptor(
+			otgrpc.OpenTracingStreamServerInterceptor(tracer)))
 	querypb.RegisterQueryServiceServer(s.grpcServer, s)
 
 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
@@ -171,57 +178,15 @@ func (s *Server) Stop() error {
 }
 
 func (s *Server) GetComponentStates(ctx context.Context, req *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	componentStates, err := s.impl.GetComponentStates()
-	if err != nil {
-		return &internalpb2.ComponentStates{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-		}, err
-	}
-
-	return componentStates, nil
+	return s.impl.GetComponentStates(ctx)
 }
 
 func (s *Server) GetTimeTickChannel(ctx context.Context, req *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	channel, err := s.impl.GetTimeTickChannel()
-	if err != nil {
-		return &milvuspb.StringResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-		}, err
-	}
-
-	return &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-			Reason:    "",
-		},
-		Value: channel,
-	}, nil
+	return s.impl.GetTimeTickChannel(ctx)
 }
 
 func (s *Server) GetStatisticsChannel(ctx context.Context, req *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	statisticsChannel, err := s.impl.GetStatisticsChannel()
-	if err != nil {
-		return &milvuspb.StringResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    err.Error(),
-			},
-		}, err
-	}
-
-	return &milvuspb.StringResponse{
-		Status: &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_SUCCESS,
-			Reason:    "",
-		},
-		Value: statisticsChannel,
-	}, nil
+	return s.impl.GetStatisticsChannel(ctx)
 }
 
 func (s *Server) SetMasterService(m qs.MasterServiceInterface) error {
@@ -235,41 +200,41 @@ func (s *Server) SetDataService(d qs.DataServiceInterface) error {
 }
 
 func (s *Server) RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
-	return s.impl.RegisterNode(req)
+	return s.impl.RegisterNode(ctx, req)
 }
 
 func (s *Server) ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
-	return s.impl.ShowCollections(req)
+	return s.impl.ShowCollections(ctx, req)
 }
 
 func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.LoadCollection(req)
+	return s.impl.LoadCollection(ctx, req)
 }
 
 func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return s.impl.ReleaseCollection(req)
+	return s.impl.ReleaseCollection(ctx, req)
 }
 
 func (s *Server) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
-	return s.impl.ShowPartitions(req)
+	return s.impl.ShowPartitions(ctx, req)
 }
 
 func (s *Server) GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
-	return s.impl.GetPartitionStates(req)
+	return s.impl.GetPartitionStates(ctx, req)
 }
 
 func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
-	return s.impl.LoadPartitions(req)
+	return s.impl.LoadPartitions(ctx, req)
 }
 
 func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
-	return s.impl.ReleasePartitions(req)
+	return s.impl.ReleasePartitions(ctx, req)
 }
 
 func (s *Server) CreateQueryChannel(ctx context.Context, req *commonpb.Empty) (*querypb.CreateQueryChannelResponse, error) {
-	return s.impl.CreateQueryChannel()
+	return s.impl.CreateQueryChannel(ctx)
 }
 
 func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
-	return s.impl.GetSegmentInfo(req)
+	return s.impl.GetSegmentInfo(ctx, req)
 }
@@ -62,7 +62,8 @@ func NewNodeImpl(ctx context.Context) (*NodeImpl, error) {
 }
 
 func (i *NodeImpl) Init() error {
-	err := funcutil.WaitForComponentHealthy(i.serviceClient, "IndexService", 10, time.Second)
+	ctx := context.Background()
+	err := funcutil.WaitForComponentHealthy(ctx, i.serviceClient, "IndexService", 10, time.Second)
 
 	if err != nil {
 		return err
@@ -75,7 +76,7 @@ func (i *NodeImpl) Init() error {
 		},
 	}
 
-	resp, err2 := i.serviceClient.RegisterNode(request)
+	resp, err2 := i.serviceClient.RegisterNode(ctx, request)
 	if err2 != nil {
 		log.Printf("Index NodeImpl connect to IndexService failed, error= %v", err)
 		return err2
@@ -122,7 +122,7 @@ func (it *IndexBuildTask) PostExecute(ctx context.Context) error {
 		IndexFilePaths: it.savePaths,
 	}
 
-	resp, err := it.serviceClient.NotifyBuildIndex(nty)
+	resp, err := it.serviceClient.NotifyBuildIndex(ctx, nty)
 	if err != nil {
 		log.Println("IndexBuildTask notify err:", err.Error())
 		return err
@@ -19,6 +19,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 	"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
@@ -150,7 +151,7 @@ func (i *ServiceImpl) UpdateStateCode(code internalpb2.StateCode) {
 	i.stateCode = code
 }
 
-func (i *ServiceImpl) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (i *ServiceImpl) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 
 	stateInfo := &internalpb2.ComponentInfo{
 		NodeID: i.ID,
@@ -168,22 +169,33 @@ func (i *ServiceImpl) GetComponentStates() (*internalpb2.ComponentStates, error)
 	return ret, nil
 }
 
-func (i *ServiceImpl) GetTimeTickChannel() (string, error) {
-	return "", nil
+func (i *ServiceImpl) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: "",
+	}, nil
 }
 
-func (i *ServiceImpl) GetStatisticsChannel() (string, error) {
-	return "", nil
+func (i *ServiceImpl) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: "",
+	}, nil
 }
 
-func (i *ServiceImpl) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
+func (i *ServiceImpl) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
 	fmt.Println("builder building index ..., indexName = ", req.IndexName, "indexID = ", req.IndexID, "dataPath = ", req.DataPaths)
 	ret := &indexpb.BuildIndexResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 		},
 	}
-	ctx := context.Background()
 	t := &IndexAddTask{
 		BaseTask: BaseTask{
 			ctx: ctx,
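This is the implementation side of the StringResponse convention: even stub getters return a fully formed response with ErrorCode_SUCCESS, so the gRPC shims never have to synthesize a Status themselves. The diff writes the literal at every site; a helper that would centralize it might look like this (hypothetical, not part of the diff):

package example

import (
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)

// successString wraps a value in the StringResponse shape used above.
func successString(v string) *milvuspb.StringResponse {
	return &milvuspb.StringResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_SUCCESS,
			Reason:    "",
		},
		Value: v,
	}
}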
@@ -232,7 +244,7 @@ func (i *ServiceImpl) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.Build
 	return ret, nil
 }
 
-func (i *ServiceImpl) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
+func (i *ServiceImpl) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
 	var indexStates []*indexpb.IndexInfo
 	for _, indexID := range req.IndexBuildIDs {
 		indexState, err := i.metaTable.GetIndexState(indexID)
@@ -250,7 +262,7 @@ func (i *ServiceImpl) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.
 	return ret, nil
 }
 
-func (i *ServiceImpl) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
+func (i *ServiceImpl) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
 	i.sched.IndexAddQueue.tryToRemoveUselessIndexAddTask(req.IndexID)
 
 	err := i.metaTable.MarkIndexAsDeleted(req.IndexID)
@@ -265,7 +277,7 @@ func (i *ServiceImpl) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status
 	go func() {
 		allNodeClients := i.nodeClients.PeekAllClients()
 		for _, client := range allNodeClients {
-			client.DropIndex(req)
+			client.DropIndex(ctx, req)
 		}
 	}()
 	go func() {
@@ -279,7 +291,7 @@ func (i *ServiceImpl) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status
 	}, nil
 }
 
-func (i *ServiceImpl) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
+func (i *ServiceImpl) GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
 	var indexPaths []*indexpb.IndexFilePathInfo = nil
 
 	for _, indexID := range req.IndexBuildIDs {
@@ -299,7 +311,7 @@ func (i *ServiceImpl) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*in
 	return ret, nil
 }
 
-func (i *ServiceImpl) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
+func (i *ServiceImpl) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
 	ret := &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_SUCCESS,
 	}
@@ -1,6 +1,7 @@
 package indexservice
 
 import (
+	"context"
 	"strconv"
 
 	grpcindexnodeclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexnode/client"
@@ -54,7 +55,7 @@ func (i *ServiceImpl) prepareNodeInitParams() []*commonpb.KeyValuePair {
 	return params
 }
 
-func (i *ServiceImpl) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
+func (i *ServiceImpl) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
 	ret := &indexpb.RegisterNodeResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -117,7 +117,7 @@ func (it *IndexAddTask) Execute(ctx context.Context) error {
 		Req: it.req,
 	}
 	log.Println("before index ...")
-	resp, err := it.builderClient.BuildIndex(cmd)
+	resp, err := it.builderClient.BuildIndex(ctx, cmd)
 	if err != nil {
 		return err
 	}
@@ -38,22 +38,22 @@ import (
 // masterpb2 -> masterpb (master_service)
 
 type ProxyServiceInterface interface {
-	GetTimeTickChannel() (*milvuspb.StringResponse, error)
-	InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
+	GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+	InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
 }
 
 type DataServiceInterface interface {
-	GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
-	GetSegmentInfoChannel() (*milvuspb.StringResponse, error)
+	GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
+	GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error)
 }
 
 type IndexServiceInterface interface {
-	BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
-	DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error)
+	BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
+	DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
 }
 
 type QueryServiceInterface interface {
-	ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
 }
 
 type Interface interface {
@@ -61,42 +61,42 @@ type Interface interface {
 	Init() error
 	Start() error
 	Stop() error
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 
 	//DDL request
-	CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
-	DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
-	HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
-	DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
-	ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
-	CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
-	DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
-	HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
-	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
+	DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
+	HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
+	DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+	ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+	CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
+	DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
+	HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
+	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
 
 	//index builder service
-	CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
-	DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
-	DropIndex(in *milvuspb.DropIndexRequest) (*commonpb.Status, error)
+	CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
+	DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
+	DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error)
 
 	//global timestamp allocator
-	AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
-	AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error)
+	AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
+	AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error)
 
 	//TODO, master load these channel form config file ?
 
 	//receiver time tick from proxy service, and put it into this channel
-	GetTimeTickChannel() (string, error)
+	GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
 
 	//receive ddl from rpc and time tick from proxy service, and put them into this channel
-	GetDdChannel() (string, error)
+	GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error)
 
 	//just define a channel, not used currently
-	GetStatisticsChannel() (string, error)
+	GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error)
 
 	//segment
-	DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
-	ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
+	DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
+	ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
 }
 
 // ------------------ struct -----------------------
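These are consumer-defined interfaces: Core declares only the handful of methods it actually calls on each downstream service, which is what lets the tests later in this diff substitute lightweight mocks. Adding ctx everywhere keeps the concrete gRPC clients assignable to them, a property that can be pinned down with a compile-time assertion (illustrative pattern, not present in the diff):

package example

import "context"

// releaser is a narrowed, consumer-defined interface in the style of
// QueryServiceInterface above.
type releaser interface {
	ReleaseCollection(ctx context.Context, collectionID int64) error
}

type grpcClient struct{}

func (c *grpcClient) ReleaseCollection(ctx context.Context, collectionID int64) error {
	return nil
}

// Compile-time proof that the concrete client still satisfies the interface
// after the ctx parameter was added.
var _ releaser = (*grpcClient)(nil)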
@@ -639,8 +639,8 @@ func (c *Core) setMsgStreams() error {
 	return nil
 }
 
-func (c *Core) SetProxyService(s ProxyServiceInterface) error {
-	rsp, err := s.GetTimeTickChannel()
+func (c *Core) SetProxyService(ctx context.Context, s ProxyServiceInterface) error {
+	rsp, err := s.GetTimeTickChannel(ctx)
 	if err != nil {
 		return err
 	}
@@ -648,7 +648,7 @@ func (c *Core) SetProxyService(s ProxyServiceInterface) error {
 	log.Info("proxy time tick", zap.String("channel name", Params.ProxyTimeTickChannel))
 
 	c.InvalidateCollectionMetaCache = func(ts typeutil.Timestamp, dbName string, collectionName string) error {
-		status, _ := s.InvalidateCollectionMetaCache(&proxypb.InvalidateCollMetaCacheRequest{
+		status, _ := s.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: 0, //TODO,MsgType
 				MsgID:   0,
@@ -669,8 +669,8 @@ func (c *Core) SetProxyService(s ProxyServiceInterface) error {
 	return nil
 }
 
-func (c *Core) SetDataService(s DataServiceInterface) error {
-	rsp, err := s.GetSegmentInfoChannel()
+func (c *Core) SetDataService(ctx context.Context, s DataServiceInterface) error {
+	rsp, err := s.GetSegmentInfoChannel(ctx)
 	if err != nil {
 		return err
 	}
@@ -682,7 +682,7 @@ func (c *Core) SetDataService(s DataServiceInterface) error {
 		if err != nil {
 			return nil, err
 		}
-		binlog, err := s.GetInsertBinlogPaths(&datapb.InsertBinlogPathRequest{
+		binlog, err := s.GetInsertBinlogPaths(ctx, &datapb.InsertBinlogPathRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: 0, //TODO, msy type
 				MsgID:   0,
@@ -707,9 +707,9 @@ func (c *Core) SetDataService(s DataServiceInterface) error {
 	return nil
 }
 
-func (c *Core) SetIndexService(s IndexServiceInterface) error {
+func (c *Core) SetIndexService(ctx context.Context, s IndexServiceInterface) error {
 	c.BuildIndexReq = func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error) {
-		rsp, err := s.BuildIndex(&indexpb.BuildIndexRequest{
+		rsp, err := s.BuildIndex(ctx, &indexpb.BuildIndexRequest{
 			DataPaths:   binlog,
 			TypeParams:  typeParams,
 			IndexParams: indexParams,
@@ -726,7 +726,7 @@ func (c *Core) SetIndexService(s IndexServiceInterface) error {
 	}
 
 	c.DropIndexReq = func(indexID typeutil.UniqueID) error {
-		rsp, err := s.DropIndex(&indexpb.DropIndexRequest{
+		rsp, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{
 			IndexID: indexID,
 		})
 		if err != nil {
@@ -741,7 +741,7 @@ func (c *Core) SetIndexService(s IndexServiceInterface) error {
 	return nil
 }
 
-func (c *Core) SetQueryService(s QueryServiceInterface) error {
+func (c *Core) SetQueryService(ctx context.Context, s QueryServiceInterface) error {
 	c.ReleaseCollection = func(ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) error {
 		req := &querypb.ReleaseCollectionRequest{
 			Base: &commonpb.MsgBase{
@@ -753,7 +753,7 @@ func (c *Core) SetQueryService(s QueryServiceInterface) error {
 			DbID:         dbID,
 			CollectionID: collectionID,
 		}
-		rsp, err := s.ReleaseCollection(req)
+		rsp, err := s.ReleaseCollection(ctx, req)
 		if err != nil {
 			return err
 		}
@@ -825,7 +825,7 @@ func (c *Core) Stop() error {
 	return nil
 }
 
-func (c *Core) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (c *Core) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	log.Info("GetComponentStates", zap.String("State Code", internalpb2.StateCode_name[int32(code)]))
 
@@ -851,19 +851,37 @@ func (c *Core) GetComponentStates() (*internalpb2.ComponentStates, error)
 	}, nil
 }
 
-func (c *Core) GetTimeTickChannel() (string, error) {
-	return Params.TimeTickChannel, nil
+func (c *Core) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: Params.TimeTickChannel,
+	}, nil
 }
 
-func (c *Core) GetDdChannel() (string, error) {
-	return Params.DdChannel, nil
+func (c *Core) GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: Params.DdChannel,
+	}, nil
 }
 
-func (c *Core) GetStatisticsChannel() (string, error) {
-	return Params.StatisticsChannel, nil
+func (c *Core) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: Params.StatisticsChannel,
+	}, nil
 }
 
-func (c *Core) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
+func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -895,7 +913,7 @@ func (c *Core) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb
 	}, nil
 }
 
-func (c *Core) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
+func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -927,7 +945,7 @@ func (c *Core) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Sta
 	}, nil
 }
 
-func (c *Core) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
+func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.BoolResponse{
@@ -969,7 +987,7 @@ func (c *Core) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolR
 	}, nil
 }
 
-func (c *Core) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
+func (c *Core) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.DescribeCollectionResponse{
@@ -1010,7 +1028,7 @@ func (c *Core) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milv
 	return t.Rsp, nil
 }
 
-func (c *Core) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
+func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.ShowCollectionResponse{
@@ -1052,7 +1070,7 @@ func (c *Core) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.Sh
 	return t.Rsp, nil
 }
 
-func (c *Core) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
+func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -1084,7 +1102,7 @@ func (c *Core) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.S
 	}, nil
 }
 
-func (c *Core) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
+func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -1116,7 +1134,7 @@ func (c *Core) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Statu
 	}, nil
 }
 
-func (c *Core) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
+func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.BoolResponse{
@@ -1158,7 +1176,7 @@ func (c *Core) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolRes
 	}, nil
 }
 
-func (c *Core) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
+func (c *Core) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.ShowPartitionResponse{
@@ -1201,7 +1219,7 @@ func (c *Core) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.Show
 	return t.Rsp, nil
 }
 
-func (c *Core) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
+func (c *Core) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -1233,7 +1251,7 @@ func (c *Core) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, e
 	}, nil
 }
 
-func (c *Core) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
+func (c *Core) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.DescribeIndexResponse{
@@ -1279,7 +1297,7 @@ func (c *Core) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.Descr
 	return t.Rsp, nil
 }
 
-func (c *Core) DropIndex(in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
+func (c *Core) DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &commonpb.Status{
@@ -1311,7 +1329,7 @@ func (c *Core) DropIndex(in *milvuspb.DropIndexRequest) (*commonpb.Status, error
 	}, nil
 }
 
-func (c *Core) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
+func (c *Core) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.DescribeSegmentResponse{
@@ -1354,7 +1372,7 @@ func (c *Core) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.D
 	return t.Rsp, nil
 }
 
-func (c *Core) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
+func (c *Core) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
 	code := c.stateCode.Load().(internalpb2.StateCode)
 	if code != internalpb2.StateCode_HEALTHY {
 		return &milvuspb.ShowSegmentResponse{
@@ -1396,7 +1414,7 @@ func (c *Core) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegm
 	return t.Rsp, nil
 }
 
-func (c *Core) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
+func (c *Core) AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
 	ts, err := c.tsoAllocator.Alloc(in.Count)
 	if err != nil {
 		return &masterpb.TsoResponse{
@@ -1419,7 +1437,7 @@ func (c *Core) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, e
 	}, nil
 }
 
-func (c *Core) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
+func (c *Core) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
 	start, _, err := c.idAllocator.Alloc(in.Count)
 	if err != nil {
 		return &masterpb.IDResponse{
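Every Core handler above opens with the same two lines: load the state code from an atomic.Value and bail out unless the core is HEALTHY. The guard, reduced to a self-contained sketch (the type names are stand-ins for internalpb2.StateCode):

package example

import (
	"errors"
	"sync/atomic"
)

type StateCode int32

const (
	StateInitializing StateCode = iota
	StateHealthy
)

type Core struct{ stateCode atomic.Value }

func newCore() *Core {
	c := &Core{}
	c.stateCode.Store(StateInitializing)
	return c
}

// guard mirrors the health check at the top of every Core handler above.
func (c *Core) guard() error {
	if c.stateCode.Load().(StateCode) != StateHealthy {
		return errors.New("server is not healthy")
	}
	return nil
}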
@@ -29,7 +29,7 @@ type proxyMock struct {
 	mutex     sync.Mutex
 }
 
-func (p *proxyMock) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
+func (p *proxyMock) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -37,7 +37,7 @@ func (p *proxyMock) GetTimeTickChannel() (*milvuspb.StringResponse, error) {
 		Value: fmt.Sprintf("proxy-time-tick-%d", p.randVal),
 	}, nil
 }
-func (p *proxyMock) InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
+func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	p.collArray = append(p.collArray, request.CollectionName)
@@ -57,7 +57,7 @@ type dataMock struct {
 	randVal int
 }
 
-func (d *dataMock) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
+func (d *dataMock) GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
 	rst := &datapb.InsertBinlogPathsResponse{
 		FieldIDs: []int64{},
 		Paths:    []*internalpb2.StringList{},
@@ -85,7 +85,7 @@ type queryMock struct {
 	mutex  sync.Mutex
 }
 
-func (q *queryMock) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+func (q *queryMock) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
 	q.mutex.Lock()
 	defer q.mutex.Unlock()
 	q.collID = append(q.collID, req.CollectionID)
@@ -95,7 +95,7 @@ func (q *queryMock) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*c
 	}, nil
 }
 
-func (d *dataMock) GetSegmentInfoChannel() (*milvuspb.StringResponse, error) {
+func (d *dataMock) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
 	return &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -112,7 +112,7 @@ type indexMock struct {
 	mutex     sync.Mutex
 }
 
-func (idx *indexMock) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
+func (idx *indexMock) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
 	idx.mutex.Lock()
 	defer idx.mutex.Unlock()
 	idx.fileArray = append(idx.fileArray, req.DataPaths...)
@@ -127,7 +127,7 @@ func (idx *indexMock) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.Build
 	}, nil
 }
 
-func (idx *indexMock) DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
+func (idx *indexMock) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
 	idx.mutex.Lock()
 	defer idx.mutex.Unlock()
 	idx.idxDropID = append(idx.idxDropID, req.IndexID)
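The test doubles change in lockstep: each mock grows the ctx parameter so it still satisfies the narrowed interface it stands in for, while keeping its record-the-call behavior for assertions. The shape of such a mock, in miniature (types are illustrative):

package example

import (
	"context"
	"sync"
)

type releaser interface {
	ReleaseCollection(ctx context.Context, collectionID int64) error
}

// queryMock records calls so tests can assert on them, like the mocks above.
type queryMock struct {
	mu      sync.Mutex
	collIDs []int64
}

func (q *queryMock) ReleaseCollection(ctx context.Context, collectionID int64) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.collIDs = append(q.collIDs, collectionID)
	return nil
}

var _ releaser = (*queryMock)(nil)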
@@ -179,11 +179,11 @@ func TestMasterService(t *testing.T) {
 		collArray: make([]string, 0, 16),
 		mutex:     sync.Mutex{},
 	}
-	err = core.SetProxyService(pm)
+	err = core.SetProxyService(ctx, pm)
 	assert.Nil(t, err)
 
 	dm := &dataMock{randVal: randVal}
-	err = core.SetDataService(dm)
+	err = core.SetDataService(ctx, dm)
 	assert.Nil(t, err)
 
 	im := &indexMock{
@@ -193,14 +193,14 @@ func TestMasterService(t *testing.T) {
 		idxDropID: []int64{},
 		mutex:     sync.Mutex{},
 	}
-	err = core.SetIndexService(im)
+	err = core.SetIndexService(ctx, im)
 	assert.Nil(t, err)
 
 	qm := &queryMock{
 		collID: nil,
 		mutex:  sync.Mutex{},
 	}
-	err = core.SetQueryService(qm)
+	err = core.SetQueryService(ctx, qm)
 	assert.Nil(t, err)
 
 	err = core.Init()
@@ -307,7 +307,7 @@ func TestMasterService(t *testing.T) {
 			CollectionName: "testColl",
 			Schema:         sbf,
 		}
-		status, err := core.CreateCollection(req)
+		status, err := core.CreateCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 
@@ -339,7 +339,7 @@ func TestMasterService(t *testing.T) {
 		req.Base.MsgID = 101
 		req.Base.Timestamp = 101
 		req.Base.SourceID = 101
-		status, err = core.CreateCollection(req)
+		status, err = core.CreateCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
 
@@ -347,7 +347,7 @@ func TestMasterService(t *testing.T) {
 		req.Base.Timestamp = 102
 		req.Base.SourceID = 102
 		req.CollectionName = "testColl-again"
-		status, err = core.CreateCollection(req)
+		status, err = core.CreateCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
 
@@ -358,7 +358,7 @@ func TestMasterService(t *testing.T) {
 		req.Base.MsgID = 103
 		req.Base.Timestamp = 103
 		req.Base.SourceID = 103
-		status, err = core.CreateCollection(req)
+		status, err = core.CreateCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 
@@ -382,7 +382,7 @@ func TestMasterService(t *testing.T) {
 			DbName:         "testDb",
 			CollectionName: "testColl",
 		}
-		rsp, err := core.HasCollection(req)
+		rsp, err := core.HasCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.Value, true)
@@ -397,7 +397,7 @@ func TestMasterService(t *testing.T) {
 			DbName:         "testDb",
 			CollectionName: "testColl2",
 		}
-		rsp, err = core.HasCollection(req)
+		rsp, err = core.HasCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.Value, false)
@@ -413,7 +413,7 @@ func TestMasterService(t *testing.T) {
 			DbName:         "testDb",
 			CollectionName: "testColl",
 		}
-		rsp, err = core.HasCollection(req)
+		rsp, err = core.HasCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.Value, true)
@@ -432,7 +432,7 @@ func TestMasterService(t *testing.T) {
 			DbName:         "testDb",
 			CollectionName: "testColl",
 		}
-		rsp, err := core.DescribeCollection(req)
+		rsp, err := core.DescribeCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.Schema.Name, "testColl")
@@ -450,7 +450,7 @@ func TestMasterService(t *testing.T) {
 			},
 			DbName: "testDb",
 		}
-		rsp, err := core.ShowCollections(req)
+		rsp, err := core.ShowCollections(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.ElementsMatch(t, rsp.CollectionNames, []string{"testColl", "testColl-again"})
@@ -470,7 +470,7 @@ func TestMasterService(t *testing.T) {
 			PartitionName: "testPartition",
 		}
 		consumeMsgChan(time.Second, ddStream.Chan())
-		status, err := core.CreatePartition(req)
+		status, err := core.CreatePartition(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		collMeta, err := core.MetaTable.GetCollectionByName("testColl")
@@ -501,7 +501,7 @@ func TestMasterService(t *testing.T) {
 			CollectionName: "testColl",
 			PartitionName:  "testPartition",
 		}
-		rsp, err := core.HasPartition(req)
+		rsp, err := core.HasPartition(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.Value, true)
@@ -521,7 +521,7 @@ func TestMasterService(t *testing.T) {
 			CollectionName: "testColl",
 			CollectionID:   coll.ID,
 		}
-		rsp, err := core.ShowPartitions(req)
+		rsp, err := core.ShowPartitions(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, len(rsp.PartitionNames), 2)
@@ -579,7 +579,7 @@ func TestMasterService(t *testing.T) {
 			CollectionID: coll.ID,
 			PartitionID:  partID,
 		}
-		rsp, err := core.ShowSegments(req)
+		rsp, err := core.ShowSegments(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, rsp.SegmentIDs[0], int64(1000))
@@ -608,7 +608,7 @@ func TestMasterService(t *testing.T) {
 		assert.Nil(t, err)
 		assert.Equal(t, len(collMeta.FieldIndexes), 0)
 
-		rsp, err := core.CreateIndex(req)
+		rsp, err := core.CreateIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		time.Sleep(time.Second)
@@ -623,7 +623,7 @@ func TestMasterService(t *testing.T) {
 		assert.Equal(t, idxMeta.IndexName, Params.DefaultIndexName)
 
 		req.FieldName = "no field"
-		rsp, err = core.CreateIndex(req)
+		rsp, err = core.CreateIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.NotEqual(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	})
@@ -642,7 +642,7 @@ func TestMasterService(t *testing.T) {
 			CollectionID: coll.ID,
 			SegmentID:    1000,
 		}
-		rsp, err := core.DescribeSegment(req)
+		rsp, err := core.DescribeSegment(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		t.Logf("index id = %d", rsp.IndexID)
@@ -661,7 +661,7 @@ func TestMasterService(t *testing.T) {
 			FieldName: "vector",
 			IndexName: "",
 		}
-		rsp, err := core.DescribeIndex(req)
+		rsp, err := core.DescribeIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, len(rsp.IndexDescriptions), 1)
@@ -738,7 +738,7 @@ func TestMasterService(t *testing.T) {
 			FieldName: "vector",
 			IndexName: "",
 		}
-		rsp, err := core.DescribeIndex(req)
+		rsp, err := core.DescribeIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		assert.Equal(t, len(rsp.IndexDescriptions), 1)
@@ -762,7 +762,7 @@ func TestMasterService(t *testing.T) {
 		assert.Nil(t, err)
 		assert.Equal(t, len(idx), 1)
 
-		rsp, err := core.DropIndex(req)
+		rsp, err := core.DropIndex(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, rsp.ErrorCode, commonpb.ErrorCode_SUCCESS)
 
@@ -791,7 +791,7 @@ func TestMasterService(t *testing.T) {
 		collMeta, err := core.MetaTable.GetCollectionByName("testColl")
 		assert.Nil(t, err)
 		dropPartID := collMeta.PartitionIDs[1]
-		status, err := core.DropPartition(req)
+		status, err := core.DropPartition(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 		collMeta, err = core.MetaTable.GetCollectionByName("testColl")
@@ -823,7 +823,7 @@ func TestMasterService(t *testing.T) {
 		}
 		collMeta, err := core.MetaTable.GetCollectionByName("testColl")
 		assert.Nil(t, err)
-		status, err := core.DropCollection(req)
+		status, err := core.DropCollection(ctx, req)
 		assert.Nil(t, err)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
 
@@ -853,7 +853,7 @@ func TestMasterService(t *testing.T) {
 			DbName:         "testDb",
 			CollectionName: "testColl",
 		}
-		status, err = core.DropCollection(req)
+		status, err = core.DropCollection(ctx, req)
 		assert.Nil(t, err)
|
||||
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
|
||||
time.Sleep(time.Second)
|
||||
|
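Note: every call site in this test now threads the one ctx the test creates up front. A minimal, self-contained sketch of that shape (the fakeService type is hypothetical, not part of the codebase; in a real _test.go file the function would live in the package under test):

package main

import (
	"context"
	"testing"
)

// fakeService is a hypothetical stand-in for the master service core.
type fakeService struct{}

func (s *fakeService) CreateCollection(ctx context.Context, name string) error {
	return ctx.Err() // a real core would dispatch a ddl task here
}

// TestCtxThreading shows the shape these tests take after the change:
// one context created at the top and passed to every service call.
func TestCtxThreading(t *testing.T) {
	ctx := context.Background()
	svc := &fakeService{}
	if err := svc.CreateCollection(ctx, "testColl"); err != nil {
		t.Fatal(err)
	}
}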
@@ -26,16 +26,16 @@ func (node *NodeImpl) UpdateStateCode(code internalpb2.StateCode) {
 
 func (node *NodeImpl) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
 	collectionName := request.CollectionName
-	globalMetaCache.RemoveCollection(collectionName) // no need to return error, though collection may be not cached
+	globalMetaCache.RemoveCollection(ctx, collectionName) // no need to return error, though collection may be not cached
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_SUCCESS,
 		Reason:    "",
 	}, nil
 }
 
-func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
 	log.Println("create collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	cct := &CreateCollectionTask{
@@ -65,9 +65,9 @@ func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest
 	return cct.result, nil
 }
 
-func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
 	log.Println("drop collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	dct := &DropCollectionTask{
@@ -96,9 +96,9 @@ func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*
 	return dct.result, nil
 }
 
-func (node *NodeImpl) HasCollection(request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
+func (node *NodeImpl) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
 	log.Println("has collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	hct := &HasCollectionTask{
@@ -131,9 +131,9 @@ func (node *NodeImpl) HasCollection(request *milvuspb.HasCollectionRequest) (*mi
 	return hct.result, nil
 }
 
-func (node *NodeImpl) LoadCollection(request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
 	log.Println("load collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	lct := &LoadCollectionTask{
@@ -162,9 +162,9 @@ func (node *NodeImpl) LoadCollection(request *milvuspb.LoadCollectionRequest) (*
 	return lct.result, nil
 }
 
-func (node *NodeImpl) ReleaseCollection(request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
 	log.Println("release collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	rct := &ReleaseCollectionTask{
@@ -193,9 +193,9 @@ func (node *NodeImpl) ReleaseCollection(request *milvuspb.ReleaseCollectionReque
 	return rct.result, nil
 }
 
-func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
+func (node *NodeImpl) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
 	log.Println("describe collection: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	dct := &DescribeCollectionTask{
@@ -228,9 +228,9 @@ func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionReq
 	return dct.result, nil
 }
 
-func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
+func (node *NodeImpl) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
 	log.Println("get collection statistics")
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	g := &GetCollectionsStatisticsTask{
 		ctx: ctx,
@@ -262,9 +262,9 @@ func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsR
 	return g.result, nil
 }
 
-func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
+func (node *NodeImpl) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
 	log.Println("show collections")
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	sct := &ShowCollectionsTask{
 		ctx: ctx,
@@ -296,9 +296,9 @@ func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (
 	return sct.result, nil
 }
 
-func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
 	log.Println("create partition", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	cpt := &CreatePartitionTask{
 		ctx: ctx,
@@ -325,9 +325,9 @@ func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest)
 	return cpt.result, nil
 }
 
-func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
 	log.Println("drop partition: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	dpt := &DropPartitionTask{
 		ctx: ctx,
@@ -355,9 +355,9 @@ func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*co
 	return dpt.result, nil
 }
 
-func (node *NodeImpl) HasPartition(request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
+func (node *NodeImpl) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
 	log.Println("has partition: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	hpt := &HasPartitionTask{
 		ctx: ctx,
@@ -391,9 +391,9 @@ func (node *NodeImpl) HasPartition(request *milvuspb.HasPartitionRequest) (*milv
 	return hpt.result, nil
 }
 
-func (node *NodeImpl) LoadPartitions(request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) {
 	log.Println("load partitions: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	lpt := &LoadPartitionTask{
@@ -422,9 +422,9 @@ func (node *NodeImpl) LoadPartitions(request *milvuspb.LoadPartitonRequest) (*co
 	return lpt.result, nil
 }
 
-func (node *NodeImpl) ReleasePartitions(request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) {
 	log.Println("load partitions: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	rpt := &ReleasePartitionTask{
@@ -453,13 +453,13 @@ func (node *NodeImpl) ReleasePartitions(request *milvuspb.ReleasePartitionReques
 	return rpt.result, nil
 }
 
-func (node *NodeImpl) GetPartitionStatistics(request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
+func (node *NodeImpl) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
 	panic("implement me")
 }
 
-func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
+func (node *NodeImpl) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	log.Println("show partitions: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	spt := &ShowPartitionsTask{
 		ctx: ctx,
@@ -492,9 +492,9 @@ func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*m
 	return spt.result, nil
 }
 
-func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
 	log.Println("create index for: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	cit := &CreateIndexTask{
 		ctx: ctx,
@@ -522,9 +522,9 @@ func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*common
 	return cit.result, nil
 }
 
-func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
+func (node *NodeImpl) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
 	log.Println("Describe index for: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	dit := &DescribeIndexTask{
 		ctx: ctx,
@@ -556,9 +556,9 @@ func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*mi
 	return dit.result, nil
 }
 
-func (node *NodeImpl) DropIndex(request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
 	log.Println("Drop index for: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	dit := &DropIndexTask{
 		ctx: ctx,
@@ -583,9 +583,9 @@ func (node *NodeImpl) DropIndex(request *milvuspb.DropIndexRequest) (*commonpb.S
 	return dit.result, nil
 }
 
-func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
+func (node *NodeImpl) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
 	// log.Println("Describe index progress for: ", request)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	dipt := &GetIndexStateTask{
 		ctx: ctx,
@@ -618,8 +618,8 @@ func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvu
 	return dipt.result, nil
 }
 
-func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+func (node *NodeImpl) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	it := &InsertTask{
@@ -670,8 +670,8 @@ func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertR
 	return it.result, nil
 }
 
-func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+func (node *NodeImpl) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 
 	qt := &SearchTask{
@@ -712,9 +712,9 @@ func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchR
 	return qt.result, nil
 }
 
-func (node *NodeImpl) Flush(request *milvuspb.FlushRequest) (*commonpb.Status, error) {
+func (node *NodeImpl) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
 	log.Println("AA Flush collections: ", request.CollectionNames)
-	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
+	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
 	defer cancel()
 	ft := &FlushTask{
 		ctx: ctx,
@@ -742,22 +742,22 @@ func (node *NodeImpl) Flush(request *milvuspb.FlushRequest) (*commonpb.Status, e
 	return ft.result, nil
 }
 
-func (node *NodeImpl) GetDdChannel(request *commonpb.Empty) (*milvuspb.StringResponse, error) {
+func (node *NodeImpl) GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error) {
 	panic("implement me")
 }
 
-func (node *NodeImpl) GetPersistentSegmentInfo(req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error) {
+func (node *NodeImpl) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error) {
 	resp := &milvuspb.PersistentSegmentInfoResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 		},
 	}
-	segments, err := node.getSegmentsOfCollection(req.DbName, req.CollectionName)
+	segments, err := node.getSegmentsOfCollection(ctx, req.DbName, req.CollectionName)
 	if err != nil {
 		resp.Status.Reason = err.Error()
 		return resp, nil
 	}
-	infoResp, err := node.dataServiceClient.GetSegmentInfo(&datapb.SegmentInfoRequest{
+	infoResp, err := node.dataServiceClient.GetSegmentInfo(ctx, &datapb.SegmentInfoRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kSegmentInfo,
 			MsgID:   0,
@@ -793,18 +793,18 @@ func (node *NodeImpl) GetPersistentSegmentInfo(req *milvuspb.PersistentSegmentIn
 	return resp, nil
 }
 
-func (node *NodeImpl) GetQuerySegmentInfo(req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error) {
+func (node *NodeImpl) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error) {
 	resp := &milvuspb.QuerySegmentInfoResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 		},
 	}
-	segments, err := node.getSegmentsOfCollection(req.DbName, req.CollectionName)
+	segments, err := node.getSegmentsOfCollection(ctx, req.DbName, req.CollectionName)
 	if err != nil {
 		resp.Status.Reason = err.Error()
 		return resp, nil
 	}
-	infoResp, err := node.queryServiceClient.GetSegmentInfo(&querypb.SegmentInfoRequest{
+	infoResp, err := node.queryServiceClient.GetSegmentInfo(ctx, &querypb.SegmentInfoRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kSegmentInfo,
 			MsgID:   0,
@@ -838,8 +838,8 @@ func (node *NodeImpl) GetQuerySegmentInfo(req *milvuspb.QuerySegmentInfoRequest)
 	return resp, nil
 }
 
-func (node *NodeImpl) getSegmentsOfCollection(dbName string, collectionName string) ([]UniqueID, error) {
-	describeCollectionResponse, err := node.masterClient.DescribeCollection(&milvuspb.DescribeCollectionRequest{
+func (node *NodeImpl) getSegmentsOfCollection(ctx context.Context, dbName string, collectionName string) ([]UniqueID, error) {
+	describeCollectionResponse, err := node.masterClient.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kDescribeCollection,
 			MsgID:   0,
@@ -856,7 +856,7 @@ func (node *NodeImpl) getSegmentsOfCollection(dbName string, collectionName stri
 		return nil, errors.New(describeCollectionResponse.Status.Reason)
 	}
 	collectionID := describeCollectionResponse.CollectionID
-	showPartitionsResp, err := node.masterClient.ShowPartitions(&milvuspb.ShowPartitionRequest{
+	showPartitionsResp, err := node.masterClient.ShowPartitions(ctx, &milvuspb.ShowPartitionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kShowPartitions,
 			MsgID:   0,
@@ -876,7 +876,7 @@ func (node *NodeImpl) getSegmentsOfCollection(dbName string, collectionName stri
 
 	ret := make([]UniqueID, 0)
 	for _, partitionID := range showPartitionsResp.PartitionIDs {
-		showSegmentResponse, err := node.masterClient.ShowSegments(&milvuspb.ShowSegmentRequest{
+		showSegmentResponse, err := node.masterClient.ShowSegments(ctx, &milvuspb.ShowSegmentRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_kShowSegment,
				MsgID:   0,
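Note: the recurring change in these handlers is one two-line pattern: accept ctx as the first parameter and derive the per-request timeout from it instead of from context.Background(), so an upstream cancellation or deadline actually reaches the task. A runnable sketch of the difference (only reqTimeoutInterval is taken from the diff; handleRequest is a hypothetical handler, not code from the repository):

package main

import (
	"context"
	"fmt"
	"time"
)

// reqTimeoutInterval mirrors the proxy's per-request timeout constant.
const reqTimeoutInterval = 10 * time.Second

// handleRequest derives its timeout context from the caller's ctx, the
// shape these handlers converge on after the change.
func handleRequest(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, reqTimeoutInterval)
	defer cancel()

	select {
	case <-time.After(time.Second): // stand-in for the task's work
		return nil
	case <-ctx.Done(): // fires on caller cancellation or timeout
		return ctx.Err()
	}
}

func main() {
	// Cancelling the parent now aborts the request, which the old
	// context.Background() version could never observe.
	parent, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(handleRequest(parent)) // context.Canceled
}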
@@ -1,6 +1,8 @@
 package proxynode
 
 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
@@ -11,57 +13,57 @@ import (
 )
 
 type MasterClient interface {
-	CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
-	DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
-	HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
-	DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
-	ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
-	CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
-	DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
-	HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
-	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
-	CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
-	DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
-	DropIndex(in *milvuspb.DropIndexRequest) (*commonpb.Status, error)
-	ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
-	DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
+	CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
+	DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
+	HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
+	DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+	ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+	CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
+	DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
+	HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
+	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
+	DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
+	DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error)
+	ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
+	DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
 }
 
 type IndexServiceClient interface {
-	GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 }
 
 type QueryServiceClient interface {
-	ShowCollections(req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error)
-	LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
-	ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
-	ShowPartitions(req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error)
-	LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error)
-	ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
-	CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error)
-	GetPartitionStates(req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error)
+	ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error)
+	LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error)
+	LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error)
+	ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
+	CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error)
+	GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error)
 
 	//GetSearchChannelNames() ([]string, error)
 	//GetSearchResultChannels() ([]string, error)
-	GetComponentStates() (*internalpb2.ComponentStates, error)
-	GetSegmentInfo(req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+	GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
 }
 
 type DataServiceClient interface {
-	AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
-	GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
-	Flush(req *datapb.FlushRequest) (*commonpb.Status, error)
-	GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
+	AssignSegmentID(ctx context.Context, req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
+	GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
+	Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error)
+	GetCollectionStatistics(ctx context.Context, req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
 
-	GetComponentStates() (*internalpb2.ComponentStates, error)
-	GetSegmentInfo(req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+	GetSegmentInfo(ctx context.Context, req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error)
 }
 
 type ProxyServiceClient interface {
-	GetTimeTickChannel() (*milvuspb.StringResponse, error)
-	RegisterNode(request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error)
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+	RegisterNode(ctx context.Context, request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 }
 
 type ProxyNode interface {
@@ -69,36 +71,36 @@ type ProxyNode interface {
 	Start() error
 	Stop() error
 
-	InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
+	InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
 
-	CreateCollection(request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
-	DropCollection(request *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
-	HasCollection(request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
-	LoadCollection(request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error)
-	ReleaseCollection(request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error)
-	DescribeCollection(request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
-	GetCollectionStatistics(request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
-	ShowCollections(request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+	CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
+	DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
+	HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
+	LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+	GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
+	ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
 
-	CreatePartition(request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
-	DropPartition(request *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
-	HasPartition(request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
-	LoadPartitions(request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error)
-	ReleasePartitions(request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error)
-	GetPartitionStatistics(request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
-	ShowPartitions(request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
+	DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
+	HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
+	LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error)
+	ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error)
+	GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
+	ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
 
-	CreateIndex(request *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
-	DescribeIndex(request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
-	GetIndexState(request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)
-	DropIndex(request *milvuspb.DropIndexRequest) (*commonpb.Status, error)
+	CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
+	DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
+	GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)
+	DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error)
 
-	Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error)
-	Search(request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error)
-	Flush(request *milvuspb.FlushRequest) (*commonpb.Status, error)
+	Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error)
+	Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error)
+	Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error)
 
-	GetDdChannel(request *commonpb.Empty) (*milvuspb.StringResponse, error)
+	GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error)
 
-	GetQuerySegmentInfo(req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error)
-	GetPersistentSegmentInfo(req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error)
+	GetQuerySegmentInfo(ctx context.Context, req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error)
+	GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error)
 }
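Note: putting ctx context.Context first in every interface method mirrors the signatures of grpc-generated client stubs, so the same interface can be satisfied either by a thin wrapper over a real client or by a test double. A toy illustration of that convention (all types here are stand-ins, not milvus types):

package main

import (
	"context"
	"fmt"
)

// Toy request/response types standing in for the milvuspb messages.
type PingRequest struct{ Msg string }
type PingResponse struct{ Msg string }

// Client follows the convention the diff adopts: context first,
// request second.
type Client interface {
	Ping(ctx context.Context, in *PingRequest) (*PingResponse, error)
}

// mockClient is a hypothetical in-memory implementation, useful in tests.
type mockClient struct{}

func (m *mockClient) Ping(ctx context.Context, in *PingRequest) (*PingResponse, error) {
	if err := ctx.Err(); err != nil { // honor cancellation like a real RPC
		return nil, err
	}
	return &PingResponse{Msg: in.Msg}, nil
}

func main() {
	var c Client = &mockClient{}
	resp, err := c.Ping(context.Background(), &PingRequest{Msg: "ok"})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Msg)
}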
@@ -1,6 +1,7 @@
 package proxynode
 
 import (
+	"context"
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/errors"
@@ -11,16 +12,16 @@ import (
 )
 
 type MasterClientInterface interface {
-	DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
-	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
 }
 
 type Cache interface {
-	GetCollectionID(collectionName string) (typeutil.UniqueID, error)
-	GetPartitionID(collectionName string, partitionName string) (typeutil.UniqueID, error)
-	GetCollectionSchema(collectionName string) (*schemapb.CollectionSchema, error)
-	RemoveCollection(collectionName string)
-	RemovePartition(collectionName string, partitionName string)
+	GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error)
+	GetPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error)
+	GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error)
+	RemoveCollection(ctx context.Context, collectionName string)
+	RemovePartition(ctx context.Context, collectionName string, partitionName string)
 }
 
 type collectionInfo struct {
@@ -54,7 +55,7 @@ func NewMetaCache(client MasterClientInterface) (*MetaCache, error) {
 	}, nil
 }
 
-func (m *MetaCache) readCollectionID(collectionName string) (typeutil.UniqueID, error) {
+func (m *MetaCache) readCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 
@@ -65,7 +66,7 @@ func (m *MetaCache) readCollectionID(collectionName string) (typeutil.UniqueID,
 	return collInfo.collID, nil
 }
 
-func (m *MetaCache) readCollectionSchema(collectionName string) (*schemapb.CollectionSchema, error) {
+func (m *MetaCache) readCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 
@@ -76,7 +77,7 @@ func (m *MetaCache) readCollectionSchema(collectionName string) (*schemapb.Colle
 	return collInfo.schema, nil
 }
 
-func (m *MetaCache) readPartitionID(collectionName string, partitionName string) (typeutil.UniqueID, error) {
+func (m *MetaCache) readPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 
@@ -92,8 +93,8 @@ func (m *MetaCache) readPartitionID(collectionName string, partitionName string)
 	return partitionID, nil
 }
 
-func (m *MetaCache) GetCollectionID(collectionName string) (typeutil.UniqueID, error) {
-	collID, err := m.readCollectionID(collectionName)
+func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
+	collID, err := m.readCollectionID(ctx, collectionName)
 	if err == nil {
 		return collID, nil
 	}
@@ -106,7 +107,7 @@ func (m *MetaCache) GetCollectionID(collectionName string) (typeutil.UniqueID, e
 		},
 		CollectionName: collectionName,
 	}
-	coll, err := m.client.DescribeCollection(req)
+	coll, err := m.client.DescribeCollection(ctx, req)
 	if err != nil {
 		return 0, err
 	}
@@ -123,8 +124,8 @@ func (m *MetaCache) GetCollectionID(collectionName string) (typeutil.UniqueID, e
 
 	return m.collInfo[collectionName].collID, nil
 }
-func (m *MetaCache) GetCollectionSchema(collectionName string) (*schemapb.CollectionSchema, error) {
-	collSchema, err := m.readCollectionSchema(collectionName)
+func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error) {
+	collSchema, err := m.readCollectionSchema(ctx, collectionName)
 	if err == nil {
 		return collSchema, nil
 	}
@@ -137,7 +138,7 @@ func (m *MetaCache) GetCollectionSchema(collectionName string) (*schemapb.Collec
 		},
 		CollectionName: collectionName,
 	}
-	coll, err := m.client.DescribeCollection(req)
+	coll, err := m.client.DescribeCollection(ctx, req)
 	if err != nil {
 		return nil, err
 	}
@@ -155,8 +156,8 @@ func (m *MetaCache) GetCollectionSchema(collectionName string) (*schemapb.Collec
 	return m.collInfo[collectionName].schema, nil
 }
 
-func (m *MetaCache) GetPartitionID(collectionName string, partitionName string) (typeutil.UniqueID, error) {
-	partitionID, err := m.readPartitionID(collectionName, partitionName)
+func (m *MetaCache) GetPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
+	partitionID, err := m.readPartitionID(ctx, collectionName, partitionName)
 	if err == nil {
 		return partitionID, nil
 	}
@@ -169,7 +170,7 @@ func (m *MetaCache) GetPartitionID(collectionName string, partitionName string)
 		},
 		CollectionName: collectionName,
 	}
-	partitions, err := m.client.ShowPartitions(req)
+	partitions, err := m.client.ShowPartitions(ctx, req)
 	if err != nil {
 		return 0, err
 	}
@@ -206,13 +207,13 @@ func (m *MetaCache) GetPartitionID(collectionName string, partitionName string)
 	return partInfo[partitionName], nil
 }
 
-func (m *MetaCache) RemoveCollection(collectionName string) {
+func (m *MetaCache) RemoveCollection(ctx context.Context, collectionName string) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	delete(m.collInfo, collectionName)
 }
 
-func (m *MetaCache) RemovePartition(collectionName, partitionName string) {
+func (m *MetaCache) RemovePartition(ctx context.Context, collectionName, partitionName string) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	_, ok := m.collInfo[collectionName]
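Note: MetaCache keeps its read-through structure; the change is only that the miss path now carries ctx into the master RPC. A condensed sketch of that read-through flow, assuming (as this cache does) that two goroutines racing on a miss may both fetch and one overwrite the other harmlessly:

package main

import (
	"context"
	"fmt"
	"sync"
)

// fetchFunc stands in for the DescribeCollection round trip to the master.
type fetchFunc func(ctx context.Context, name string) (int64, error)

// idCache is a stripped-down version of the MetaCache read-through flow:
// try the map under a read lock, and only on a miss fall back to the
// remote fetch, passing ctx through so the RPC can be cancelled.
type idCache struct {
	mu    sync.RWMutex
	ids   map[string]int64
	fetch fetchFunc
}

func (c *idCache) Get(ctx context.Context, name string) (int64, error) {
	c.mu.RLock()
	id, ok := c.ids[name]
	c.mu.RUnlock()
	if ok {
		return id, nil // fast path: served from cache
	}
	id, err := c.fetch(ctx, name) // slow path: remote lookup with ctx
	if err != nil {
		return 0, err
	}
	c.mu.Lock()
	c.ids[name] = id
	c.mu.Unlock()
	return id, nil
}

func main() {
	c := &idCache{
		ids: map[string]int64{},
		fetch: func(ctx context.Context, name string) (int64, error) {
			return 42, nil // pretend the master answered
		},
	}
	id, _ := c.Get(context.Background(), "testColl")
	fmt.Println(id) // 42; a second Get is served from the map
}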
@@ -1,6 +1,7 @@
 package proxynode
 
 import (
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -13,7 +14,7 @@ import (
 type MockMasterClientInterface struct {
 }
 
-func (m *MockMasterClientInterface) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
+func (m *MockMasterClientInterface) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	if in.CollectionName == "collection1" {
 		return &milvuspb.ShowPartitionResponse{
 			Status: &commonpb.Status{
@@ -32,7 +33,7 @@ func (m *MockMasterClientInterface) ShowPartitions(in *milvuspb.ShowPartitionReq
 	}, nil
 }
 
-func (m *MockMasterClientInterface) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
+func (m *MockMasterClientInterface) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
 	if in.CollectionName == "collection1" {
 		return &milvuspb.DescribeCollectionResponse{
 			Status: &commonpb.Status{
@@ -54,44 +55,46 @@ func (m *MockMasterClientInterface) DescribeCollection(in *milvuspb.DescribeColl
 }
 
 func TestMetaCache_GetCollection(t *testing.T) {
+	ctx := context.Background()
 	client := &MockMasterClientInterface{}
 	err := InitMetaCache(client)
 	assert.Nil(t, err)
 
-	id, err := globalMetaCache.GetCollectionID("collection1")
+	id, err := globalMetaCache.GetCollectionID(ctx, "collection1")
 	assert.Nil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(1))
-	schema, err := globalMetaCache.GetCollectionSchema("collection1")
+	schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
 	assert.Nil(t, err)
 	assert.Equal(t, schema, &schemapb.CollectionSchema{
 		AutoID: true,
 	})
-	id, err = globalMetaCache.GetCollectionID("collection2")
+	id, err = globalMetaCache.GetCollectionID(ctx, "collection2")
 	assert.NotNil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(0))
-	schema, err = globalMetaCache.GetCollectionSchema("collection2")
+	schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection2")
 	assert.NotNil(t, err)
 	assert.Nil(t, schema)
 }
 
 func TestMetaCache_GetPartitionID(t *testing.T) {
+	ctx := context.Background()
 	client := &MockMasterClientInterface{}
 	err := InitMetaCache(client)
 	assert.Nil(t, err)
 
-	id, err := globalMetaCache.GetPartitionID("collection1", "par1")
+	id, err := globalMetaCache.GetPartitionID(ctx, "collection1", "par1")
 	assert.Nil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(1))
-	id, err = globalMetaCache.GetPartitionID("collection1", "par2")
+	id, err = globalMetaCache.GetPartitionID(ctx, "collection1", "par2")
 	assert.Nil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(2))
-	id, err = globalMetaCache.GetPartitionID("collection1", "par3")
+	id, err = globalMetaCache.GetPartitionID(ctx, "collection1", "par3")
 	assert.NotNil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(0))
-	id, err = globalMetaCache.GetPartitionID("collection2", "par3")
+	id, err = globalMetaCache.GetPartitionID(ctx, "collection2", "par3")
 	assert.NotNil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(0))
-	id, err = globalMetaCache.GetPartitionID("collection2", "par4")
+	id, err = globalMetaCache.GetPartitionID(ctx, "collection2", "par4")
 	assert.NotNil(t, err)
 	assert.Equal(t, id, typeutil.UniqueID(0))
 }
@@ -3,7 +3,6 @@ package proxynode
 import (
 	"context"
 	"fmt"
-	"io"
 	"log"
 	"math/rand"
 	"sync"
@@ -12,9 +11,6 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 
-	"github.com/opentracing/opentracing-go"
-	"github.com/uber/jaeger-client-go/config"
-
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 
@@ -56,8 +52,6 @@ type NodeImpl struct {
 	queryMsgStream msgstream.MsgStream
 	msFactory      msgstream.Factory
 
-	closer io.Closer
-
 	// Add callback functions at different stages
 	startCallbacks []func()
 	closeCallbacks []func()
@@ -77,13 +71,13 @@ func NewProxyNodeImpl(ctx context.Context, factory msgstream.Factory) (*NodeImpl
 }
 
 type Component interface {
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 }
 
-func (node *NodeImpl) waitForServiceReady(service Component, serviceName string) error {
+func (node *NodeImpl) waitForServiceReady(ctx context.Context, service Component, serviceName string) error {
 
 	checkFunc := func() error {
-		resp, err := service.GetComponentStates()
+		resp, err := service.GetComponentStates(ctx)
 		if err != nil {
 			return err
 		}
@@ -106,8 +100,9 @@ func (node *NodeImpl) waitForServiceReady(service Component, serviceName string)
 
 func (node *NodeImpl) Init() error {
 	// todo wait for proxyservice state changed to Healthy
+	ctx := context.Background()
 
-	err := node.waitForServiceReady(node.proxyServiceClient, "ProxyService")
+	err := node.waitForServiceReady(ctx, node.proxyServiceClient, "ProxyService")
 	if err != nil {
 		return err
 	}
@@ -120,7 +115,7 @@ func (node *NodeImpl) Init() error {
 		},
 	}
 
-	response, err := node.proxyServiceClient.RegisterNode(request)
+	response, err := node.proxyServiceClient.RegisterNode(ctx, request)
 	if err != nil {
 		return err
 	}
@@ -133,24 +128,9 @@ func (node *NodeImpl) Init() error {
 		return err
 	}
 
-	// TODO
-	cfg := &config.Configuration{
-		ServiceName: fmt.Sprintf("proxy_node_%d", Params.ProxyID),
-		Sampler: &config.SamplerConfig{
-			Type:  "const",
-			Param: 1,
-		},
-	}
-	tracer, closer, err := cfg.NewTracer()
-	if err != nil {
-		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
-	}
-	opentracing.SetGlobalTracer(tracer)
-	node.closer = closer
-
 	// wait for dataservice state changed to Healthy
 	if node.dataServiceClient != nil {
-		err = node.waitForServiceReady(node.dataServiceClient, "DataService")
+		err = node.waitForServiceReady(ctx, node.dataServiceClient, "DataService")
 		if err != nil {
 			return err
 		}
@@ -158,7 +138,7 @@ func (node *NodeImpl) Init() error {
 
 	// wait for queryservice state changed to Healthy
 	if node.queryServiceClient != nil {
-		err = node.waitForServiceReady(node.queryServiceClient, "QueryService")
+		err = node.waitForServiceReady(ctx, node.queryServiceClient, "QueryService")
 		if err != nil {
 			return err
 		}
@@ -166,14 +146,14 @@ func (node *NodeImpl) Init() error {
 
 	// wait for indexservice state changed to Healthy
 	if node.indexServiceClient != nil {
-		err = node.waitForServiceReady(node.indexServiceClient, "IndexService")
+		err = node.waitForServiceReady(ctx, node.indexServiceClient, "IndexService")
 		if err != nil {
 			return err
 		}
 	}
 
 	if node.queryServiceClient != nil {
-		resp, err := node.queryServiceClient.CreateQueryChannel()
+		resp, err := node.queryServiceClient.CreateQueryChannel(ctx)
 		if err != nil {
 			return err
 		}
@@ -288,9 +268,6 @@ func (node *NodeImpl) Start() error {
 }
 
 func (node *NodeImpl) Stop() error {
-	if err := node.closer.Close(); err != nil {
-		return err
-	}
 	node.cancel()
 
 	globalInsertChannelsMap.closeAllMsgStream()
@@ -304,13 +281,6 @@ func (node *NodeImpl) Stop() error {
 
 	node.wg.Wait()
 
-	if node.closer != nil {
-		err := node.closer.Close()
-		if err != nil {
-			return err
-		}
-	}
-
 	for _, cb := range node.closeCallbacks {
 		cb()
 	}
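Note: this file drops the per-component Jaeger bootstrap (and with it the stored io.Closer), presumably in favor of shared tracing wiring elsewhere. For reference, the kind of initialization being deleted, factored into a helper; this is only a sketch of the removed pattern, and serviceName is the one assumed parameter:

package main

import (
	"fmt"
	"io"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

// initTracer builds a Jaeger tracer with a constant sampler and installs
// it as the opentracing global, returning the closer the caller must
// eventually Close. Centralizing this means each component no longer
// owns a tracer lifecycle of its own.
func initTracer(serviceName string) (io.Closer, error) {
	cfg := &config.Configuration{
		ServiceName: serviceName,
		Sampler: &config.SamplerConfig{
			Type:  "const", // sample every span
			Param: 1,
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		return nil, fmt.Errorf("cannot init Jaeger: %w", err)
	}
	opentracing.SetGlobalTracer(tracer)
	return closer, nil
}

func main() {
	closer, err := initTracer("proxy_node")
	if err != nil {
		panic(err)
	}
	defer closer.Close()
}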
@@ -273,7 +273,7 @@ func (sa *SegIDAssigner) syncSegments() bool {
 		return true
 	}
 	sa.reduceSegReqs()
-	_, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 	req := &datapb.AssignSegIDRequest{
 		NodeID: sa.PeerID,
@@ -282,7 +282,7 @@ func (sa *SegIDAssigner) syncSegments() bool {
 	}
 
 	sa.segReqs = []*datapb.SegIDRequest{}
-	resp, err := sa.serviceClient.AssignSegmentID(req)
+	resp, err := sa.serviceClient.AssignSegmentID(ctx, req)
 
 	if err != nil {
 		log.Println("GRPC AssignSegmentID Failed", resp, err)
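Note: these two hunks are a genuine bug fix, not just ctx plumbing. The old code discarded the derived context (_, cancel := ...), so the 5-second deadline never applied to AssignSegmentID. A runnable sketch of the failure mode and the fix (slowRPC is a hypothetical stand-in for the gRPC call):

package main

import (
	"context"
	"fmt"
	"time"
)

// slowRPC only respects a deadline if the caller actually passes the
// derived context to it.
func slowRPC(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Second): // pretend the server is slow
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Buggy shape from before the fix: the derived context is discarded,
	// so slowRPC(context.Background()) would block the full 10s and the
	// timeout below is dead weight.
	_, cancelOld := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelOld()

	// Fixed shape: keep ctx and hand it to the RPC.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(slowRPC(ctx)) // context.DeadlineExceeded after 50ms
}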
@@ -133,24 +133,24 @@ func (it *InsertTask) PreExecute(ctx context.Context) error {
 
 func (it *InsertTask) Execute(ctx context.Context) error {
 	collectionName := it.BaseInsertTask.CollectionName
-	collSchema, err := globalMetaCache.GetCollectionSchema(collectionName)
+	collSchema, err := globalMetaCache.GetCollectionSchema(ctx, collectionName)
 	if err != nil {
 		return err
 	}
 	autoID := collSchema.AutoID
-	collID, err := globalMetaCache.GetCollectionID(collectionName)
+	collID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
 	if err != nil {
 		return err
 	}
 	it.CollectionID = collID
 	var partitionID UniqueID
 	if len(it.PartitionName) > 0 {
-		partitionID, err = globalMetaCache.GetPartitionID(collectionName, it.PartitionName)
+		partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, it.PartitionName)
 		if err != nil {
 			return err
 		}
 	} else {
-		partitionID, err = globalMetaCache.GetPartitionID(collectionName, Params.DefaultPartitionTag)
+		partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, Params.DefaultPartitionTag)
 		if err != nil {
 			return err
 		}
@@ -196,7 +196,7 @@ func (it *InsertTask) Execute(ctx context.Context) error {
 
 	stream, err := globalInsertChannelsMap.getInsertMsgStream(collID)
 	if err != nil {
-		resp, _ := it.dataServiceClient.GetInsertChannels(&datapb.InsertChannelRequest{
+		resp, _ := it.dataServiceClient.GetInsertChannels(ctx, &datapb.InsertChannelRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_kInsert, // todo
 				MsgID:   it.Base.MsgID, // todo
@@ -351,16 +351,16 @@ func (cct *CreateCollectionTask) PreExecute(ctx context.Context) error {
 
 func (cct *CreateCollectionTask) Execute(ctx context.Context) error {
 	var err error
-	cct.result, err = cct.masterClient.CreateCollection(cct.CreateCollectionRequest)
+	cct.result, err = cct.masterClient.CreateCollection(ctx, cct.CreateCollectionRequest)
 	if err != nil {
 		return err
 	}
 	if cct.result.ErrorCode == commonpb.ErrorCode_SUCCESS {
-		collID, err := globalMetaCache.GetCollectionID(cct.CollectionName)
+		collID, err := globalMetaCache.GetCollectionID(ctx, cct.CollectionName)
 		if err != nil {
 			return err
 		}
-		resp, _ := cct.dataServiceClient.GetInsertChannels(&datapb.InsertChannelRequest{
+		resp, _ := cct.dataServiceClient.GetInsertChannels(ctx, &datapb.InsertChannelRequest{
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_kInsert, // todo
 				MsgID:   cct.Base.MsgID, // todo
@@ -444,12 +444,12 @@ func (dct *DropCollectionTask) PreExecute(ctx context.Context) error {
 }
 
 func (dct *DropCollectionTask) Execute(ctx context.Context) error {
-	collID, err := globalMetaCache.GetCollectionID(dct.CollectionName)
+	collID, err := globalMetaCache.GetCollectionID(ctx, dct.CollectionName)
 	if err != nil {
 		return err
 	}
 
-	dct.result, err = dct.masterClient.DropCollection(dct.DropCollectionRequest)
+	dct.result, err = dct.masterClient.DropCollection(ctx, dct.DropCollectionRequest)
 	if err != nil {
 		return err
 	}
@@ -463,7 +463,7 @@ func (dct *DropCollectionTask) Execute(ctx context.Context) error {
 }
 
 func (dct *DropCollectionTask) PostExecute(ctx context.Context) error {
-	globalMetaCache.RemoveCollection(dct.CollectionName)
+	globalMetaCache.RemoveCollection(ctx, dct.CollectionName)
 	return nil
 }
 
@@ -518,7 +518,7 @@ func (st *SearchTask) PreExecute(ctx context.Context) error {
 	st.Base.SourceID = Params.ProxyID
 
 	collectionName := st.query.CollectionName
-	_, err := globalMetaCache.GetCollectionID(collectionName)
+	_, err := globalMetaCache.GetCollectionID(ctx, collectionName)
 	if err != nil { // err is not nil if collection not exists
 		return err
 	}
@@ -543,14 +543,14 @@ func (st *SearchTask) PreExecute(ctx context.Context) error {
 
 	st.ResultChannelID = Params.SearchResultChannelNames[0]
 	st.DbID = 0 // todo
-	collectionID, err := globalMetaCache.GetCollectionID(collectionName)
+	collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
 	if err != nil { // err is not nil if collection not exists
 		return err
 	}
 	st.CollectionID = collectionID
 	st.PartitionIDs = make([]UniqueID, 0)
 	for _, partitionName := range st.query.PartitionNames {
-		partitionID, err := globalMetaCache.GetPartitionID(collectionName, partitionName)
+		partitionID, err := globalMetaCache.GetPartitionID(ctx, collectionName, partitionName)
 		if err != nil {
 			continue
 		}
@@ -795,7 +795,7 @@ func (hct *HasCollectionTask) PreExecute(ctx context.Context) error {
 
 func (hct *HasCollectionTask) Execute(ctx context.Context) error {
 	var err error
-	hct.result, err = hct.masterClient.HasCollection(hct.HasCollectionRequest)
+	hct.result, err = hct.masterClient.HasCollection(ctx, hct.HasCollectionRequest)
 	if hct.result == nil {
 		return errors.New("has collection resp is nil")
 	}
@@ -866,7 +866,7 @@ func (dct *DescribeCollectionTask) PreExecute(ctx context.Context) error {
 
 func (dct *DescribeCollectionTask) Execute(ctx context.Context) error {
 	var err error
-	dct.result, err = dct.masterClient.DescribeCollection(dct.DescribeCollectionRequest)
+	dct.result, err = dct.masterClient.DescribeCollection(ctx, dct.DescribeCollectionRequest)
 	if dct.result == nil {
 		return errors.New("has collection resp is nil")
 	}
@@ -932,7 +932,7 @@ func (g *GetCollectionsStatisticsTask) PreExecute(ctx context.Context) error {
 }
 
 func (g *GetCollectionsStatisticsTask) Execute(ctx context.Context) error {
-	collID, err := globalMetaCache.GetCollectionID(g.CollectionName)
+	collID, err := globalMetaCache.GetCollectionID(ctx, g.CollectionName)
 	if err != nil {
 		return err
 	}
@@ -946,7 +946,7 @@ func (g *GetCollectionsStatisticsTask) Execute(ctx context.Context) error {
 		CollectionID: collID,
 	}
 
-	result, _ := g.dataServiceClient.GetCollectionStatistics(req)
+	result, _ := g.dataServiceClient.GetCollectionStatistics(ctx, req)
 	if result == nil {
 		return errors.New("get collection statistics resp is nil")
 	}
@@ -1021,7 +1021,7 @@ func (sct *ShowCollectionsTask) PreExecute(ctx context.Context) error {
 
 func (sct *ShowCollectionsTask) Execute(ctx context.Context) error {
 	var err error
-	sct.result, err = sct.masterClient.ShowCollections(sct.ShowCollectionRequest)
+	sct.result, err = sct.masterClient.ShowCollections(ctx, sct.ShowCollectionRequest)
 	if sct.result == nil {
 		return errors.New("get collection statistics resp is nil")
 	}
@@ -1098,7 +1098,7 @@ func (cpt *CreatePartitionTask) PreExecute(ctx context.Context) error {
 }
 
 func (cpt *CreatePartitionTask) Execute(ctx context.Context) (err error) {
-	cpt.result, err = cpt.masterClient.CreatePartition(cpt.CreatePartitionRequest)
+	cpt.result, err = cpt.masterClient.CreatePartition(ctx, cpt.CreatePartitionRequest)
 	if cpt.result == nil {
 		return errors.New("get collection statistics resp is nil")
 	}
@@ -1175,7 +1175,7 @@ func (dpt *DropPartitionTask) PreExecute(ctx context.Context) error {
 }
 
 func (dpt *DropPartitionTask) Execute(ctx context.Context) (err error) {
-	dpt.result, err = dpt.masterClient.DropPartition(dpt.DropPartitionRequest)
+	dpt.result, err = dpt.masterClient.DropPartition(ctx, dpt.DropPartitionRequest)
 	if dpt.result == nil {
 		return errors.New("get collection statistics resp is nil")
 	}
@@ -1251,7 +1251,7 @@ func (hpt *HasPartitionTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (hpt *HasPartitionTask) Execute(ctx context.Context) (err error) {
|
||||
hpt.result, err = hpt.masterClient.HasPartition(hpt.HasPartitionRequest)
|
||||
hpt.result, err = hpt.masterClient.HasPartition(ctx, hpt.HasPartitionRequest)
|
||||
if hpt.result == nil {
|
||||
return errors.New("get collection statistics resp is nil")
|
||||
}
|
||||
|
@ -1322,7 +1322,7 @@ func (spt *ShowPartitionsTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (spt *ShowPartitionsTask) Execute(ctx context.Context) error {
|
||||
var err error
|
||||
spt.result, err = spt.masterClient.ShowPartitions(spt.ShowPartitionRequest)
|
||||
spt.result, err = spt.masterClient.ShowPartitions(ctx, spt.ShowPartitionRequest)
|
||||
if spt.result == nil {
|
||||
return errors.New("get collection statistics resp is nil")
|
||||
}
|
||||
|
@ -1400,7 +1400,7 @@ func (cit *CreateIndexTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (cit *CreateIndexTask) Execute(ctx context.Context) error {
|
||||
var err error
|
||||
cit.result, err = cit.masterClient.CreateIndex(cit.CreateIndexRequest)
|
||||
cit.result, err = cit.masterClient.CreateIndex(ctx, cit.CreateIndexRequest)
|
||||
if cit.result == nil {
|
||||
return errors.New("get collection statistics resp is nil")
|
||||
}
|
||||
|
@ -1483,7 +1483,7 @@ func (dit *DescribeIndexTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (dit *DescribeIndexTask) Execute(ctx context.Context) error {
|
||||
var err error
|
||||
dit.result, err = dit.masterClient.DescribeIndex(dit.DescribeIndexRequest)
|
||||
dit.result, err = dit.masterClient.DescribeIndex(ctx, dit.DescribeIndexRequest)
|
||||
log.Println("YYYYY:", dit.result)
|
||||
if dit.result == nil {
|
||||
return errors.New("get collection statistics resp is nil")
|
||||
|
@ -1562,7 +1562,7 @@ func (dit *DropIndexTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (dit *DropIndexTask) Execute(ctx context.Context) error {
|
||||
var err error
|
||||
dit.result, err = dit.masterClient.DropIndex(dit.DropIndexRequest)
|
||||
dit.result, err = dit.masterClient.DropIndex(ctx, dit.DropIndexRequest)
|
||||
if dit.result == nil {
|
||||
return errors.New("drop index resp is nil")
|
||||
}
|
||||
|
@ -1641,7 +1641,7 @@ func (gist *GetIndexStateTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
||||
collectionName := gist.CollectionName
|
||||
collectionID, err := globalMetaCache.GetCollectionID(collectionName)
|
||||
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
|
||||
if err != nil { // err is not nil if collection not exists
|
||||
return err
|
||||
}
|
||||
|
@ -1657,7 +1657,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
|||
CollectionName: collectionName,
|
||||
CollectionID: collectionID,
|
||||
}
|
||||
partitions, err := gist.masterClient.ShowPartitions(showPartitionRequest)
|
||||
partitions, err := gist.masterClient.ShowPartitions(ctx, showPartitionRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1679,7 +1679,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
|||
IndexName: gist.IndexName,
|
||||
}
|
||||
|
||||
indexDescriptionResp, err2 := gist.masterClient.DescribeIndex(&describeIndexReq)
|
||||
indexDescriptionResp, err2 := gist.masterClient.DescribeIndex(ctx, &describeIndexReq)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
@ -1709,7 +1709,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
|||
CollectionID: collectionID,
|
||||
PartitionID: partitionID,
|
||||
}
|
||||
segments, err := gist.masterClient.ShowSegments(showSegmentsRequest)
|
||||
segments, err := gist.masterClient.ShowSegments(ctx, showSegmentsRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1734,7 +1734,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
|||
CollectionID: collectionID,
|
||||
SegmentID: segmentID,
|
||||
}
|
||||
segmentDesc, err := gist.masterClient.DescribeSegment(describeSegmentRequest)
|
||||
segmentDesc, err := gist.masterClient.DescribeSegment(ctx, describeSegmentRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1755,7 +1755,7 @@ func (gist *GetIndexStateTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
states, err := gist.indexServiceClient.GetIndexStates(getIndexStatesRequest)
|
||||
states, err := gist.indexServiceClient.GetIndexStates(ctx, getIndexStatesRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1848,7 +1848,7 @@ func (ft *FlushTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (ft *FlushTask) Execute(ctx context.Context) error {
|
||||
for _, collName := range ft.CollectionNames {
|
||||
collID, err := globalMetaCache.GetCollectionID(collName)
|
||||
collID, err := globalMetaCache.GetCollectionID(ctx, collName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1863,7 +1863,7 @@ func (ft *FlushTask) Execute(ctx context.Context) error {
|
|||
CollectionID: collID,
|
||||
}
|
||||
var status *commonpb.Status
|
||||
status, _ = ft.dataServiceClient.Flush(flushReq)
|
||||
status, _ = ft.dataServiceClient.Flush(ctx, flushReq)
|
||||
if status == nil {
|
||||
return errors.New("flush resp is nil")
|
||||
}
|
||||
|
@ -1940,11 +1940,11 @@ func (lct *LoadCollectionTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (lct *LoadCollectionTask) Execute(ctx context.Context) (err error) {
|
||||
collID, err := globalMetaCache.GetCollectionID(lct.CollectionName)
|
||||
collID, err := globalMetaCache.GetCollectionID(ctx, lct.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collSchema, err := globalMetaCache.GetCollectionSchema(lct.CollectionName)
|
||||
collSchema, err := globalMetaCache.GetCollectionSchema(ctx, lct.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1960,7 +1960,7 @@ func (lct *LoadCollectionTask) Execute(ctx context.Context) (err error) {
|
|||
CollectionID: collID,
|
||||
Schema: collSchema,
|
||||
}
|
||||
lct.result, err = lct.queryserviceClient.LoadCollection(request)
|
||||
lct.result, err = lct.queryserviceClient.LoadCollection(ctx, request)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -2027,7 +2027,7 @@ func (rct *ReleaseCollectionTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (rct *ReleaseCollectionTask) Execute(ctx context.Context) (err error) {
|
||||
collID, err := globalMetaCache.GetCollectionID(rct.CollectionName)
|
||||
collID, err := globalMetaCache.GetCollectionID(ctx, rct.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2041,7 +2041,7 @@ func (rct *ReleaseCollectionTask) Execute(ctx context.Context) (err error) {
|
|||
DbID: 0,
|
||||
CollectionID: collID,
|
||||
}
|
||||
rct.result, err = rct.queryserviceClient.ReleaseCollection(request)
|
||||
rct.result, err = rct.queryserviceClient.ReleaseCollection(ctx, request)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -2105,16 +2105,16 @@ func (lpt *LoadPartitionTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (lpt *LoadPartitionTask) Execute(ctx context.Context) error {
|
||||
var partitionIDs []int64
|
||||
collID, err := globalMetaCache.GetCollectionID(lpt.CollectionName)
|
||||
collID, err := globalMetaCache.GetCollectionID(ctx, lpt.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collSchema, err := globalMetaCache.GetCollectionSchema(lpt.CollectionName)
|
||||
collSchema, err := globalMetaCache.GetCollectionSchema(ctx, lpt.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, partitionName := range lpt.PartitionNames {
|
||||
partitionID, err := globalMetaCache.GetPartitionID(lpt.CollectionName, partitionName)
|
||||
partitionID, err := globalMetaCache.GetPartitionID(ctx, lpt.CollectionName, partitionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2132,7 +2132,7 @@ func (lpt *LoadPartitionTask) Execute(ctx context.Context) error {
|
|||
PartitionIDs: partitionIDs,
|
||||
Schema: collSchema,
|
||||
}
|
||||
lpt.result, err = lpt.queryserviceClient.LoadPartitions(request)
|
||||
lpt.result, err = lpt.queryserviceClient.LoadPartitions(ctx, request)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -2200,12 +2200,12 @@ func (rpt *ReleasePartitionTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
func (rpt *ReleasePartitionTask) Execute(ctx context.Context) (err error) {
|
||||
var partitionIDs []int64
|
||||
collID, err := globalMetaCache.GetCollectionID(rpt.CollectionName)
|
||||
collID, err := globalMetaCache.GetCollectionID(ctx, rpt.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, partitionName := range rpt.PartitionNames {
|
||||
partitionID, err := globalMetaCache.GetPartitionID(rpt.CollectionName, partitionName)
|
||||
partitionID, err := globalMetaCache.GetPartitionID(ctx, rpt.CollectionName, partitionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2222,7 +2222,7 @@ func (rpt *ReleasePartitionTask) Execute(ctx context.Context) (err error) {
|
|||
CollectionID: collID,
|
||||
PartitionIDs: partitionIDs,
|
||||
}
|
||||
rpt.result, err = rpt.queryserviceClient.ReleasePartitions(request)
|
||||
rpt.result, err = rpt.queryserviceClient.ReleasePartitions(ctx, request)
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
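Taken together, the proxy task hunks above follow one pattern: the ctx that Execute (or PreExecute/PostExecute) already receives is now forwarded into every meta-cache lookup and downstream RPC, so cancellation and deadlines propagate end to end. A minimal sketch of that calling convention, assuming a hypothetical task type (myTask and its fields are illustrative names, not from this commit):

	// Hypothetical task illustrating the convention this commit establishes:
	// forward the ctx received by Execute into each client call.
	func (t *myTask) Execute(ctx context.Context) error {
		collID, err := globalMetaCache.GetCollectionID(ctx, t.CollectionName)
		if err != nil {
			return err
		}
		_ = collID // used by the real task body
		t.result, err = t.masterClient.DescribeCollection(ctx, t.DescribeCollectionRequest)
		return err
	}
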
@@ -27,7 +27,7 @@ type NodeClient interface {
 	Start() error
 	Stop() error

-	InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
+	InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
 }

 type GlobalNodeInfoTable struct {

@@ -192,7 +192,7 @@ func (t *InvalidateCollectionMetaCacheTask) Execute(ctx context.Context) error {
 		return err
 	}
 	for _, c := range clients {
-		status, _ := c.InvalidateCollectionMetaCache(t.request)
+		status, _ := c.InvalidateCollectionMetaCache(ctx, t.request)
 		if status == nil {
 			return errors.New("invalidate collection meta cache error")
 		}
@@ -304,6 +304,7 @@ func (loader *indexLoader) checkIndexReady(indexParams indexParam, l *loadIndex)
 }

 func (loader *indexLoader) getIndexInfo(collectionID UniqueID, segmentID UniqueID) (UniqueID, UniqueID, error) {
+	ctx := context.TODO()
 	req := &milvuspb.DescribeSegmentRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kDescribeSegment,
@@ -311,7 +312,7 @@ func (loader *indexLoader) getIndexInfo(collectionID UniqueID, segmentID UniqueID) (UniqueID, UniqueID, error) {
 		CollectionID: collectionID,
 		SegmentID:    segmentID,
 	}
-	response, err := loader.masterClient.DescribeSegment(req)
+	response, err := loader.masterClient.DescribeSegment(ctx, req)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -322,6 +323,7 @@ func (loader *indexLoader) getIndexInfo(collectionID UniqueID, segmentID UniqueID) (UniqueID, UniqueID, error) {
 }

 func (loader *indexLoader) getIndexPaths(indexBuildID UniqueID) ([]string, error) {
+	ctx := context.TODO()
 	if loader.indexClient == nil {
 		return nil, errors.New("null index service client")
 	}
@@ -329,7 +331,7 @@ func (loader *indexLoader) getIndexPaths(indexBuildID UniqueID) ([]string, error) {
 	indexFilePathRequest := &indexpb.IndexFilePathsRequest{
 		IndexBuildIDs: []UniqueID{indexBuildID},
 	}
-	pathResponse, err := loader.indexClient.GetIndexFilePaths(indexFilePathRequest)
+	pathResponse, err := loader.indexClient.GetIndexFilePaths(ctx, indexFilePathRequest)
 	if err != nil || pathResponse.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
 		return nil, err
 	}
@@ -15,9 +15,6 @@ import "C"
 import (
 	"context"
 	"fmt"
-	"github.com/opentracing/opentracing-go"
-	"github.com/uber/jaeger-client-go/config"
-	"io"
 	"log"
 	"sync/atomic"

@@ -34,14 +31,14 @@ import (
 type Node interface {
 	typeutil.Component

-	AddQueryChannel(in *queryPb.AddQueryChannelsRequest) (*commonpb.Status, error)
-	RemoveQueryChannel(in *queryPb.RemoveQueryChannelsRequest) (*commonpb.Status, error)
-	WatchDmChannels(in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error)
-	LoadSegments(in *queryPb.LoadSegmentRequest) (*commonpb.Status, error)
-	ReleaseCollection(in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error)
-	ReleasePartitions(in *queryPb.ReleasePartitionRequest) (*commonpb.Status, error)
-	ReleaseSegments(in *queryPb.ReleaseSegmentRequest) (*commonpb.Status, error)
-	GetSegmentInfo(in *queryPb.SegmentInfoRequest) (*queryPb.SegmentInfoResponse, error)
+	AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelsRequest) (*commonpb.Status, error)
+	RemoveQueryChannel(ctx context.Context, in *queryPb.RemoveQueryChannelsRequest) (*commonpb.Status, error)
+	WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error)
+	LoadSegments(ctx context.Context, in *queryPb.LoadSegmentRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	ReleasePartitions(ctx context.Context, in *queryPb.ReleasePartitionRequest) (*commonpb.Status, error)
+	ReleaseSegments(ctx context.Context, in *queryPb.ReleaseSegmentRequest) (*commonpb.Status, error)
+	GetSegmentInfo(ctx context.Context, in *queryPb.SegmentInfoRequest) (*queryPb.SegmentInfoResponse, error)
 }

 type QueryService = typeutil.QueryServiceInterface

@@ -64,9 +61,6 @@ type QueryNode struct {
 	loadService  *loadService
 	statsService *statsService

-	//opentracing
-	closer io.Closer
-
 	// clients
 	masterClient MasterServiceInterface
 	queryClient  QueryServiceInterface

@@ -117,6 +111,7 @@ func NewQueryNodeWithoutID(ctx context.Context, factory msgstream.Factory) *QueryNode {
 }

 func (node *QueryNode) Init() error {
+	ctx := context.Background()
 	registerReq := &queryPb.RegisterNodeRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_kNone,

@@ -128,7 +123,7 @@ func (node *QueryNode) Init() error {
 		},
 	}

-	resp, err := node.queryClient.RegisterNode(registerReq)
+	resp, err := node.queryClient.RegisterNode(ctx, registerReq)
 	if err != nil {
 		panic(err)
 	}

@@ -153,20 +148,6 @@ func (node *QueryNode) Init() error {

 	fmt.Println("QueryNodeID is", Params.QueryNodeID)

-	cfg := &config.Configuration{
-		ServiceName: fmt.Sprintf("query_node_%d", node.QueryNodeID),
-		Sampler: &config.SamplerConfig{
-			Type:  "const",
-			Param: 1,
-		},
-	}
-	tracer, closer, err := cfg.NewTracer()
-	if err != nil {
-		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
-	}
-	opentracing.SetGlobalTracer(tracer)
-	node.closer = closer
-
 	if node.masterClient == nil {
 		log.Println("WARN: null master service detected")
 	}

@@ -231,9 +212,6 @@ func (node *QueryNode) Stop() error {
 	if node.statsService != nil {
 		node.statsService.close()
 	}
-	if node.closer != nil {
-		node.closer.Close()
-	}
 	return nil
 }
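With ctx now leading every Node method, callers of a query node can bound or cancel these RPCs instead of waiting indefinitely. A hedged sketch of the resulting call site (node and channels are placeholders for any Node implementation and channel list; the 5-second timeout is illustrative):

	// Hedged sketch: bound a WatchDmChannels call with a timeout so a
	// stuck query node cannot block its caller indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	status, err := node.WatchDmChannels(ctx, &queryPb.WatchDmChannelsRequest{
		ChannelIDs: channels,
	})
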
@@ -189,7 +189,7 @@ func refreshChannelNames() {
 	Params.StatsChannelName = Params.StatsChannelName + suffix
 }

-func (q *queryServiceMock) RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
+func (q *queryServiceMock) RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
 	return &querypb.RegisterNodeResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -40,6 +40,7 @@ func (loader *segmentLoader) seekSegment(position *internalpb2.MsgPosition) error {
 }

 func (loader *segmentLoader) getInsertBinlogPaths(segmentID UniqueID) ([]*internalpb2.StringList, []int64, error) {
+	ctx := context.TODO()
 	if loader.dataClient == nil {
 		return nil, nil, errors.New("null data service client")
 	}
@@ -48,7 +49,7 @@ func (loader *segmentLoader) getInsertBinlogPaths(segmentID UniqueID) ([]*internalpb2.StringList, []int64, error) {
 		SegmentID: segmentID,
 	}

-	pathResponse, err := loader.dataClient.GetInsertBinlogPaths(insertBinlogPathRequest)
+	pathResponse, err := loader.dataClient.GetInsertBinlogPaths(ctx, insertBinlogPathRequest)
 	if err != nil || pathResponse.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
 		return nil, nil, err
 	}
@@ -61,6 +62,7 @@ func (loader *segmentLoader) getInsertBinlogPaths(segmentID UniqueID) ([]*internalpb2.StringList, []int64, error) {
 }

 func (loader *segmentLoader) GetSegmentStates(segmentID UniqueID) (*datapb.SegmentStatesResponse, error) {
+	ctx := context.TODO()
 	if loader.dataClient == nil {
 		return nil, errors.New("null data service client")
 	}
@@ -68,7 +70,7 @@ func (loader *segmentLoader) GetSegmentStates(segmentID UniqueID) (*datapb.SegmentStatesResponse, error) {
 	segmentStatesRequest := &datapb.SegmentStatesRequest{
 		SegmentIDs: []int64{segmentID},
 	}
-	statesResponse, err := loader.dataClient.GetSegmentStates(segmentStatesRequest)
+	statesResponse, err := loader.dataClient.GetSegmentStates(ctx, segmentStatesRequest)
 	if err != nil || statesResponse.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
 		return nil, err
 	}
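Both loaders (indexLoader above and segmentLoader here) manufacture a context via context.TODO() rather than changing their own signatures, which reads as a stopgap so the new ctx-first client interfaces compile before the context is threaded through the loader API itself. A hedged sketch of the presumable follow-up (the ctx parameter and the method name are assumptions, not part of this commit):

	// Hypothetical follow-up: accept the caller's context instead of
	// context.TODO(), so deadlines and cancellation reach the data service.
	func (loader *segmentLoader) getSegmentStatesWithCtx(ctx context.Context, segmentID UniqueID) (*datapb.SegmentStatesResponse, error) {
		req := &datapb.SegmentStatesRequest{SegmentIDs: []int64{segmentID}}
		return loader.dataClient.GetSegmentStates(ctx, req)
	}
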
@@ -1,6 +1,8 @@
 package querynode

 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
@@ -22,18 +24,18 @@ type TimeRange struct {
 }

 type MasterServiceInterface interface {
-	DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
+	DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
 }

 type QueryServiceInterface interface {
-	RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
+	RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
 }

 type DataServiceInterface interface {
-	GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
-	GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
+	GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
+	GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
 }

 type IndexServiceInterface interface {
-	GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
+	GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
 }
@@ -1,6 +1,7 @@
 package queryservice

 import (
+	"context"
 	"errors"
 	"strconv"

@@ -43,7 +44,7 @@ func NewMasterMock() *MasterMock {
 	}
 }

-func (master *MasterMock) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
+func (master *MasterMock) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	collectionID := in.CollectionID
 	partitionIDs := make([]UniqueID, 0)
 	for _, id := range master.CollectionIDs {
@@ -62,7 +63,7 @@ func (master *MasterMock) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
 	return response, nil
 }

-func (master *MasterMock) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
+func (master *MasterMock) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
 	collectionID := in.CollectionID
 	partitionID := in.PartitionID

@@ -122,7 +123,7 @@ func NewDataMock() *DataMock {
 	}
 }

-func (data *DataMock) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
+func (data *DataMock) GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
 	ret := &datapb.SegmentStatesResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -142,7 +143,7 @@ func (data *DataMock) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {

 	return ret, nil
 }
-func (data *DataMock) GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
+func (data *DataMock) GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error) {
 	return &internalpb2.StringList{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -1,6 +1,8 @@
 package queryservice

 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/proto/querypb"
@@ -12,36 +14,36 @@ type queryNodeInfo struct {
 	dmChannelNames []string
 }

-func (qn *queryNodeInfo) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return qn.client.GetComponentStates()
+func (qn *queryNodeInfo) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
+	return qn.client.GetComponentStates(ctx)
 }

-func (qn *queryNodeInfo) LoadSegments(in *querypb.LoadSegmentRequest) (*commonpb.Status, error) {
-	return qn.client.LoadSegments(in)
+func (qn *queryNodeInfo) LoadSegments(ctx context.Context, in *querypb.LoadSegmentRequest) (*commonpb.Status, error) {
+	return qn.client.LoadSegments(ctx, in)
 }

-func (qn *queryNodeInfo) GetSegmentInfo(in *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
-	return qn.client.GetSegmentInfo(in)
+func (qn *queryNodeInfo) GetSegmentInfo(ctx context.Context, in *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
+	return qn.client.GetSegmentInfo(ctx, in)
 }

-func (qn *queryNodeInfo) WatchDmChannels(in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
-	return qn.client.WatchDmChannels(in)
+func (qn *queryNodeInfo) WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
+	return qn.client.WatchDmChannels(ctx, in)
 }

 func (qn *queryNodeInfo) AddDmChannels(channels []string) {
 	qn.dmChannelNames = append(qn.dmChannelNames, channels...)
 }

-func (qn *queryNodeInfo) AddQueryChannel(in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error) {
-	return qn.client.AddQueryChannel(in)
+func (qn *queryNodeInfo) AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error) {
+	return qn.client.AddQueryChannel(ctx, in)
 }

-func (qn *queryNodeInfo) ReleaseCollection(in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return qn.client.ReleaseCollection(in)
+func (qn *queryNodeInfo) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+	return qn.client.ReleaseCollection(ctx, in)
 }

-func (qn *queryNodeInfo) ReleasePartitions(in *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
-	return qn.client.ReleasePartitions(in)
+func (qn *queryNodeInfo) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
+	return qn.client.ReleasePartitions(ctx, in)
 }

 func newQueryNodeInfo(client QueryNodeInterface) {
@@ -24,26 +24,26 @@ import (
 )

 type MasterServiceInterface interface {
-	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
-	ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
+	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+	ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
 }

 type DataServiceInterface interface {
-	GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
-	GetInsertChannels(req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
+	GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
+	GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
 }

 type QueryNodeInterface interface {
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)

-	AddQueryChannel(in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error)
-	RemoveQueryChannel(in *querypb.RemoveQueryChannelsRequest) (*commonpb.Status, error)
-	WatchDmChannels(in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error)
-	LoadSegments(in *querypb.LoadSegmentRequest) (*commonpb.Status, error)
-	ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
-	ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
-	ReleaseSegments(in *querypb.ReleaseSegmentRequest) (*commonpb.Status, error)
-	GetSegmentInfo(req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
+	AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelsRequest) (*commonpb.Status, error)
+	RemoveQueryChannel(ctx context.Context, in *querypb.RemoveQueryChannelsRequest) (*commonpb.Status, error)
+	WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error)
+	LoadSegments(ctx context.Context, in *querypb.LoadSegmentRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
+	ReleaseSegments(ctx context.Context, in *querypb.ReleaseSegmentRequest) (*commonpb.Status, error)
+	GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
 }

 type queryChannelInfo struct {
@@ -95,14 +95,14 @@ func (qs *QueryService) UpdateStateCode(code internalpb2.StateCode) {
 	qs.stateCode.Store(code)
 }

-func (qs *QueryService) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (qs *QueryService) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
 	serviceComponentInfo := &internalpb2.ComponentInfo{
 		NodeID:    Params.QueryServiceID,
 		StateCode: qs.stateCode.Load().(internalpb2.StateCode),
 	}
 	subComponentInfos := make([]*internalpb2.ComponentInfo, 0)
 	for nodeID, node := range qs.queryNodes {
-		componentStates, err := node.GetComponentStates()
+		componentStates, err := node.GetComponentStates(ctx)
 		if err != nil {
 			subComponentInfos = append(subComponentInfos, &internalpb2.ComponentInfo{
 				NodeID: nodeID,
@@ -121,15 +121,27 @@ func (qs *QueryService) GetComponentStates() (*internalpb2.ComponentStates, error) {
 	}, nil
 }

-func (qs *QueryService) GetTimeTickChannel() (string, error) {
-	return Params.TimeTickChannelName, nil
+func (qs *QueryService) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: Params.TimeTickChannelName,
+	}, nil
 }

-func (qs *QueryService) GetStatisticsChannel() (string, error) {
-	return Params.StatsChannelName, nil
+func (qs *QueryService) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
+	return &milvuspb.StringResponse{
+		Status: &commonpb.Status{
+			ErrorCode: commonpb.ErrorCode_SUCCESS,
+			Reason:    "",
+		},
+		Value: Params.StatsChannelName,
+	}, nil
 }

-func (qs *QueryService) RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
+func (qs *QueryService) RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
 	fmt.Println("register query node =", req.Address)
 	// TODO:: add mutex
 	nodeID := req.Base.SourceID
@@ -187,7 +199,7 @@ func (qs *QueryService) RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error) {
 	}, nil
 }

-func (qs *QueryService) ShowCollections(req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
+func (qs *QueryService) ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
 	dbID := req.DbID
 	fmt.Println("show collection start, dbID = ", dbID)
 	collections, err := qs.replica.getCollections(dbID)
@@ -212,7 +224,7 @@ func (qs *QueryService) ShowCollections(req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error) {
 	}, nil
 }

-func (qs *QueryService) LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
+func (qs *QueryService) LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
 	dbID := req.DbID
 	collectionID := req.CollectionID
 	schema := req.Schema
@@ -253,7 +265,7 @@ func (qs *QueryService) LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
 		CollectionID: collectionID,
 	}

-	showPartitionResponse, err := qs.masterServiceClient.ShowPartitions(showPartitionRequest)
+	showPartitionResponse, err := qs.masterServiceClient.ShowPartitions(ctx, showPartitionRequest)
 	if err != nil {
 		return fn(err), err
 	}
@@ -270,13 +282,13 @@ func (qs *QueryService) LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
 		Schema:       schema,
 	}

-	status, err := qs.LoadPartitions(loadPartitionsRequest)
+	status, err := qs.LoadPartitions(ctx, loadPartitionsRequest)

 	fmt.Println("load collection end, collectionID = ", collectionID)
 	return status, err
 }

-func (qs *QueryService) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+func (qs *QueryService) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
 	dbID := req.DbID
 	collectionID := req.CollectionID
 	fmt.Println("release collection start, collectionID = ", collectionID)
@@ -289,7 +301,7 @@ func (qs *QueryService) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
 	}

 	for nodeID, node := range qs.queryNodes {
-		status, err := node.ReleaseCollection(req)
+		status, err := node.ReleaseCollection(ctx, req)
 		if err != nil {
 			fmt.Println("release collection end, node ", nodeID, " occur error")
 			return status, err
@@ -298,7 +310,6 @@ func (qs *QueryService) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {

 	err = qs.replica.releaseCollection(dbID, collectionID)
 	if err != nil {
-		fmt.Println("release collection end, query service release replica error")
 		return &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 			Reason:    err.Error(),
@@ -312,7 +323,7 @@ func (qs *QueryService) ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
 	}, nil
 }

-func (qs *QueryService) ShowPartitions(req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
+func (qs *QueryService) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
 	dbID := req.DbID
 	collectionID := req.CollectionID
 	partitions, err := qs.replica.getPartitions(dbID, collectionID)
@@ -336,7 +347,7 @@ func (qs *QueryService) ShowPartitions(req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error) {
 	}, nil
 }

-func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
+func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
 	//TODO::suggest different partitions have different dm channel
 	dbID := req.DbID
 	collectionID := req.CollectionID
@@ -390,7 +401,7 @@ func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
 			CollectionID: collectionID,
 			PartitionID:  partitionID,
 		}
-		showSegmentResponse, err := qs.masterServiceClient.ShowSegments(showSegmentRequest)
+		showSegmentResponse, err := qs.masterServiceClient.ShowSegments(ctx, showSegmentRequest)
 		if err != nil {
 			return fn(err), err
 		}
@@ -402,7 +413,7 @@ func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
 				Schema: schema,
 			}
 			for _, node := range qs.queryNodes {
-				_, err := node.LoadSegments(loadSegmentRequest)
+				_, err := node.LoadSegments(ctx, loadSegmentRequest)
 				if err != nil {
 					return fn(err), nil
 				}
@@ -412,7 +423,7 @@ func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {

 		segmentStates := make(map[UniqueID]*datapb.SegmentStateInfo)
 		channel2segs := make(map[string][]UniqueID)
-		resp, err := qs.dataServiceClient.GetSegmentStates(&datapb.SegmentStatesRequest{
+		resp, err := qs.dataServiceClient.GetSegmentStates(ctx, &datapb.SegmentStatesRequest{
 			SegmentIDs: segmentIDs,
 		})
 		if err != nil {
@@ -455,7 +466,7 @@ func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
 			queryNode := qs.queryNodes[nodeID]
 			//TODO:: seek when loadSegment may cause more msgs consumed
 			//TODO:: all query node should load partition's msg
-			status, err := queryNode.LoadSegments(loadSegmentRequest)
+			status, err := queryNode.LoadSegments(ctx, loadSegmentRequest)
 			if err != nil {
 				return status, err
 			}
@@ -470,7 +481,7 @@ func (qs *QueryService) LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error) {
 	}, nil
 }

-func (qs *QueryService) ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
+func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
 	dbID := req.DbID
 	collectionID := req.CollectionID
 	partitionIDs := req.PartitionIDs
@@ -486,7 +497,7 @@ func (qs *QueryService) ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
 	req.PartitionIDs = toReleasedPartitionID

 	for _, node := range qs.queryNodes {
-		status, err := node.client.ReleasePartitions(req)
+		status, err := node.client.ReleasePartitions(ctx, req)
 		if err != nil {
 			return status, err
 		}
@@ -509,7 +520,7 @@ func (qs *QueryService) ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error) {
 	}, nil
 }

-func (qs *QueryService) CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error) {
+func (qs *QueryService) CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error) {
 	channelID := len(qs.queryChannels)
 	allocatedQueryChannel := "query-" + strconv.FormatInt(int64(channelID), 10)
 	allocatedQueryResultChannel := "queryResult-" + strconv.FormatInt(int64(channelID), 10)
@@ -528,7 +539,7 @@ func (qs *QueryService) CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error) {
 	for nodeID, node := range qs.queryNodes {
 		fmt.Println("node ", nodeID, " watch query channel")
 		fn := func() error {
-			_, err := node.AddQueryChannel(addQueryChannelsRequest)
+			_, err := node.AddQueryChannel(ctx, addQueryChannelsRequest)
 			return err
 		}
 		err := retry.Retry(10, time.Millisecond*200, fn)
@@ -553,7 +564,7 @@ func (qs *QueryService) CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error) {
 	}, nil
 }

-func (qs *QueryService) GetPartitionStates(req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
+func (qs *QueryService) GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
 	states, err := qs.replica.getPartitionStates(req.DbID, req.CollectionID, req.PartitionIDs)
 	if err != nil {
 		return &querypb.PartitionStatesResponse{
@@ -572,10 +583,10 @@ func (qs *QueryService) GetPartitionStates(req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error) {
 	}, nil
 }

-func (qs *QueryService) GetSegmentInfo(req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
+func (qs *QueryService) GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error) {
 	segmentInfos := make([]*querypb.SegmentInfo, 0)
 	for _, node := range qs.queryNodes {
-		segmentInfo, err := node.client.GetSegmentInfo(req)
+		segmentInfo, err := node.client.GetSegmentInfo(ctx, req)
 		if err != nil {
 			return &querypb.SegmentInfoResponse{
 				Status: &commonpb.Status{
@@ -636,6 +647,7 @@ func (qs *QueryService) SetDataService(dataService DataServiceInterface) {
 }

 func (qs *QueryService) watchDmChannels(dbID UniqueID, collectionID UniqueID) error {
+	ctx := context.TODO()
 	collection, err := qs.replica.getCollectionByID(0, collectionID)
 	if err != nil {
 		return err
@@ -644,7 +656,7 @@ func (qs *QueryService) watchDmChannels(dbID UniqueID, collectionID UniqueID) error {
 		DbID:         dbID,
 		CollectionID: collectionID,
 	}
-	resp, err := qs.dataServiceClient.GetInsertChannels(&channelRequest)
+	resp, err := qs.dataServiceClient.GetInsertChannels(ctx, &channelRequest)
 	if err != nil {
 		return err
 	}
@@ -697,7 +709,7 @@ func (qs *QueryService) watchDmChannels(dbID UniqueID, collectionID UniqueID) error {
 	request := &querypb.WatchDmChannelsRequest{
 		ChannelIDs: channels,
 	}
-	_, err := node.WatchDmChannels(request)
+	_, err := node.WatchDmChannels(ctx, request)
 	if err != nil {
 		return err
 	}
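Note that GetTimeTickChannel and GetStatisticsChannel no longer return a bare string: callers now receive a *milvuspb.StringResponse carrying a Status alongside the Value, so transport-level errors and application-level failures become distinguishable. A hedged sketch of the consuming side (qs stands for any handle exposing the new API):

	// Hedged sketch: consuming the new StringResponse-returning method.
	resp, err := qs.GetTimeTickChannel(ctx)
	if err != nil {
		return err // transport / RPC failure
	}
	if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
		return errors.New(resp.Status.Reason) // application-level failure
	}
	timeTickChannel := resp.Value
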
@@ -12,6 +12,7 @@ import (
 )

 func TestQueryService_Init(t *testing.T) {
+	ctx := context.Background()
 	msFactory := pulsarms.NewFactory()
 	service, err := NewQueryService(context.Background(), msFactory)
 	assert.Nil(t, err)
@@ -19,20 +20,20 @@ func TestQueryService_Init(t *testing.T) {
 	service.Start()

 	t.Run("Test create channel", func(t *testing.T) {
-		response, err := service.CreateQueryChannel()
+		response, err := service.CreateQueryChannel(ctx)
 		assert.Nil(t, err)
 		assert.Equal(t, response.RequestChannel, "query-0")
 		assert.Equal(t, response.ResultChannel, "queryResult-0")
 	})

 	t.Run("Test Get statistics channel", func(t *testing.T) {
-		response, err := service.GetStatisticsChannel()
+		response, err := service.GetStatisticsChannel(ctx)
 		assert.Nil(t, err)
 		assert.Equal(t, response, "query-node-stats")
 	})

 	t.Run("Test Get timeTick channel", func(t *testing.T) {
-		response, err := service.GetTimeTickChannel()
+		response, err := service.GetTimeTickChannel(ctx)
 		assert.Nil(t, err)
 		assert.Equal(t, response, "queryTimeTick")
 	})
@@ -41,6 +42,7 @@ func TestQueryService_Init(t *testing.T) {
 }

 func TestQueryService_load(t *testing.T) {
+	ctx := context.Background()
 	msFactory := pulsarms.NewFactory()
 	service, err := NewQueryService(context.Background(), msFactory)
 	assert.Nil(t, err)
@@ -51,13 +53,13 @@ func TestQueryService_load(t *testing.T) {
 	registerNodeRequest := &querypb.RegisterNodeRequest{
 		Address: &commonpb.Address{},
 	}
-	service.RegisterNode(registerNodeRequest)
+	service.RegisterNode(ctx, registerNodeRequest)

 	t.Run("Test LoadSegment", func(t *testing.T) {
 		loadCollectionRequest := &querypb.LoadCollectionRequest{
 			CollectionID: 1,
 		}
-		response, err := service.LoadCollection(loadCollectionRequest)
+		response, err := service.LoadCollection(ctx, loadCollectionRequest)
 		assert.Nil(t, err)
 		assert.Equal(t, response.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	})
@@ -67,7 +69,7 @@ func TestQueryService_load(t *testing.T) {
 			CollectionID: 1,
 			PartitionIDs: []UniqueID{1},
 		}
-		response, err := service.LoadPartitions(loadPartitionRequest)
+		response, err := service.LoadPartitions(ctx, loadPartitionRequest)
 		assert.Nil(t, err)
 		assert.Equal(t, response.ErrorCode, commonpb.ErrorCode_SUCCESS)
 	})
@@ -47,9 +47,9 @@ func GetLocalIP() string {
 	return ipv4.LocalIP()
 }

-func WaitForComponentStates(service StateComponent, serviceName string, states []internalpb2.StateCode, attempts int, sleep time.Duration) error {
+func WaitForComponentStates(ctx context.Context, service StateComponent, serviceName string, states []internalpb2.StateCode, attempts int, sleep time.Duration) error {
 	checkFunc := func() error {
-		resp, err := service.GetComponentStates()
+		resp, err := service.GetComponentStates(ctx)
 		if err != nil {
 			return err
 		}
@@ -74,16 +74,16 @@ func WaitForComponentStates(service StateComponent, serviceName string, states []internalpb2.StateCode, attempts int, sleep time.Duration) error {
 	return retry.Retry(attempts, sleep, checkFunc)
 }

-func WaitForComponentInitOrHealthy(service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
-	return WaitForComponentStates(service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_INITIALIZING, internalpb2.StateCode_HEALTHY}, attempts, sleep)
+func WaitForComponentInitOrHealthy(ctx context.Context, service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
+	return WaitForComponentStates(ctx, service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_INITIALIZING, internalpb2.StateCode_HEALTHY}, attempts, sleep)
 }

-func WaitForComponentInit(service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
-	return WaitForComponentStates(service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_INITIALIZING}, attempts, sleep)
+func WaitForComponentInit(ctx context.Context, service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
+	return WaitForComponentStates(ctx, service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_INITIALIZING}, attempts, sleep)
 }

-func WaitForComponentHealthy(service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
-	return WaitForComponentStates(service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_HEALTHY}, attempts, sleep)
+func WaitForComponentHealthy(ctx context.Context, service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
+	return WaitForComponentStates(ctx, service, serviceName, []internalpb2.StateCode{internalpb2.StateCode_HEALTHY}, attempts, sleep)
 }

 func ParseIndexParamsMap(mStr string) (map[string]string, error) {
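The funcutil helpers now take the context as their first argument and hand it to each GetComponentStates probe. A short usage sketch under the new signatures (client and the timeout values are illustrative, not from this commit):

	// Hedged sketch: give the whole readiness wait a deadline; each retry's
	// GetComponentStates call inherits it through ctx.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := WaitForComponentHealthy(ctx, client, "QueryService", 10, 3*time.Second); err != nil {
		log.Fatal(err)
	}
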
@@ -1,7 +1,11 @@
 package funcutil

-import "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+import (
+	"context"
+
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+)

 type StateComponent interface {
-	GetComponentStates() (*internalpb2.ComponentStates, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
 }
@@ -1,6 +1,8 @@
 package typeutil

 import (
+	"context"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@@ -15,40 +17,40 @@ type Service interface {
 }

 type Component interface {
-	GetComponentStates() (*internalpb2.ComponentStates, error)
-	GetTimeTickChannel() (*milvuspb.StringResponse, error)
-	GetStatisticsChannel() (*milvuspb.StringResponse, error)
+	GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
+	GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+	GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error)
 }

 type IndexNodeInterface interface {
 	Service
 	Component
-	BuildIndex(req *indexpb.BuildIndexCmd) (*commonpb.Status, error)
-	DropIndex(req *indexpb.DropIndexRequest) (*commonpb.Status, error)
+	BuildIndex(ctx context.Context, req *indexpb.BuildIndexCmd) (*commonpb.Status, error)
+	DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
 }

 type IndexServiceInterface interface {
 	Service
 	Component
-	RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error)
-	BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
-	GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
-	GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
-	NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error)
+	RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error)
+	BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
+	GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
+	GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
+	NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error)
 }

 type QueryServiceInterface interface {
 	Service
 	Component

-	RegisterNode(req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
-	ShowCollections(req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error)
-	LoadCollection(req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
-	ReleaseCollection(req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
-	ShowPartitions(req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error)
-	LoadPartitions(req *querypb.LoadPartitionRequest) (*commonpb.Status, error)
-	ReleasePartitions(req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
-	CreateQueryChannel() (*querypb.CreateQueryChannelResponse, error)
-	GetPartitionStates(req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error)
-	GetSegmentInfo(req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
+	RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
+	ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error)
+	LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
+	ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+	ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error)
+	LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error)
+	ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
+	CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error)
+	GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error)
+	GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
 }
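The typeutil interfaces are the root of this refactor: Component and the service interfaces built on it are now uniformly ctx-first, so every implementation touched in the sections above must change together. A minimal hedged sketch of a type satisfying the new Component contract (dummyComponent is a hypothetical illustration, not part of this commit):

	// Hypothetical component satisfying typeutil.Component after this change.
	type dummyComponent struct{}

	func (d *dummyComponent) GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error) {
		return &internalpb2.ComponentStates{}, nil
	}

	func (d *dummyComponent) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
		return &milvuspb.StringResponse{Value: "timetick"}, nil
	}

	func (d *dummyComponent) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
		return &milvuspb.StringResponse{Value: "stats"}, nil
	}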