Mirror of https://github.com/milvus-io/milvus.git
commit be980fbc38 (parent 0f2f4a0a75)
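The change repeated throughout this diff replaces merr.Status(nil) with merr.Success() wherever a handler or mock builds a successful response. The snippet below is only a sketch of the equivalence the refactor relies on, not the actual pkg/util/merr implementation: a Success helper that returns the same successful commonpb.Status that Status(nil) produced. The package name, import path, and field handling here are assumptions for illustration.

package merrsketch

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)

// statusFromErr mirrors how merr.Status(err) is used in this diff:
// a nil error maps to a success status, a non-nil error to an error
// status carrying the reason string.
func statusFromErr(err error) *commonpb.Status {
	if err == nil {
		return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
	}
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_UnexpectedError,
		Reason:    err.Error(),
	}
}

// success is the shorthand the commit switches call sites to: the intent
// ("this is a success response") is stated directly instead of passing nil.
func success() *commonpb.Status {
	return statusFromErr(nil)
}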
@@ -1,5 +1,5 @@
 run:
-go: '1.18'
+go: "1.18"
 skip-dirs:
 - build
 - configs
@@ -89,20 +89,20 @@ linters-settings:
 settings:
 ruleguard:
 failOnError: true
-rules: 'rules.go'
+rules: "rules.go"
 depguard:
 rules:
 main:
 deny:
-- pkg: 'errors'
+- pkg: "errors"
 desc: not allowd, use github.com/cockroachdb/errors
-- pkg: 'github.com/pkg/errors'
+- pkg: "github.com/pkg/errors"
 desc: not allowd, use github.com/cockroachdb/errors
-- pkg: 'github.com/pingcap/errors'
+- pkg: "github.com/pingcap/errors"
 desc: not allowd, use github.com/cockroachdb/errors
-- pkg: 'golang.org/x/xerrors'
+- pkg: "golang.org/x/xerrors"
 desc: not allowd, use github.com/cockroachdb/errors
-- pkg: 'github.com/go-errors/errors'
+- pkg: "github.com/go-errors/errors"
 desc: not allowd, use github.com/cockroachdb/errors
 forbidigo:
 forbid:
@@ -112,10 +112,15 @@ linters-settings:
 - '\.(ErrorCode|Reason) = '
 - 'Reason:\s+\w+\.Error\(\)'
 - 'errors.New\((.+)\.GetReason\(\)\)'
 - 'commonpb\.Status\{[\s\n]*ErrorCode:[\s\n]*.+[\s\S\n]*?\}'
 #- 'fmt\.Print.*' WIP

 issues:
 exclude-use-default: false
 exclude-rules:
 - path: .+_test\.go
 linters:
 - forbidigo
 exclude:
 - should have a package comment
 - should have comment
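The forbidigo patterns listed above target hand-rolled status handling. The example below is a sketch only: it shows the kind of code those patterns are meant to flag next to the merr-based form the lint config steers toward. The function names are invented and the commonpb import path is an assumption; the merr calls follow the usage visible elsewhere in this diff.

package statusexample

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

// flaggedStatus builds a status by hand; patterns such as
// 'commonpb\.Status\{[\s\n]*ErrorCode:' and 'Reason:\s+\w+\.Error\(\)'
// are written to catch exactly this style.
func flaggedStatus(err error) *commonpb.Status {
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_UnexpectedError,
		Reason:    err.Error(),
	}
}

// preferredStatus routes through merr instead of assembling fields manually.
func preferredStatus(err error) *commonpb.Status {
	if err == nil {
		return merr.Success()
	}
	return merr.Status(err)
}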
@@ -32,7 +32,7 @@ type mockIDAllocator struct{}

 func (tso *mockIDAllocator) AllocID(ctx context.Context, req *rootcoordpb.AllocIDRequest, opts ...grpc.CallOption) (*rootcoordpb.AllocIDResponse, error) {
 return &rootcoordpb.AllocIDResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ID: int64(1),
 Count: req.Count,
 }, nil
@@ -619,7 +619,7 @@ func TestIndexBuilder(t *testing.T) {
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().GetJobStats(mock.Anything, mock.Anything, mock.Anything).
 Return(&indexpb.GetJobStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TotalJobNum: 1,
 EnqueueJobNum: 0,
 InProgressJobNum: 1,
@@ -645,17 +645,17 @@ func TestIndexBuilder(t *testing.T) {
 })
 }
 return &indexpb.QueryJobsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ClusterID: in.ClusterID,
 IndexInfos: indexInfos,
 }, nil
 })

 ic.EXPECT().CreateJob(mock.Anything, mock.Anything, mock.Anything, mock.Anything).
-Return(merr.Status(nil), nil)
+Return(merr.Success(), nil)

 ic.EXPECT().DropJobs(mock.Anything, mock.Anything, mock.Anything).
-Return(merr.Status(nil), nil)
+Return(merr.Success(), nil)
 mt := createMetaTable(catalog)
 nodeManager := &IndexNodeManager{
 ctx: ctx,
@@ -811,7 +811,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().CreateJob(mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("error"))
 ic.EXPECT().GetJobStats(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.GetJobStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskSlots: 1,
 }, nil)

@@ -836,7 +836,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 Reason: "mock fail",
 }, nil)
 ic.EXPECT().GetJobStats(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.GetJobStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskSlots: 1,
 }, nil)

@@ -963,7 +963,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 ib.meta.catalog = ec
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexInfos: []*indexpb.IndexTaskInfo{
 {
 BuildID: buildID,
@@ -995,7 +995,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 ib.meta.catalog = ec
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexInfos: []*indexpb.IndexTaskInfo{
 {
 BuildID: buildID,
@@ -1027,7 +1027,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 ib.meta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexInfos: nil,
 }, nil)
 ib.nodeManager = &IndexNodeManager{
@@ -205,7 +205,7 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
 zap.String("IndexName", req.GetIndexName()), zap.Int64("fieldID", req.GetFieldID()),
 zap.Int64("IndexID", indexID))
 metrics.IndexRequestCounter.WithLabelValues(metrics.SuccessLabel).Inc()
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // GetIndexState gets the index state of the index name in the request from Proxy.
@@ -240,7 +240,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe
 }, nil
 }
 ret := &indexpb.GetIndexStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 State: commonpb.IndexState_Finished,
 }

@@ -274,7 +274,7 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
 }

 ret := &indexpb.GetSegmentIndexStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 States: make([]*indexpb.SegmentIndexState, 0),
 }
 indexID2CreateTs := s.meta.GetIndexIDByName(req.GetCollectionID(), req.GetIndexName())
@@ -469,7 +469,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
 log.Info("GetIndexBuildProgress success", zap.Int64("collectionID", req.GetCollectionID()),
 zap.String("indexName", req.GetIndexName()))
 return &indexpb.GetIndexBuildProgressResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexedRows: indexInfo.IndexedRows,
 TotalRows: indexInfo.TotalRows,
 PendingIndexRows: indexInfo.PendingIndexRows,
@@ -531,7 +531,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
 }
 log.Info("DescribeIndex success")
 return &indexpb.DescribeIndexResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexInfos: indexInfos,
 }, nil
 }
@@ -586,7 +586,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
 log.Debug("GetIndexStatisticsResponse success",
 zap.String("indexName", req.GetIndexName()))
 return &indexpb.GetIndexStatisticsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 IndexInfos: indexInfos,
 }, nil
 }
@@ -610,7 +610,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
 indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
 if len(indexes) == 0 {
 log.Info(fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName))
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 if !req.GetDropAll() && len(indexes) > 1 {
@@ -635,7 +635,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (

 log.Debug("DropIndex success", zap.Int64s("partitionIDs", req.GetPartitionIDs()),
 zap.String("indexName", req.GetIndexName()), zap.Int64s("indexIDs", indexIDs))
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // GetIndexInfos gets the index file paths for segment from DataCoord.
@@ -651,7 +651,7 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq
 }, nil
 }
 ret := &indexpb.GetIndexInfoResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 SegmentInfo: map[int64]*indexpb.SegmentInfo{},
 }

@@ -694,8 +694,3 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq

 return ret, nil
 }
-
-func (s *Server) UnhealthyStatus() *commonpb.Status {
-code := s.stateCode.Load().(commonpb.StateCode)
-return merr.Status(merr.WrapErrServiceNotReady(code.String()))
-}
@@ -85,11 +85,11 @@ func TestIndexNodeManager_PeekClient(t *testing.T) {
 }, nil),
 8: getMockedGetJobStatsClient(&indexpb.GetJobStatsResponse{
 TaskSlots: 1,
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil),
 9: getMockedGetJobStatsClient(&indexpb.GetJobStatsResponse{
 TaskSlots: 10,
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil),
 },
 }
@@ -115,7 +115,7 @@ func TestIndexNodeManager_ClientSupportDisk(t *testing.T) {
 lock: sync.RWMutex{},
 nodeClients: map[UniqueID]types.IndexNodeClient{
 1: getMockedGetJobStatsClient(&indexpb.GetJobStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskSlots: 1,
 JobInfos: nil,
 EnableDisk: true,
@@ -133,7 +133,7 @@ func TestIndexNodeManager_ClientSupportDisk(t *testing.T) {
 lock: sync.RWMutex{},
 nodeClients: map[UniqueID]types.IndexNodeClient{
 1: getMockedGetJobStatsClient(&indexpb.GetJobStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskSlots: 1,
 JobInfos: nil,
 EnableDisk: false,
@@ -89,7 +89,7 @@ func (s *Server) getSystemInfoMetrics(
 }

 resp := &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
 }
 var err error
@@ -112,7 +112,7 @@ func TestGetDataNodeMetrics(t *testing.T) {
 // mock parse error
 mockFailClientCreator = getMockFailedClientCreator(func() (*milvuspb.GetMetricsResponse, error) {
 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: `{"error_reason": 1}`,
 }, nil
 })
@@ -157,7 +157,7 @@ func TestGetIndexNodeMetrics(t *testing.T) {
 info, err = svr.getIndexNodeMetrics(ctx, req, &mockMetricIndexNodeClient{
 mock: func() (*milvuspb.GetMetricsResponse, error) {
 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: "XXXXXXXXXXXXX",
 ComponentName: "indexnode100",
 }, nil
@@ -187,7 +187,7 @@ func TestGetIndexNodeMetrics(t *testing.T) {
 }

 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: resp,
 ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, nodeID),
 }, nil
@@ -167,7 +167,7 @@ func newMockDataNodeClient(id int64, ch chan interface{}) (*mockDataNodeClient,
 state: commonpb.StateCode_Initializing,
 ch: ch,
 addImportSegmentResp: &datapb.AddImportSegmentResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 },
 }, nil
 }
@@ -214,13 +214,13 @@ func (c *mockDataNodeClient) FlushSegments(ctx context.Context, in *datapb.Flush

 func (c *mockDataNodeClient) ResendSegmentStats(ctx context.Context, req *datapb.ResendSegmentStatsRequest, opts ...grpc.CallOption) (*datapb.ResendSegmentStatsResponse, error) {
 return &datapb.ResendSegmentStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

 func (c *mockDataNodeClient) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error) {
 return &internalpb.ShowConfigurationsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

@@ -247,7 +247,7 @@ func (c *mockDataNodeClient) GetMetrics(ctx context.Context, req *milvuspb.GetMe
 }

 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: resp,
 ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, nodeID),
 }, nil
@@ -288,11 +288,11 @@ func (c *mockDataNodeClient) FlushChannels(ctx context.Context, req *datapb.Flus
 }

 func (c *mockDataNodeClient) NotifyChannelOperation(ctx context.Context, req *datapb.ChannelOperationsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (c *mockDataNodeClient) CheckChannelOperationProgress(ctx context.Context, req *datapb.ChannelWatchInfo, opts ...grpc.CallOption) (*datapb.ChannelOperationProgressResponse, error) {
-return &datapb.ChannelOperationProgressResponse{Status: merr.Status(nil)}, nil
+return &datapb.ChannelOperationProgressResponse{Status: merr.Success()}, nil
 }

 func (c *mockDataNodeClient) Stop() error {
@@ -357,7 +357,7 @@ func (m *mockRootCoordClient) GetComponentStates(ctx context.Context, req *milvu
 ExtraInfo: []*commonpb.KeyValuePair{},
 },
 SubcomponentStates: []*milvuspb.ComponentInfo{},
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

@@ -387,7 +387,7 @@ func (m *mockRootCoordClient) DescribeCollection(ctx context.Context, req *milvu
 }, nil
 }
 return &milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Schema: &schemapb.CollectionSchema{
 Name: "test",
 },
@@ -402,7 +402,7 @@ func (m *mockRootCoordClient) DescribeCollectionInternal(ctx context.Context, re

 func (m *mockRootCoordClient) ShowCollections(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
 return &milvuspb.ShowCollectionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionNames: []string{"test"},
 }, nil
 }
@@ -437,7 +437,7 @@ func (m *mockRootCoordClient) HasPartition(ctx context.Context, req *milvuspb.Ha

 func (m *mockRootCoordClient) ShowPartitions(ctx context.Context, req *milvuspb.ShowPartitionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowPartitionsResponse, error) {
 return &milvuspb.ShowPartitionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 PartitionNames: []string{"_default"},
 PartitionIDs: []int64{0},
 }, nil
@@ -457,7 +457,7 @@ func (m *mockRootCoordClient) AllocTimestamp(ctx context.Context, req *rootcoord
 phy := time.Now().UnixNano() / int64(time.Millisecond)
 ts := tsoutil.ComposeTS(phy, val)
 return &rootcoordpb.AllocTimestampResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Timestamp: ts,
 Count: req.Count,
 }, nil
@@ -469,7 +469,7 @@ func (m *mockRootCoordClient) AllocID(ctx context.Context, req *rootcoordpb.Allo
 }
 val := atomic.AddInt64(&m.cnt, int64(req.Count))
 return &rootcoordpb.AllocIDResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ID: val,
 Count: req.Count,
 }, nil
@@ -490,7 +490,7 @@ func (m *mockRootCoordClient) DescribeSegments(ctx context.Context, req *rootcoo

 func (m *mockRootCoordClient) GetDdChannel(ctx context.Context, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) {
 return &milvuspb.StringResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Value: "ddchannel",
 }, nil
 }
@@ -513,7 +513,7 @@ func (m *mockRootCoordClient) AddNewSegment(ctx context.Context, in *datapb.Segm

 func (m *mockRootCoordClient) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error) {
 return &internalpb.ShowConfigurationsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

@@ -548,7 +548,7 @@ func (m *mockRootCoordClient) GetMetrics(ctx context.Context, req *milvuspb.GetM
 }

 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: resp,
 ComponentName: metricsinfo.ConstructComponentName(typeutil.RootCoordRole, nodeID),
 }, nil
@@ -569,7 +569,7 @@ func (m *mockRootCoordClient) ListImportTasks(ctx context.Context, in *milvuspb.
 }

 func (m *mockRootCoordClient) ReportImport(ctx context.Context, req *rootcoordpb.ImportResult, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 type mockCompactionHandler struct {
@@ -2415,13 +2415,13 @@ func TestShouldDropChannel(t *testing.T) {
 }
 myRoot := &myRootCoord{}
 myRoot.EXPECT().AllocTimestamp(mock.Anything, mock.Anything).Return(&rootcoordpb.AllocTimestampResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Timestamp: tsoutil.ComposeTSByTime(time.Now(), 0),
 Count: 1,
 }, nil)

 myRoot.EXPECT().AllocID(mock.Anything, mock.Anything).Return(&rootcoordpb.AllocIDResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ID: int64(tsoutil.ComposeTSByTime(time.Now(), 0)),
 Count: 1,
 }, nil)
@@ -2468,7 +2468,7 @@ func TestShouldDropChannel(t *testing.T) {
 t.Run("channel name not in kv, collection exist", func(t *testing.T) {
 myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
 Return(&milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionID: 0,
 }, nil).Once()
 assert.False(t, svr.handler.CheckShouldDropChannel("ch99", 0))
@@ -2477,7 +2477,7 @@ func TestShouldDropChannel(t *testing.T) {
 t.Run("collection name in kv, collection exist", func(t *testing.T) {
 myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
 Return(&milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionID: 0,
 }, nil).Once()
 assert.False(t, svr.handler.CheckShouldDropChannel("ch1", 0))
@@ -2497,7 +2497,7 @@ func TestShouldDropChannel(t *testing.T) {
 require.NoError(t, err)
 myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
 Return(&milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionID: 0,
 }, nil).Once()
 assert.True(t, svr.handler.CheckShouldDropChannel("ch1", 0))
@@ -3338,7 +3338,7 @@ type rootCoordSegFlushComplete struct {
 // SegmentFlushCompleted, override default behavior
 func (rc *rootCoordSegFlushComplete) SegmentFlushCompleted(ctx context.Context, req *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error) {
 if rc.flag {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }
 return &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError}, nil
 }
@@ -3415,7 +3415,7 @@ func TestGetFlushState(t *testing.T) {
 resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: true,
 }, resp)
 })
@@ -3463,7 +3463,7 @@ func TestGetFlushState(t *testing.T) {
 resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: false,
 }, resp)
 })
@@ -3511,7 +3511,7 @@ func TestGetFlushState(t *testing.T) {
 resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: true,
 }, resp)
 })
@@ -3547,7 +3547,7 @@ func TestGetFlushState(t *testing.T) {
 })
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: true,
 }, resp)
 })
@@ -3583,7 +3583,7 @@ func TestGetFlushState(t *testing.T) {
 })
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: false,
 }, resp)
 })
@@ -3602,7 +3602,7 @@ func TestGetFlushState(t *testing.T) {
 })
 assert.NoError(t, err)
 assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Flushed: true,
 }, resp)
 })
@@ -3671,7 +3671,7 @@ func TestGetFlushAllState(t *testing.T) {
 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
 Return(&milvuspb.ListDatabasesResponse{
 DbNames: []string{"db1"},
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil).Maybe()
 }

@@ -3683,7 +3683,7 @@ func TestGetFlushAllState(t *testing.T) {
 } else {
 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
 Return(&milvuspb.ShowCollectionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionIds: []int64{collection},
 }, nil).Maybe()
 }
@@ -3696,7 +3696,7 @@ func TestGetFlushAllState(t *testing.T) {
 } else {
 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).
 Return(&milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 VirtualChannelNames: vchannels,
 }, nil).Maybe()
 }
@@ -3753,25 +3753,25 @@ func TestGetFlushAllStateWithDB(t *testing.T) {
 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
 Return(&milvuspb.ListDatabasesResponse{
 DbNames: []string{dbName},
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil).Maybe()
 } else {
 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
 Return(&milvuspb.ListDatabasesResponse{
 DbNames: []string{},
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil).Maybe()
 }

 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
 Return(&milvuspb.ShowCollectionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 CollectionIds: []int64{collectionID},
 }, nil).Maybe()

 svr.rootCoordClient.(*mocks.MockRootCoordClient).EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).
 Return(&milvuspb.DescribeCollectionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 VirtualChannelNames: vchannels,
 CollectionID: collectionID,
 CollectionName: collectionName,
@@ -39,7 +39,6 @@ import (
 "github.com/milvus-io/milvus/pkg/log"
 "github.com/milvus-io/milvus/pkg/metrics"
 "github.com/milvus-io/milvus/pkg/util/commonpbutil"
-"github.com/milvus-io/milvus/pkg/util/errorutil"
 "github.com/milvus-io/milvus/pkg/util/merr"
 "github.com/milvus-io/milvus/pkg/util/metricsinfo"
 "github.com/milvus-io/milvus/pkg/util/paramtable"
@@ -51,7 +50,7 @@ import (
 // GetTimeTickChannel legacy API, returns time tick channel name
 func (s *Server) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error) {
 return &milvuspb.StringResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Value: Params.CommonCfg.DataCoordTimeTick.GetValue(),
 }, nil
 }
@@ -144,7 +143,7 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
 zap.Time("flushTs", tsoutil.PhysicalTime(ts)))

 return &datapb.FlushResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 DbID: req.GetDbID(),
 CollectionID: req.GetCollectionID(),
 SegmentIDs: sealedSegmentIDs,
@@ -215,13 +214,13 @@ func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentI
 CollectionID: r.CollectionID,
 PartitionID: r.PartitionID,
 ExpireTime: allocation.ExpireTime,
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 assigns = append(assigns, result)
 }
 }
 return &datapb.AssignSegmentIDResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 SegIDAssignments: assigns,
 }, nil
 }
@@ -235,7 +234,7 @@ func (s *Server) GetSegmentStates(ctx context.Context, req *datapb.GetSegmentSta
 }

 resp := &datapb.GetSegmentStatesResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 for _, segmentID := range req.SegmentIDs {
 state := &datapb.SegmentStateInfo{
@@ -269,7 +268,7 @@ func (s *Server) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsert
 }

 resp := &datapb.GetInsertBinlogPathsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 binlogs := segment.GetBinlogs()
 fids := make([]UniqueID, 0, len(binlogs))
@@ -302,7 +301,7 @@ func (s *Server) GetCollectionStatistics(ctx context.Context, req *datapb.GetCol
 }

 resp := &datapb.GetCollectionStatisticsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 nums := s.meta.GetNumRowsOfCollection(req.CollectionID)
 resp.Stats = append(resp.Stats, &commonpb.KeyValuePair{Key: "row_count", Value: strconv.FormatInt(nums, 10)})
@@ -319,7 +318,7 @@ func (s *Server) GetPartitionStatistics(ctx context.Context, req *datapb.GetPart
 zap.Int64s("partitionIDs", req.GetPartitionIDs()),
 )
 resp := &datapb.GetPartitionStatisticsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 return &datapb.GetPartitionStatisticsResponse{
@@ -342,7 +341,7 @@ func (s *Server) GetPartitionStatistics(ctx context.Context, req *datapb.GetPart
 // GetSegmentInfoChannel legacy API, returns segment info statistics channel
 func (s *Server) GetSegmentInfoChannel(ctx context.Context, req *datapb.GetSegmentInfoChannelRequest) (*milvuspb.StringResponse, error) {
 return &milvuspb.StringResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Value: Params.CommonCfg.DataCoordSegmentInfo.GetValue(),
 }, nil
 }
@@ -352,7 +351,7 @@ func (s *Server) GetSegmentInfoChannel(ctx context.Context, req *datapb.GetSegme
 func (s *Server) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoRequest) (*datapb.GetSegmentInfoResponse, error) {
 log := log.Ctx(ctx)
 resp := &datapb.GetSegmentInfoResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 return &datapb.GetSegmentInfoResponse{
@@ -447,7 +446,7 @@ func (s *Server) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath

 if segment.State == commonpb.SegmentState_Dropped {
 log.Info("save to dropped segment, ignore this request")
-return merr.Status(nil), nil
+return merr.Success(), nil
 } else if !isSegmentHealthy(segment) {
 err := merr.WrapErrSegmentNotFound(segmentID)
 log.Warn("failed to get segment, the segment not healthy", zap.Error(err))
@@ -490,7 +489,7 @@ func (s *Server) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
 }
 }
 }
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // DropVirtualChannel notifies vchannel dropped
@@ -498,7 +497,7 @@ func (s *Server) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
 func (s *Server) DropVirtualChannel(ctx context.Context, req *datapb.DropVirtualChannelRequest) (*datapb.DropVirtualChannelResponse, error) {
 log := log.Ctx(ctx)
 resp := &datapb.DropVirtualChannelResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 return &datapb.DropVirtualChannelResponse{
@@ -576,7 +575,7 @@ func (s *Server) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStat
 }, nil
 }
 return &datapb.SetSegmentStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

@@ -602,7 +601,7 @@ func (s *Server) GetComponentStates(ctx context.Context, req *milvuspb.GetCompon
 Role: "datacoord",
 StateCode: code,
 },
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 return resp, nil
 }
@@ -619,7 +618,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
 )
 log.Info("get recovery info request received")
 resp := &datapb.GetRecoveryInfoResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 return &datapb.GetRecoveryInfoResponse{
@@ -751,7 +750,7 @@ func (s *Server) GetRecoveryInfoV2(ctx context.Context, req *datapb.GetRecoveryI
 )
 log.Info("get recovery info request received")
 resp := &datapb.GetRecoveryInfoResponseV2{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 return &datapb.GetRecoveryInfoResponseV2{
@@ -828,7 +827,7 @@ func (s *Server) GetRecoveryInfoV2(ctx context.Context, req *datapb.GetRecoveryI
 func (s *Server) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedSegmentsRequest) (*datapb.GetFlushedSegmentsResponse, error) {
 log := log.Ctx(ctx)
 resp := &datapb.GetFlushedSegmentsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 collectionID := req.GetCollectionID()
 partitionID := req.GetPartitionID()
@@ -872,7 +871,7 @@ func (s *Server) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedS
 func (s *Server) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegmentsByStatesRequest) (*datapb.GetSegmentsByStatesResponse, error) {
 log := log.Ctx(ctx)
 resp := &datapb.GetSegmentsByStatesResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 collectionID := req.GetCollectionID()
 partitionID := req.GetPartitionID()
@@ -927,7 +926,7 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
 }

 return &internalpb.ShowConfigurationsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Configuations: configList,
 }, nil
 }
@@ -994,7 +993,7 @@ func (s *Server) ManualCompaction(ctx context.Context, req *milvuspb.ManualCompa
 log.Info("received manual compaction")

 resp := &milvuspb.ManualCompactionResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }

 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
@@ -1027,7 +1026,7 @@ func (s *Server) GetCompactionState(ctx context.Context, req *milvuspb.GetCompac
 )
 log.Info("received get compaction state request")
 resp := &milvuspb.GetCompactionStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }

 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
@@ -1074,7 +1073,7 @@ func (s *Server) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.
 }

 resp := &milvuspb.GetCompactionPlansResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 if !Params.DataCoordCfg.EnableCompaction.GetAsBool() {
 resp.Status = merr.Status(merr.WrapErrServiceUnavailable("compaction disabled"))
@@ -1148,7 +1147,7 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
 )
 log.Info("receive watch channels request")
 resp := &datapb.WatchChannelsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }

 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
@@ -1192,7 +1191,7 @@ func (s *Server) GetFlushState(ctx context.Context, req *datapb.GetFlushStateReq
 }, nil
 }

-resp := &milvuspb.GetFlushStateResponse{Status: merr.Status(nil)}
+resp := &milvuspb.GetFlushStateResponse{Status: merr.Success()}
 if len(req.GetSegmentIDs()) > 0 {
 var unflushed []UniqueID
 for _, sid := range req.GetSegmentIDs() {
@@ -1256,7 +1255,7 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
 }, nil
 }

-resp := &milvuspb.GetFlushAllStateResponse{Status: merr.Status(nil)}
+resp := &milvuspb.GetFlushAllStateResponse{Status: merr.Success()}

 dbsRsp, err := s.broker.ListDatabases(ctx)
 if err != nil {
@@ -1310,7 +1309,7 @@ func (s *Server) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*da
 log := log.Ctx(ctx)
 log.Info("DataCoord receives import request", zap.Any("req", req))
 resp := &datapb.ImportTaskResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }

 if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
@@ -1352,7 +1351,7 @@ func (s *Server) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
 return merr.Status(err), nil
 }
 s.updateSegmentStatistics(req.GetStats())
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // UpdateChannelCheckpoint updates channel checkpoint in dataCoord.
@@ -1368,7 +1367,7 @@ func (s *Server) UpdateChannelCheckpoint(ctx context.Context, req *datapb.Update
 return merr.Status(err), nil
 }

-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // ReportDataNodeTtMsgs send datenode timetick messages to dataCoord.
@@ -1393,7 +1392,7 @@ func (s *Server) ReportDataNodeTtMsgs(ctx context.Context, req *datapb.ReportDat
 }
 }

-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (s *Server) handleRPCTimetickMessage(ctx context.Context, ttMsg *msgpb.DataNodeTtMsg) error {
@@ -1515,7 +1514,7 @@ func (s *Server) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
 log.Error("failed to SaveBinlogPaths", zap.Error(err))
 return merr.Status(err), nil
 }
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // UnsetIsImportingState unsets the isImporting states of the given segments.
@@ -1577,12 +1576,12 @@ func (s *Server) BroadcastAlteredCollection(ctx context.Context, req *datapb.Alt
 Properties: properties,
 }
 s.meta.AddCollection(collInfo)
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 clonedColl.Properties = properties
 s.meta.AddCollection(clonedColl)
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {
@@ -1605,27 +1604,30 @@ func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthReque
 if err != nil {
 mu.Lock()
 defer mu.Unlock()
-errReasons = append(errReasons, errorutil.UnHealthReason("DataNode", nodeID, err.Error()))
+errReasons = append(errReasons, fmt.Sprintf("failed to get DataNode %d: %v", nodeID, err))
 return err
 }

 sta, err := cli.GetComponentStates(ctx, &milvuspb.GetComponentStatesRequest{})
-isHealthy, reason := errorutil.UnHealthReasonWithComponentStatesOrErr("DataNode", nodeID, sta, err)
-if !isHealthy {
+if err != nil {
+return err
+}
+err = merr.AnalyzeState("DataNode", nodeID, sta)
+if err != nil {
 mu.Lock()
 defer mu.Unlock()
-errReasons = append(errReasons, reason)
+errReasons = append(errReasons, err.Error())
 }
-return err
+return nil
 })
 }

 err := group.Wait()
 if err != nil || len(errReasons) != 0 {
-return &milvuspb.CheckHealthResponse{Status: merr.Status(nil), IsHealthy: false, Reasons: errReasons}, nil
+return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: false, Reasons: errReasons}, nil
 }

-return &milvuspb.CheckHealthResponse{Status: merr.Status(nil), IsHealthy: true, Reasons: errReasons}, nil
+return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: true, Reasons: errReasons}, nil
 }

 func (s *Server) GcConfirm(ctx context.Context, request *datapb.GcConfirmRequest) (*datapb.GcConfirmResponse, error) {
@@ -1636,7 +1638,7 @@ func (s *Server) GcConfirm(ctx context.Context, request *datapb.GcConfirmRequest
 }

 resp := &datapb.GcConfirmResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 resp.GcFinished = s.meta.GcConfirm(ctx, request.GetCollectionId(), request.GetPartitionId())
 return resp, nil
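The CheckHealth hunk above drops the errorutil helpers in favor of plain error values: a failed client lookup is reported with fmt.Sprintf, and an unhealthy component state is turned into an error by merr.AnalyzeState, whose Error() string joins the reasons slice under a mutex. The standalone sketch below restates that flow under stated assumptions: the stateGetter interface and the errgroup fan-out are stand-ins for the real session/client plumbing, and only the merr.AnalyzeState call shape is taken from the diff.

package healthsketch

import (
	"context"
	"fmt"
	"sync"

	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"golang.org/x/sync/errgroup"
)

// stateGetter is a stand-in for the DataNode client used in the real handler.
type stateGetter interface {
	GetComponentStates(ctx context.Context) (*milvuspb.ComponentStates, error)
}

// collectUnhealthyReasons mirrors the new CheckHealth loop: every failure is
// recorded as a plain string built from an error, with merr.AnalyzeState
// converting an unhealthy ComponentStates into that error.
func collectUnhealthyReasons(ctx context.Context, nodes map[int64]stateGetter) []string {
	var (
		mu      sync.Mutex
		reasons []string
	)
	group, ctx := errgroup.WithContext(ctx)
	for nodeID, cli := range nodes {
		nodeID, cli := nodeID, cli // capture loop variables for the goroutine
		group.Go(func() error {
			sta, err := cli.GetComponentStates(ctx)
			if err != nil {
				mu.Lock()
				defer mu.Unlock()
				reasons = append(reasons, fmt.Sprintf("failed to get DataNode %d: %v", nodeID, err))
				return err
			}
			if err := merr.AnalyzeState("DataNode", nodeID, sta); err != nil {
				mu.Lock()
				defer mu.Unlock()
				reasons = append(reasons, err.Error())
			}
			return nil
		})
	}
	_ = group.Wait() // unhealthy nodes are reported via reasons, not the error
	return reasons
}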
@@ -86,7 +86,7 @@ func (m *RootCoordFactory) AllocID(ctx context.Context, in *rootcoordpb.AllocIDR
 resp := &rootcoordpb.AllocIDResponse{
 ID: m.ID,
 Count: in.GetCount(),
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 return resp, nil
 }
@@ -77,7 +77,7 @@ func (m *ChannelManager) GetProgress(info *datapb.ChannelWatchInfo) *datapb.Chan
 m.mu.RLock()
 defer m.mu.RUnlock()
 resp := &datapb.ChannelOperationProgressResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 OpID: info.GetOpID(),
 }
@@ -116,7 +116,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
 }

 return &milvuspb.GetMetricsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Response: resp,
 ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
 }, nil
@@ -205,7 +205,7 @@ func (ds *DataCoordFactory) AssignSegmentID(ctx context.Context, req *datapb.Ass
 return nil, errors.New("Error")
 }
 res := &datapb.AssignSegmentIDResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 SegIDAssignments: []*datapb.SegmentIDAssignment{
 {
 SegID: 666,
@@ -252,11 +252,11 @@ func (ds *DataCoordFactory) DropVirtualChannel(ctx context.Context, req *datapb.
 }

 func (ds *DataCoordFactory) UpdateSegmentStatistics(ctx context.Context, req *datapb.UpdateSegmentStatisticsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) UpdateChannelCheckpoint(ctx context.Context, req *datapb.UpdateChannelCheckpointRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) ReportDataNodeTtMsgs(ctx context.Context, req *datapb.ReportDataNodeTtMsgsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
@@ -268,23 +268,23 @@ func (ds *DataCoordFactory) ReportDataNodeTtMsgs(ctx context.Context, req *datap
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
 }, nil
 }
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSegmentRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) UnsetIsImportingState(ctx context.Context, req *datapb.UnsetIsImportingStateRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmentsDroppedRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) BroadcastAlteredCollection(ctx context.Context, req *datapb.AlterCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (ds *DataCoordFactory) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest, opts ...grpc.CallOption) (*milvuspb.CheckHealthResponse, error) {
@@ -319,7 +319,7 @@ func (ds *DataCoordFactory) GetSegmentInfo(ctx context.Context, req *datapb.GetS
 }
 }
 return &datapb.GetSegmentInfoResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Infos: segmentInfos,
 }, nil
 }
@@ -1027,7 +1027,7 @@ func (m *RootCoordFactory) DescribeCollectionInternal(ctx context.Context, in *m
 func (m *RootCoordFactory) ShowPartitions(ctx context.Context, req *milvuspb.ShowPartitionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowPartitionsResponse, error) {
 if m.ShowPartitionsErr {
 return &milvuspb.ShowPartitionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, fmt.Errorf("mock show partitions error")
 }

@@ -1041,7 +1041,7 @@ func (m *RootCoordFactory) ShowPartitions(ctx context.Context, req *milvuspb.Sho
 }

 return &milvuspb.ShowPartitionsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 PartitionNames: m.ShowPartitionsNames,
 PartitionIDs: m.ShowPartitionsIDs,
 }, nil
@@ -1051,7 +1051,7 @@ func (m *RootCoordFactory) GetComponentStates(ctx context.Context, req *milvuspb
 return &milvuspb.ComponentStates{
 State: &milvuspb.ComponentInfo{},
 SubcomponentStates: make([]*milvuspb.ComponentInfo, 0),
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

@@ -1062,14 +1062,14 @@ func (m *RootCoordFactory) ReportImport(ctx context.Context, req *rootcoordpb.Im
 }
 }
 if m.ReportImportErr {
-return merr.Status(nil), fmt.Errorf("mock report import error")
+return merr.Success(), fmt.Errorf("mock report import error")
 }
 if m.ReportImportNotSuccess {
 return &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
 }, nil
 }
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // FailMessageStreamFactory mock MessageStreamFactory failure
@@ -61,7 +61,7 @@ func (node *DataNode) WatchDmChannels(ctx context.Context, in *datapb.WatchDmCha
 log.Warn("DataNode WatchDmChannels is not in use")

 // TODO ERROR OF GRPC NOT IN USE
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // GetComponentStates will return current state of DataNode
@@ -79,7 +79,7 @@ func (node *DataNode) GetComponentStates(ctx context.Context, req *milvuspb.GetC
 StateCode: node.stateCode.Load().(commonpb.StateCode),
 },
 SubcomponentStates: make([]*milvuspb.ComponentInfo, 0),
-Status: merr.Status(nil),
+Status: merr.Success(),
 }
 return states, nil
 }
@@ -94,8 +94,7 @@ func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmen
 fmt.Sprint(paramtable.GetNodeID()),
 metrics.TotalLabel).Inc()

-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.FlushSegments failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))

 return merr.Status(err), nil
@@ -160,7 +159,7 @@ func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmen
 metrics.DataNodeFlushReqCounter.WithLabelValues(
 fmt.Sprint(paramtable.GetNodeID()),
 metrics.SuccessLabel).Inc()
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // ResendSegmentStats resend un-flushed segment stats back upstream to DataCoord by resending DataNode time tick message.
@@ -172,7 +171,7 @@ func (node *DataNode) ResendSegmentStats(ctx context.Context, req *datapb.Resend
 log.Info("found segment(s) with stats to resend",
 zap.Int64s("segment IDs", segResent))
 return &datapb.ResendSegmentStatsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 SegResent: segResent,
 }, nil
 }
@@ -180,22 +179,21 @@ func (node *DataNode) ResendSegmentStats(ctx context.Context, req *datapb.Resend
 // GetTimeTickChannel currently do nothing
 func (node *DataNode) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error) {
 return &milvuspb.StringResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

 // GetStatisticsChannel currently do nothing
 func (node *DataNode) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
 return &milvuspb.StringResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 }, nil
 }

 // ShowConfigurations returns the configurations of DataNode matching req.Pattern
 func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
 log.Debug("DataNode.ShowConfigurations", zap.String("pattern", req.Pattern))
-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.ShowConfigurations failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))

 return &internalpb.ShowConfigurationsResponse{
@@ -213,15 +211,14 @@ func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.Sh
 }

 return &internalpb.ShowConfigurationsResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Configuations: configList,
 }, nil
 }

 // GetMetrics return datanode metrics
 func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.GetMetrics failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))

 return &milvuspb.GetMetricsResponse{
@@ -266,8 +263,7 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
 // Compaction handles compaction request from DataCoord
 // returns status as long as compaction task enqueued or invalid
 func (node *DataNode) Compaction(ctx context.Context, req *datapb.CompactionPlan) (*commonpb.Status, error) {
-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.Compaction failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))
 return merr.Status(err), nil
 }
@@ -296,14 +292,13 @@ func (node *DataNode) Compaction(ctx context.Context, req *datapb.CompactionPlan

 node.compactionExecutor.execute(task)

-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // GetCompactionState called by DataCoord
 // return status of all compaction plans
 func (node *DataNode) GetCompactionState(ctx context.Context, req *datapb.CompactionStateRequest) (*datapb.CompactionStateResponse, error) {
-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.GetCompactionState failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))
 return &datapb.CompactionStateResponse{
 Status: merr.Status(err),
@@ -333,7 +328,7 @@ func (node *DataNode) GetCompactionState(ctx context.Context, req *datapb.Compac
 log.Info("Compaction results", zap.Int64s("planIDs", planIDs))
 }
 return &datapb.CompactionStateResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 Results: results,
 }, nil
 }
@@ -347,8 +342,7 @@ func (node *DataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegments
 zap.Int64("numOfRows", req.GetNumOfRows()),
 )

-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.SyncSegments failed", zap.Int64("nodeId", paramtable.GetNodeID()), zap.Error(err))
 return merr.Status(err), nil
 }
@@ -381,7 +375,7 @@ func (node *DataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegments
 }
 if oneSegment == 0 {
 log.Ctx(ctx).Warn("no valid segment, maybe the request is a retry")
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 // oneSegment is definitely in the channel, guaranteed by the check before.
@@ -402,7 +396,7 @@ func (node *DataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegments
 defer ds.fg.Unblock()
 channel.mergeFlushedSegments(ctx, targetSeg, req.GetPlanID(), req.GetCompactedFrom())
 node.compactionExecutor.injectDone(req.GetPlanID(), true)
-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (node *DataNode) NotifyChannelOperation(ctx context.Context, req *datapb.ChannelOperationsRequest) (*commonpb.Status, error) {
@@ -434,7 +428,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 }()

 importResult := &rootcoordpb.ImportResult{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskId: req.GetImportTask().TaskId,
 DatanodeId: paramtable.GetNodeID(),
 State: commonpb.ImportState_ImportStarted,
@@ -462,8 +456,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 return merr.Status(err), nil
 }

-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 logFields = append(logFields, zap.Error(err))
 log.Warn("DataNode import failed, node is not healthy", logFields...)
 return merr.Status(err), nil
@@ -540,7 +533,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 return returnFailFunc("failed to import files", err)
 }

-resp := merr.Status(nil)
+resp := merr.Success()
 return resp, nil
 }

@@ -551,8 +544,7 @@ func (node *DataNode) FlushChannels(ctx context.Context, req *datapb.FlushChanne

 log.Info("DataNode receives FlushChannels request")

-if !node.isHealthy() {
-err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
 log.Warn("DataNode.FlushChannels failed", zap.Error(err))
 return merr.Status(err), nil
 }
@@ -565,7 +557,7 @@ func (node *DataNode) FlushChannels(ctx context.Context, req *datapb.FlushChanne
 fg.channel.setFlushTs(req.GetFlushTs())
 }

-return merr.Status(nil), nil
+return merr.Success(), nil
 }

 func (node *DataNode) getPartitions(ctx context.Context, dbName string, collectionName string) (map[string]int64, error) {
@@ -697,7 +689,7 @@ func (node *DataNode) AddImportSegment(ctx context.Context, req *datapb.AddImpor
 }
 ds.flushingSegCache.Remove(req.GetSegmentId())
 return &datapb.AddImportSegmentResponse{
-Status: merr.Status(nil),
+Status: merr.Success(),
 ChannelPos: posID,
 }, nil
 }
@@ -768,7 +760,7 @@ func assignSegmentFunc(node *DataNode, req *datapb.ImportTaskRequest) importutil
 // ignore the returned error, since even report failed the segments still can be cleaned
 // retry 10 times, if the rootcoord is down, the report function will cost 20+ seconds
 importResult := &rootcoordpb.ImportResult{
-Status: merr.Status(nil),
+Status: merr.Success(),
 TaskId: req.GetImportTask().TaskId,
 DatanodeId: paramtable.GetNodeID(),
 State: commonpb.ImportState_ImportStarted,
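Several DataNode handlers above collapse the two-line guard (if !node.isHealthy() followed by merr.WrapErrServiceNotReady) into a single merr.CheckHealthy(node.GetStateCode()) check. The sketch below shows that guard in a hypothetical handler; the node type and DoSomething method are invented for illustration, while the CheckHealthy, Status, and Success calls follow the usage visible in the diff.

package guardsketch

import (
	"context"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

// node is a minimal stand-in for the DataNode type used in the diff.
type node struct {
	stateCode commonpb.StateCode
}

func (n *node) GetStateCode() commonpb.StateCode { return n.stateCode }

// DoSomething is a hypothetical handler showing the guard pattern the commit
// converges on: CheckHealthy returns an error for any non-healthy state code,
// and the handler surfaces it as a status in the response rather than as a
// transport-level error.
func (n *node) DoSomething(ctx context.Context) (*commonpb.Status, error) {
	if err := merr.CheckHealthy(n.GetStateCode()); err != nil {
		return merr.Status(err), nil
	}
	// ... actual work would go here ...
	return merr.Success(), nil
}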
@@ -176,7 +176,7 @@ func TestTimetickManagerSendReport(t *testing.T) {
 if req.GetBase().Timestamp > uint64(tsInMill) {
 validTs.Store(true)
 }
-}).Return(merr.Status(nil), nil)
+}).Return(merr.Success(), nil)
 manager := newTimeTickSender(mockDataCoord, 0)
 go manager.start(ctx)
@ -544,7 +544,7 @@ func Test_NewServer(t *testing.T) {
|
|||
|
||||
t.Run("update seg stat", func(t *testing.T) {
|
||||
server.dataCoord = &MockDataCoord{
|
||||
updateSegStatResp: merr.Status(nil),
|
||||
updateSegStatResp: merr.Success(),
|
||||
}
|
||||
resp, err := server.UpdateSegmentStatistics(ctx, nil)
|
||||
assert.NoError(t, err)
|
||||
|
@ -553,7 +553,7 @@ func Test_NewServer(t *testing.T) {
|
|||
|
||||
t.Run("UpdateChannelCheckpoint", func(t *testing.T) {
|
||||
server.dataCoord = &MockDataCoord{
|
||||
updateChanPos: merr.Status(nil),
|
||||
updateChanPos: merr.Success(),
|
||||
}
|
||||
resp, err := server.UpdateChannelCheckpoint(ctx, nil)
|
||||
assert.NoError(t, err)
|
||||
|
@ -562,7 +562,7 @@ func Test_NewServer(t *testing.T) {
|
|||
|
||||
t.Run("save import segment", func(t *testing.T) {
|
||||
server.dataCoord = &MockDataCoord{
|
||||
addSegmentResp: merr.Status(nil),
|
||||
addSegmentResp: merr.Success(),
|
||||
}
|
||||
resp, err := server.SaveImportSegment(ctx, nil)
|
||||
assert.NoError(t, err)
|
||||
|
@ -571,7 +571,7 @@ func Test_NewServer(t *testing.T) {
|
|||
|
||||
t.Run("unset isImporting state", func(t *testing.T) {
|
||||
server.dataCoord = &MockDataCoord{
|
||||
unsetIsImportingStateResp: merr.Status(nil),
|
||||
unsetIsImportingStateResp: merr.Success(),
|
||||
}
|
||||
resp, err := server.UnsetIsImportingState(ctx, nil)
|
||||
assert.NoError(t, err)
|
||||
|
@ -580,7 +580,7 @@ func Test_NewServer(t *testing.T) {
|
|||
|
||||
t.Run("mark segments dropped", func(t *testing.T) {
|
||||
server.dataCoord = &MockDataCoord{
|
||||
markSegmentsDroppedResp: merr.Status(nil),
|
||||
markSegmentsDroppedResp: merr.Success(),
|
||||
}
|
||||
resp, err := server.MarkSegmentsDropped(ctx, nil)
|
||||
assert.NoError(t, err)
|
||||
@ -172,7 +172,7 @@ func (m *mockDataCoord) GetComponentStates(ctx context.Context, req *milvuspb.Ge
|
|||
State: &milvuspb.ComponentInfo{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SubcomponentStates: []*milvuspb.ComponentInfo{
|
||||
{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
|
@ -195,7 +195,7 @@ func (m *mockRootCoord) GetComponentStates(ctx context.Context, req *milvuspb.Ge
|
|||
State: &milvuspb.ComponentInfo{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SubcomponentStates: []*milvuspb.ComponentInfo{
|
||||
{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
|
@ -315,7 +315,7 @@ func Test_NewServer(t *testing.T) {
|
|||
server.datanode = &MockDataNode{
|
||||
status: &commonpb.Status{},
|
||||
addImportSegmentResp: &datapb.AddImportSegmentResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
resp, err := server.AddImportSegment(ctx, nil)
|
||||
@ -1025,7 +1025,7 @@ func (s *Server) GetProxyMetrics(ctx context.Context, request *milvuspb.GetMetri
|
|||
func (s *Server) GetVersion(ctx context.Context, request *milvuspb.GetVersionRequest) (*milvuspb.GetVersionResponse, error) {
|
||||
buildTags := os.Getenv(metricsinfo.GitBuildTagsEnvKey)
|
||||
return &milvuspb.GetVersionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Version: buildTags,
|
||||
}, nil
|
||||
}
|
||||
@ -105,7 +105,7 @@ func Test_NewServer(t *testing.T) {
|
|||
}
|
||||
|
||||
mqc := getQueryCoord()
|
||||
successStatus := merr.Status(nil)
|
||||
successStatus := merr.Success()
|
||||
|
||||
t.Run("Run", func(t *testing.T) {
|
||||
server.queryCoord = mqc
|
||||
@ -121,7 +121,7 @@ func (m *mockDataCoord) GetComponentStates(ctx context.Context, req *milvuspb.Ge
|
|||
State: &milvuspb.ComponentInfo{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SubcomponentStates: []*milvuspb.ComponentInfo{
|
||||
{
|
||||
StateCode: commonpb.StateCode_Healthy,
|
||||
@ -1,38 +0,0 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package indexnode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoSuchKey = errors.New("NoSuchKey")
|
||||
ErrEmptyInsertPaths = errors.New("empty insert paths")
|
||||
)
|
||||
|
||||
// msgIndexNodeIsUnhealthy returns a message that the IndexNode is not healthy.
|
||||
func msgIndexNodeIsUnhealthy(nodeID UniqueID) string {
|
||||
return fmt.Sprintf("index node %d is not ready", nodeID)
|
||||
}
|
||||
|
||||
// errIndexNodeIsUnhealthy returns an error that the specified IndexNode is not healthy.
|
||||
func errIndexNodeIsUnhealthy(nodeID UniqueID) error {
|
||||
return errors.New(msgIndexNodeIsUnhealthy(nodeID))
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package indexnode
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
func TestMsgIndexNodeIsUnhealthy(t *testing.T) {
|
||||
nodeIDList := []typeutil.UniqueID{1, 2, 3}
|
||||
for _, nodeID := range nodeIDList {
|
||||
log.Info("TestMsgIndexNodeIsUnhealthy", zap.String("msg", msgIndexNodeIsUnhealthy(nodeID)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrIndexNodeIsUnhealthy(t *testing.T) {
|
||||
nodeIDList := []typeutil.UniqueID{1, 2, 3}
|
||||
for _, nodeID := range nodeIDList {
|
||||
log.Info("TestErrIndexNodeIsUnhealthy", zap.Error(errIndexNodeIsUnhealthy(nodeID)))
|
||||
}
|
||||
}
|
|
@ -53,7 +53,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/hardware"
|
||||
"github.com/milvus-io/milvus/pkg/util/lifetime"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
|
@ -297,7 +296,7 @@ func (i *IndexNode) GetComponentStates(ctx context.Context, req *milvuspb.GetCom
|
|||
ret := &milvuspb.ComponentStates{
|
||||
State: stateInfo,
|
||||
SubcomponentStates: nil, // todo add subcomponents states
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
|
||||
log.RatedInfo(10, "IndexNode Component states",
|
||||
|
@ -312,7 +311,7 @@ func (i *IndexNode) GetTimeTickChannel(ctx context.Context, req *internalpb.GetT
|
|||
log.RatedInfo(10, "get IndexNode time tick channel ...")
|
||||
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -320,7 +319,7 @@ func (i *IndexNode) GetTimeTickChannel(ctx context.Context, req *internalpb.GetT
|
|||
func (i *IndexNode) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
log.RatedInfo(10, "get IndexNode statistics channel ...")
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -330,17 +329,14 @@ func (i *IndexNode) GetNodeID() int64 {
|
|||
|
||||
// ShowConfigurations returns the configurations of indexNode matching req.Pattern
|
||||
func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
log.Warn("IndexNode.ShowConfigurations failed",
|
||||
zap.Int64("nodeId", paramtable.GetNodeID()),
|
||||
zap.String("req", req.Pattern),
|
||||
zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))
|
||||
zap.Error(err))
|
||||
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: msgIndexNodeIsUnhealthy(paramtable.GetNodeID()),
|
||||
},
|
||||
Status: merr.Status(err),
|
||||
Configuations: nil,
|
||||
}, nil
|
||||
}
|
||||
|
@ -355,7 +351,7 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
|
|||
}
|
||||
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Configuations: configList,
|
||||
}, nil
|
||||
}
|
||||
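In ShowConfigurations and GetMetrics above, the manually assembled commonpb.Status{ErrorCode: ..., Reason: msgIndexNodeIsUnhealthy(...)} is replaced by merr.Status(err), so the error value alone carries the code and the reason. A condensed sketch of the new failure path, with the Configuations payload omitted:

func (i *IndexNode) showConfigurationsSketch() (*internalpb.ShowConfigurationsResponse, error) {
	if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
		// the error supplies both the error code and the reason string
		return &internalpb.ShowConfigurationsResponse{Status: merr.Status(err)}, nil
	}
	defer i.lifetime.Done()
	return &internalpb.ShowConfigurationsResponse{Status: merr.Success()}, nil
}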
@ -85,16 +85,16 @@ func NewIndexNodeMock() *Mock {
|
|||
StateCode: commonpb.StateCode_Healthy,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
},
|
||||
CallGetStatisticsChannel: func(ctx context.Context) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
},
|
||||
CallCreateJob: func(ctx context.Context, req *indexpb.CreateJobRequest) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
},
|
||||
CallQueryJobs: func(ctx context.Context, in *indexpb.QueryJobsRequest) (*indexpb.QueryJobsResponse, error) {
|
||||
indexInfos := make([]*indexpb.IndexTaskInfo, 0)
|
||||
|
@ -106,17 +106,17 @@ func NewIndexNodeMock() *Mock {
|
|||
})
|
||||
}
|
||||
return &indexpb.QueryJobsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ClusterID: in.ClusterID,
|
||||
IndexInfos: indexInfos,
|
||||
}, nil
|
||||
},
|
||||
CallDropJobs: func(ctx context.Context, in *indexpb.DropJobsRequest) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
},
|
||||
CallGetJobStats: func(ctx context.Context, in *indexpb.GetJobStatsRequest) (*indexpb.GetJobStatsResponse, error) {
|
||||
return &indexpb.GetJobStatsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
TotalJobNum: 1,
|
||||
EnqueueJobNum: 0,
|
||||
InProgressJobNum: 1,
|
||||
|
@ -137,7 +137,7 @@ func NewIndexNodeMock() *Mock {
|
|||
},
|
||||
CallShowConfigurations: func(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
|
@ -239,7 +239,7 @@ func getMockSystemInfoMetrics(
|
|||
resp, _ := metricsinfo.MarshalComponentInfos(nodeInfos)
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
|
||||
}, nil
|
||||
@ -33,7 +33,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
|
@ -47,12 +46,11 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
|
|||
zap.Int64("indexBuildID", req.GetBuildID()),
|
||||
)
|
||||
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
stateCode := i.lifetime.GetState()
|
||||
if err := i.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
log.Warn("index node not ready",
|
||||
zap.String("state", stateCode.String()),
|
||||
zap.Error(err),
|
||||
)
|
||||
return merr.Status(merr.WrapErrServiceNotReady(stateCode.String())), nil
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer i.lifetime.Done()
|
||||
log.Info("IndexNode building index ...",
|
||||
|
@ -106,7 +104,7 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
|
|||
tr: timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildID: %d, ClusterID: %s", req.BuildID, req.ClusterID)),
|
||||
serializedSize: 0,
|
||||
}
|
||||
ret := merr.Status(nil)
|
||||
ret := merr.Success()
|
||||
if err := i.sched.IndexBuildQueue.Enqueue(task); err != nil {
|
||||
log.Warn("IndexNode failed to schedule",
|
||||
zap.Error(err))
|
||||
|
@ -124,11 +122,10 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
|
|||
log := log.Ctx(ctx).With(
|
||||
zap.String("clusterID", req.GetClusterID()),
|
||||
).WithRateGroup("in.queryJobs", 1, 60)
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
stateCode := i.lifetime.GetState()
|
||||
log.Warn("index node not ready", zap.String("state", stateCode.String()))
|
||||
if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
log.Warn("index node not ready", zap.Error(err))
|
||||
return &indexpb.QueryJobsResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(stateCode.String())),
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
defer i.lifetime.Done()
|
||||
|
@ -145,7 +142,7 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
|
|||
}
|
||||
})
|
||||
ret := &indexpb.QueryJobsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ClusterID: req.GetClusterID(),
|
||||
IndexInfos: make([]*indexpb.IndexTaskInfo, 0, len(req.GetBuildIDs())),
|
||||
}
|
||||
|
@ -177,10 +174,9 @@ func (i *IndexNode) DropJobs(ctx context.Context, req *indexpb.DropJobsRequest)
|
|||
zap.String("clusterID", req.ClusterID),
|
||||
zap.Int64s("indexBuildIDs", req.BuildIDs),
|
||||
)
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
stateCode := i.lifetime.GetState()
|
||||
log.Ctx(ctx).Warn("index node not ready", zap.String("state", stateCode.String()), zap.String("clusterID", req.ClusterID))
|
||||
return merr.Status(merr.WrapErrServiceNotReady(stateCode.String())), nil
|
||||
if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
log.Ctx(ctx).Warn("index node not ready", zap.Error(err), zap.String("clusterID", req.ClusterID))
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer i.lifetime.Done()
|
||||
keys := make([]taskKey, 0, len(req.GetBuildIDs()))
|
||||
|
@ -195,15 +191,14 @@ func (i *IndexNode) DropJobs(ctx context.Context, req *indexpb.DropJobsRequest)
|
|||
}
|
||||
log.Ctx(ctx).Info("drop index build jobs success", zap.String("clusterID", req.GetClusterID()),
|
||||
zap.Int64s("indexBuildIDs", req.GetBuildIDs()))
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsRequest) (*indexpb.GetJobStatsResponse, error) {
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
stateCode := i.lifetime.GetState()
|
||||
log.Ctx(ctx).Warn("index node not ready", zap.String("state", stateCode.String()))
|
||||
if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
log.Ctx(ctx).Warn("index node not ready", zap.Error(err))
|
||||
return &indexpb.GetJobStatsResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(stateCode.String())),
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
defer i.lifetime.Done()
|
||||
|
@ -224,7 +219,7 @@ func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsReq
|
|||
zap.Int("slot", slots),
|
||||
)
|
||||
return &indexpb.GetJobStatsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
TotalJobNum: int64(active) + int64(unissued),
|
||||
InProgressJobNum: int64(active),
|
||||
EnqueueJobNum: int64(unissued),
|
||||
|
@ -237,17 +232,14 @@ func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsReq
|
|||
// GetMetrics gets the metrics info of IndexNode.
|
||||
// TODO(dragondriver): cache the Metrics and set a retention to the cache
|
||||
func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
if err := i.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
log.Ctx(ctx).Warn("IndexNode.GetMetrics failed",
|
||||
zap.Int64("nodeID", paramtable.GetNodeID()),
|
||||
zap.String("req", req.GetRequest()),
|
||||
zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))
|
||||
zap.Error(err))
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: msgIndexNodeIsUnhealthy(paramtable.GetNodeID()),
|
||||
},
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
defer i.lifetime.Done()
|
||||
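CreateJob, QueryJobs, DropJobs, GetJobStats and GetMetrics above all switch the lifetime guard from the boolean commonpbutil.IsHealthy* predicates to the error-returning merr.IsHealthy / merr.IsHealthyOrStopping filters, so the state no longer has to be fetched separately just to build the error. Roughly, with enqueue standing in for the real IndexBuildQueue.Enqueue call:

func (i *IndexNode) createJobSketch(ctx context.Context, req *indexpb.CreateJobRequest) (*commonpb.Status, error) {
	// lifetime.Add now reports why the node is unavailable instead of returning false
	if err := i.lifetime.Add(merr.IsHealthy); err != nil {
		log.Ctx(ctx).Warn("index node not ready", zap.Error(err))
		return merr.Status(err), nil
	}
	defer i.lifetime.Done()
	if err := enqueue(req); err != nil { // placeholder for i.sched.IndexBuildQueue.Enqueue(task)
		return merr.Status(err), nil
	}
	return merr.Success(), nil
}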
@ -52,12 +52,12 @@ func TestAbnormalIndexNode(t *testing.T) {
|
|||
assert.ErrorIs(t, merr.Error(jobNumRsp.GetStatus()), merr.ErrServiceNotReady)
|
||||
|
||||
metricsResp, err := in.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metricsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
|
||||
err = merr.CheckRPCCall(metricsResp, err)
|
||||
assert.ErrorIs(t, err, merr.ErrServiceNotReady)
|
||||
|
||||
configurationResp, err := in.ShowConfigurations(ctx, &internalpb.ShowConfigurationsRequest{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, configurationResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
|
||||
err = merr.CheckRPCCall(configurationResp, err)
|
||||
assert.ErrorIs(t, err, merr.ErrServiceNotReady)
|
||||
}
|
||||
|
||||
func TestGetMetrics(t *testing.T) {
|
||||
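The unhealthy-node test above stops comparing raw ErrorCode values and instead folds the (response, error) pair through merr.CheckRPCCall, then matches the sentinel with assert.ErrorIs. A small sketch of that assertion style, assuming CheckRPCCall merges the transport error and the embedded status as it is used above:

func assertNotReadySketch(t *testing.T, in *IndexNode) {
	resp, err := in.GetMetrics(context.TODO(), &milvuspb.GetMetricsRequest{})
	err = merr.CheckRPCCall(resp, err) // nil only if both the call and the status succeeded
	assert.ErrorIs(t, err, merr.ErrServiceNotReady)
}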
@ -70,7 +70,7 @@ func getSystemInfoMetrics(
|
|||
}
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
|
||||
}, nil
|
||||
@ -40,6 +40,7 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/indexparamcheck"
|
||||
"github.com/milvus-io/milvus/pkg/util/indexparams"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/util/timerecord"
|
||||
)
|
||||
|
@ -189,8 +190,8 @@ func (it *indexBuildTask) LoadData(ctx context.Context) error {
|
|||
getValueByPath := func(path string) ([]byte, error) {
|
||||
data, err := it.cm.Read(ctx, path)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNoSuchKey) {
|
||||
return nil, ErrNoSuchKey
|
||||
if errors.Is(err, merr.ErrIoKeyNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -405,12 +406,12 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
|
|||
func (it *indexBuildTask) parseFieldMetaFromBinlog(ctx context.Context) error {
|
||||
toLoadDataPaths := it.req.GetDataPaths()
|
||||
if len(toLoadDataPaths) == 0 {
|
||||
return ErrEmptyInsertPaths
|
||||
return merr.WrapErrParameterInvalidMsg("data insert path must be not empty")
|
||||
}
|
||||
data, err := it.cm.Read(ctx, toLoadDataPaths[0])
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNoSuchKey) {
|
||||
return ErrNoSuchKey
|
||||
if errors.Is(err, merr.ErrIoKeyNotFound) {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -421,7 +422,7 @@ func (it *indexBuildTask) parseFieldMetaFromBinlog(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
if len(insertData.Data) != 1 {
|
||||
return errors.New("we expect only one field in deserialized insert data")
|
||||
return merr.WrapErrParameterInvalidMsg("we expect only one field in deserialized insert data")
|
||||
}
|
||||
|
||||
it.collectionID = collectionID
|
||||
|
@ -445,7 +446,7 @@ func (it *indexBuildTask) decodeBlobs(ctx context.Context, blobs []*storage.Blob
|
|||
metrics.IndexNodeDecodeFieldLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(it.tr.RecordSpan().Seconds())
|
||||
|
||||
if len(insertData.Data) != 1 {
|
||||
return errors.New("we expect only one field in deserialized insert data")
|
||||
return merr.WrapErrParameterInvalidMsg("we expect only one field in deserialized insert data")
|
||||
}
|
||||
it.collectionID = collectionID
|
||||
it.partitionID = partitionID
|
||||
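task.go above retires the package-local ErrNoSuchKey and ErrEmptyInsertPaths sentinels: missing storage keys are matched with errors.Is against the shared merr.ErrIoKeyNotFound, and empty input paths become a wrapped invalid-parameter error. A sketch of the load path, with readBlob standing in for the ChunkManager read:

func loadFirstBlobSketch(ctx context.Context, paths []string) ([]byte, error) {
	if len(paths) == 0 {
		return nil, merr.WrapErrParameterInvalidMsg("data insert path must be not empty")
	}
	data, err := readBlob(ctx, paths[0]) // placeholder for it.cm.Read(ctx, path)
	if errors.Is(err, merr.ErrIoKeyNotFound) {
		// the shared sentinel replaces the old package-local ErrNoSuchKey
		return nil, err
	}
	return data, err
}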
@ -29,6 +29,7 @@ import (
|
|||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
)
|
||||
|
||||
|
@ -222,7 +223,7 @@ func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
|
|||
if errors.Is(err, errCancel) {
|
||||
log.Ctx(t.Ctx()).Warn("index build task canceled, retry it", zap.String("task", t.Name()))
|
||||
t.SetState(commonpb.IndexState_Retry, err.Error())
|
||||
} else if errors.Is(err, ErrNoSuchKey) {
|
||||
} else if errors.Is(err, merr.ErrIoKeyNotFound) {
|
||||
t.SetState(commonpb.IndexState_Failed, err.Error())
|
||||
} else {
|
||||
t.SetState(commonpb.IndexState_Retry, err.Error())
|
||||
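The scheduler hunk above keeps the same retry-or-fail decision but keys it on the shared sentinel: a cancelled build and generic failures are retried, a missing key is terminal. Roughly, assuming the task interface and errCancel are as they appear in this file:

func setTaskStateSketch(t task, err error) {
	switch {
	case errors.Is(err, errCancel):
		t.SetState(commonpb.IndexState_Retry, err.Error())
	case errors.Is(err, merr.ErrIoKeyNotFound):
		// the source data is gone, retrying cannot succeed
		t.SetState(commonpb.IndexState_Failed, err.Error())
	default:
		t.SetState(commonpb.IndexState_Retry, err.Error())
	}
}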
@ -72,13 +72,13 @@ func (coord *DataCoordMock) GetComponentStates(ctx context.Context, req *milvusp
|
|||
ExtraInfo: nil,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (coord *DataCoordMock) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: coord.statisticsChannel,
|
||||
}, nil
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ func (coord *DataCoordMock) Register() error {
|
|||
|
||||
func (coord *DataCoordMock) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: coord.timeTickChannel,
|
||||
}, nil
|
||||
}
|
||||
|
@ -252,24 +252,24 @@ func (coord *DataCoordMock) Import(ctx context.Context, req *datapb.ImportTaskRe
|
|||
}
|
||||
|
||||
func (coord *DataCoordMock) UpdateSegmentStatistics(ctx context.Context, req *datapb.UpdateSegmentStatisticsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *DataCoordMock) UpdateChannelCheckpoint(ctx context.Context, req *datapb.UpdateChannelCheckpointRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *DataCoordMock) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *DataCoordMock) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *DataCoordMock) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRequest, opts ...grpc.CallOption) (*indexpb.GetIndexStateResponse, error) {
|
||||
return &indexpb.GetIndexStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
State: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
}, nil
|
||||
|
@ -278,21 +278,21 @@ func (coord *DataCoordMock) GetIndexState(ctx context.Context, req *indexpb.GetI
|
|||
// GetSegmentIndexState gets the index state of the segments in the request from RootCoord.
|
||||
func (coord *DataCoordMock) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest, opts ...grpc.CallOption) (*indexpb.GetSegmentIndexStateResponse, error) {
|
||||
return &indexpb.GetSegmentIndexStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetIndexInfos gets the index files of the IndexBuildIDs in the request from RootCoordinator.
|
||||
func (coord *DataCoordMock) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoRequest, opts ...grpc.CallOption) (*indexpb.GetIndexInfoResponse, error) {
|
||||
return &indexpb.GetIndexInfoResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DescribeIndex describe the index info of the collection.
|
||||
func (coord *DataCoordMock) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRequest, opts ...grpc.CallOption) (*indexpb.DescribeIndexResponse, error) {
|
||||
return &indexpb.DescribeIndexResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IndexInfos: nil,
|
||||
}, nil
|
||||
}
|
||||
|
@ -300,7 +300,7 @@ func (coord *DataCoordMock) DescribeIndex(ctx context.Context, req *indexpb.Desc
|
|||
// GetIndexStatistics get the statistics of the index.
|
||||
func (coord *DataCoordMock) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexStatisticsRequest, opts ...grpc.CallOption) (*indexpb.GetIndexStatisticsResponse, error) {
|
||||
return &indexpb.GetIndexStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IndexInfos: nil,
|
||||
}, nil
|
||||
}
|
||||
|
@ -308,7 +308,7 @@ func (coord *DataCoordMock) GetIndexStatistics(ctx context.Context, req *indexpb
|
|||
// GetIndexBuildProgress get the index building progress by num rows.
|
||||
func (coord *DataCoordMock) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetIndexBuildProgressRequest, opts ...grpc.CallOption) (*indexpb.GetIndexBuildProgressResponse, error) {
|
||||
return &indexpb.GetIndexBuildProgressResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
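The DataCoordMock methods above shrink to one-liners because a plain OK status no longer needs a nil round trip through merr.Status. Any of these stubs now takes the same shape; an illustrative copy (the method name is hypothetical):

func (coord *DataCoordMock) updateSegmentStatisticsSketch(ctx context.Context, req *datapb.UpdateSegmentStatisticsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
	return merr.Success(), nil // previously merr.Status(nil), nil
}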
@ -48,7 +48,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/crypto"
|
||||
"github.com/milvus-io/milvus/pkg/util/errorutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/logutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
|
@ -66,7 +65,7 @@ const SlowReadSpan = time.Second * 5
|
|||
// GetComponentStates gets the state of Proxy.
|
||||
func (node *Proxy) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) {
|
||||
stats := &milvuspb.ComponentStates{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
code := node.GetStateCode()
|
||||
nodeID := common.NotRegisteredID
|
||||
|
@ -86,7 +85,7 @@ func (node *Proxy) GetComponentStates(ctx context.Context, req *milvuspb.GetComp
|
|||
// GetStatisticsChannel gets statistics channel of Proxy.
|
||||
func (node *Proxy) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: "",
|
||||
}, nil
|
||||
}
|
||||
|
@ -131,7 +130,7 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p
|
|||
}
|
||||
log.Info("complete to invalidate collection meta cache")
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (node *Proxy) CreateDatabase(ctx context.Context, request *milvuspb.CreateDatabaseRequest) (*commonpb.Status, error) {
|
||||
|
@ -1525,7 +1524,7 @@ func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.Get
|
|||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.SuccessLabel).Inc()
|
||||
metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
return &milvuspb.GetLoadingProgressResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Progress: loadProgress,
|
||||
RefreshProgress: refreshProgress,
|
||||
}, nil
|
||||
|
@ -1562,7 +1561,7 @@ func (node *Proxy) GetLoadState(ctx context.Context, request *milvuspb.GetLoadSt
|
|||
}
|
||||
|
||||
successResponse := &milvuspb.GetLoadStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
defer func() {
|
||||
log.Debug(
|
||||
|
@ -1757,18 +1756,11 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
|
|||
zap.Uint64("BeginTs", dit.BeginTs()),
|
||||
zap.Uint64("EndTs", dit.EndTs()))
|
||||
|
||||
errCode := commonpb.ErrorCode_UnexpectedError
|
||||
if dit.result != nil {
|
||||
errCode = dit.result.Status.GetErrorCode()
|
||||
}
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method,
|
||||
metrics.FailLabel).Inc()
|
||||
|
||||
return &milvuspb.DescribeIndexResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: errCode,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -1786,7 +1778,6 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
|
|||
// GetIndexStatistics get the information of index.
|
||||
func (node *Proxy) GetIndexStatistics(ctx context.Context, request *milvuspb.GetIndexStatisticsRequest) (*milvuspb.GetIndexStatisticsResponse, error) {
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
|
||||
return &milvuspb.GetIndexStatisticsResponse{
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
|
@ -1836,16 +1827,9 @@ func (node *Proxy) GetIndexStatistics(ctx context.Context, request *milvuspb.Get
|
|||
|
||||
if err := dit.WaitToFinish(); err != nil {
|
||||
log.Warn(rpcFailedToWaitToFinish(method), zap.Error(err), zap.Uint64("BeginTs", dit.BeginTs()), zap.Uint64("EndTs", dit.EndTs()))
|
||||
errCode := commonpb.ErrorCode_UnexpectedError
|
||||
if dit.result != nil {
|
||||
errCode = dit.result.Status.GetErrorCode()
|
||||
}
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(node.session.ServerID, 10), method, metrics.FailLabel).Inc()
|
||||
return &milvuspb.GetIndexStatisticsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: errCode,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -2311,7 +2295,7 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
|
|||
Condition: NewTaskCondition(ctx),
|
||||
req: request,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -2511,7 +2495,7 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
|
|||
// Flush notify data nodes to persist the data of collection.
|
||||
func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*milvuspb.FlushResponse, error) {
|
||||
resp := &milvuspb.FlushResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -2911,7 +2895,7 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
|
|||
log := log.With(zap.String("db", req.GetDbName()))
|
||||
|
||||
resp := &milvuspb.FlushAllResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3019,7 +3003,7 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
|
|||
zap.Any("collection", req.CollectionName))
|
||||
|
||||
resp := &milvuspb.GetPersistentSegmentInfoResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3107,7 +3091,7 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
|
|||
zap.Any("collection", req.CollectionName))
|
||||
|
||||
resp := &milvuspb.GetQuerySegmentInfoResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3255,15 +3239,13 @@ func (node *Proxy) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsReque
|
|||
zap.String("req", req.Request))
|
||||
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
|
||||
log.Warn("Proxy.GetMetrics failed",
|
||||
zap.Int64("nodeID", paramtable.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
zap.Error(err))
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(err),
|
||||
Response: "",
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -3275,8 +3257,7 @@ func (node *Proxy) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsReque
|
|||
zap.Error(err))
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(err),
|
||||
Response: "",
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -3324,7 +3305,6 @@ func (node *Proxy) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetrics
|
|||
zap.String("req", req.Request))
|
||||
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
|
||||
log.Warn("Proxy.GetProxyMetrics failed",
|
||||
zap.Error(err))
|
||||
|
||||
|
@ -3389,7 +3369,7 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
|
|||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
status := merr.Status(nil)
|
||||
status := merr.Success()
|
||||
|
||||
collectionID, err := globalMetaCache.GetCollectionID(ctx, req.GetDbName(), req.GetCollectionName())
|
||||
if err != nil {
|
||||
|
@ -3629,7 +3609,7 @@ func (node *Proxy) Import(ctx context.Context, req *milvuspb.ImportRequest) (*mi
|
|||
zap.String("partition name", req.GetPartitionName()),
|
||||
zap.Strings("files", req.GetFiles()))
|
||||
resp := &milvuspb.ImportResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3674,7 +3654,7 @@ func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportSt
|
|||
log.Debug("received get import state request",
|
||||
zap.Int64("taskID", req.GetTask()))
|
||||
resp := &milvuspb.GetImportStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3711,7 +3691,7 @@ func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImport
|
|||
|
||||
log.Debug("received list import tasks request")
|
||||
resp := &milvuspb.ListImportTasksResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
|
@ -3758,7 +3738,7 @@ func (node *Proxy) InvalidateCredentialCache(ctx context.Context, request *proxy
|
|||
}
|
||||
log.Debug("complete to invalidate credential cache")
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// UpdateCredentialCache update the credential cache of specified username.
|
||||
|
@ -3784,7 +3764,7 @@ func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.U
|
|||
}
|
||||
log.Debug("complete to update credential cache")
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCredentialRequest) (*commonpb.Status, error) {
|
||||
|
@ -3955,7 +3935,7 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.ListCredUsersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Usernames: resp.Usernames,
|
||||
}, nil
|
||||
}
|
||||
|
@ -4238,12 +4218,12 @@ func (node *Proxy) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.Refr
|
|||
}
|
||||
log.Debug("RefreshPrivilegeInfoCache success")
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// SetRates limits the rates of requests.
|
||||
func (node *Proxy) SetRates(ctx context.Context, request *proxypb.SetRatesRequest) (*commonpb.Status, error) {
|
||||
resp := merr.Status(nil)
|
||||
resp := merr.Success()
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
resp = merr.Status(err)
|
||||
return resp, nil
|
||||
|
@ -4261,11 +4241,10 @@ func (node *Proxy) SetRates(ctx context.Context, request *proxypb.SetRatesReques
|
|||
|
||||
func (node *Proxy) CheckHealth(ctx context.Context, request *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {
|
||||
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
|
||||
reason := errorutil.UnHealthReason("proxy", node.session.ServerID, "proxy is unhealthy")
|
||||
return &milvuspb.CheckHealthResponse{
|
||||
Status: merr.Status(err),
|
||||
IsHealthy: false,
|
||||
Reasons: []string{reason},
|
||||
Reasons: []string{err.Error()},
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -4314,7 +4293,7 @@ func (node *Proxy) CheckHealth(ctx context.Context, request *milvuspb.CheckHealt
|
|||
err := group.Wait()
|
||||
if err != nil || len(errReasons) != 0 {
|
||||
return &milvuspb.CheckHealthResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IsHealthy: false,
|
||||
Reasons: errReasons,
|
||||
}, nil
|
||||
|
@ -4322,7 +4301,7 @@ func (node *Proxy) CheckHealth(ctx context.Context, request *milvuspb.CheckHealt
|
|||
|
||||
states, reasons := node.multiRateLimiter.GetQuotaStates()
|
||||
return &milvuspb.CheckHealthResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
QuotaStates: states,
|
||||
Reasons: reasons,
|
||||
IsHealthy: true,
|
||||
|
@ -4808,7 +4787,7 @@ func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest
|
|||
GetConnectionManager().register(ctx, int64(ts), request.GetClientInfo())
|
||||
|
||||
return &milvuspb.ConnectResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ServerInfo: serverInfo,
|
||||
Identifier: int64(ts),
|
||||
}, nil
|
||||
|
@ -4822,7 +4801,7 @@ func (node *Proxy) ListClientInfos(ctx context.Context, req *proxypb.ListClientI
|
|||
clients := GetConnectionManager().list()
|
||||
|
||||
return &proxypb.ListClientInfosResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ClientInfos: clients,
|
||||
}, nil
|
||||
}
|
||||
|
@ -4844,7 +4823,7 @@ func (node *Proxy) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimest
|
|||
log.Info("AllocTimestamp request success", zap.Uint64("timestamp", ts))
|
||||
|
||||
return &milvuspb.AllocTimestampResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Timestamp: ts,
|
||||
}, nil
|
||||
}
|
||||
|
@ -4852,6 +4831,6 @@ func (node *Proxy) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimest
|
|||
func (node *Proxy) GetVersion(ctx context.Context, request *milvuspb.GetVersionRequest) (*milvuspb.GetVersionResponse, error) {
|
||||
// TODO implement me
|
||||
return &milvuspb.GetVersionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
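In Proxy.CheckHealth above, the errorutil.UnHealthReason helper drops out: the reason string now comes straight from the health-check error and the status from the usual merr helpers. Condensed sketch:

func (node *Proxy) checkHealthSketch(ctx context.Context) (*milvuspb.CheckHealthResponse, error) {
	if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
		return &milvuspb.CheckHealthResponse{
			Status:    merr.Status(err),
			IsHealthy: false,
			Reasons:   []string{err.Error()}, // replaces errorutil.UnHealthReason(...)
		}, nil
	}
	return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: true}, nil
}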
@ -195,7 +195,7 @@ func TestProxyRenameCollection(t *testing.T) {
|
|||
t.Run("rename ok", func(t *testing.T) {
|
||||
rc := mocks.NewMockRootCoordClient(t)
|
||||
rc.On("RenameCollection", mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
node := &Proxy{
|
||||
session: &sessionutil.Session{SessionRaw: sessionutil.SessionRaw{ServerID: 1}},
|
||||
rootCoord: rc,
|
||||
|
@ -757,7 +757,7 @@ func TestProxy_Connect(t *testing.T) {
|
|||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(&milvuspb.ListDatabasesResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady("initialization")),
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(paramtable.GetRole(), paramtable.GetNodeID(), "initialization")),
|
||||
}, nil)
|
||||
|
||||
node := &Proxy{rootCoord: r}
|
||||
|
@ -779,7 +779,7 @@ func TestProxy_Connect(t *testing.T) {
|
|||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(&milvuspb.ListDatabasesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
DbNames: []string{},
|
||||
}, nil)
|
||||
|
||||
|
@ -802,7 +802,7 @@ func TestProxy_Connect(t *testing.T) {
|
|||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(&milvuspb.ListDatabasesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
DbNames: []string{"20230525"},
|
||||
}, nil)
|
||||
|
||||
|
@ -833,7 +833,7 @@ func TestProxy_Connect(t *testing.T) {
|
|||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(&milvuspb.ListDatabasesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
DbNames: []string{"20230525"},
|
||||
}, nil)
|
||||
|
||||
|
@ -842,7 +842,7 @@ func TestProxy_Connect(t *testing.T) {
|
|||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(&rootcoordpb.AllocTimestampResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Timestamp: 20230518,
|
||||
Count: 1,
|
||||
}, nil)
|
||||
|
@ -923,7 +923,7 @@ func TestProxyCreateDatabase(t *testing.T) {
|
|||
t.Run("create database ok", func(t *testing.T) {
|
||||
rc := mocks.NewMockRootCoordClient(t)
|
||||
rc.On("CreateDatabase", mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
node.rootCoord = rc
|
||||
node.UpdateStateCode(commonpb.StateCode_Healthy)
|
||||
ctx := context.Background()
|
||||
|
@ -977,7 +977,7 @@ func TestProxyDropDatabase(t *testing.T) {
|
|||
t.Run("drop database ok", func(t *testing.T) {
|
||||
rc := mocks.NewMockRootCoordClient(t)
|
||||
rc.On("DropDatabase", mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
node.rootCoord = rc
|
||||
node.UpdateStateCode(commonpb.StateCode_Healthy)
|
||||
ctx := context.Background()
|
||||
|
@ -1032,7 +1032,7 @@ func TestProxyListDatabase(t *testing.T) {
|
|||
rc := mocks.NewMockRootCoordClient(t)
|
||||
rc.On("ListDatabases", mock.Anything, mock.Anything).
|
||||
Return(&milvuspb.ListDatabasesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
node.rootCoord = rc
|
||||
node.UpdateStateCode(commonpb.StateCode_Healthy)
|
||||
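The proxy tests above also track a signature change: merr.WrapErrServiceNotReady now takes the role and node ID ahead of the free-form reason. A failing mock is therefore set up along these lines, reusing the mockery-generated RootCoord client from the tests above:

rc := mocks.NewMockRootCoordClient(t)
rc.On("ListDatabases", mock.Anything, mock.Anything).
	Return(&milvuspb.ListDatabasesResponse{
		// role and node ID first, then the reason, where the old form took only the reason
		Status: merr.Status(merr.WrapErrServiceNotReady(paramtable.GetRole(), paramtable.GetNodeID(), "initialization")),
	}, nil)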
@ -84,7 +84,7 @@ func (s *LBPolicySuite) SetupTest() {
|
|||
},
|
||||
}, nil).Maybe()
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{1, 2, 3},
|
||||
}, nil).Maybe()
|
||||
|
||||
@ -71,7 +71,7 @@ func (m *MockRootCoordClientInterface) ShowPartitions(ctx context.Context, in *m
|
|||
}
|
||||
if in.CollectionName == "collection1" {
|
||||
return &milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []typeutil.UniqueID{1, 2},
|
||||
CreatedTimestamps: []uint64{100, 200},
|
||||
CreatedUtcTimestamps: []uint64{100, 200},
|
||||
|
@ -80,7 +80,7 @@ func (m *MockRootCoordClientInterface) ShowPartitions(ctx context.Context, in *m
|
|||
}
|
||||
if in.CollectionName == "collection2" {
|
||||
return &milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []typeutil.UniqueID{3, 4},
|
||||
CreatedTimestamps: []uint64{201, 202},
|
||||
CreatedUtcTimestamps: []uint64{201, 202},
|
||||
|
@ -89,7 +89,7 @@ func (m *MockRootCoordClientInterface) ShowPartitions(ctx context.Context, in *m
|
|||
}
|
||||
if in.CollectionName == "errorCollection" {
|
||||
return &milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []typeutil.UniqueID{5, 6},
|
||||
CreatedTimestamps: []uint64{201},
|
||||
CreatedUtcTimestamps: []uint64{201},
|
||||
|
@ -114,7 +114,7 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i
|
|||
m.IncAccessCount()
|
||||
if in.CollectionName == "collection1" || in.CollectionID == 1 {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionID: typeutil.UniqueID(1),
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
AutoID: true,
|
||||
|
@ -125,7 +125,7 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i
|
|||
}
|
||||
if in.CollectionName == "collection2" || in.CollectionID == 2 {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionID: typeutil.UniqueID(2),
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
AutoID: true,
|
||||
|
@ -136,7 +136,7 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i
|
|||
}
|
||||
if in.CollectionName == "errorCollection" {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionID: typeutil.UniqueID(3),
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
AutoID: true,
|
||||
|
@ -160,7 +160,7 @@ func (m *MockRootCoordClientInterface) GetCredential(ctx context.Context, req *r
|
|||
if req.Username == "mockUser" {
|
||||
encryptedPassword, _ := crypto.PasswordEncrypt("mockPass")
|
||||
return &rootcoordpb.GetCredentialResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Username: "mockUser",
|
||||
Password: encryptedPassword,
|
||||
}, nil
|
||||
|
@ -176,7 +176,7 @@ func (m *MockRootCoordClientInterface) ListCredUsers(ctx context.Context, req *m
|
|||
}
|
||||
|
||||
return &milvuspb.ListCredUsersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Usernames: []string{"mockUser"},
|
||||
}, nil
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ func (m *MockRootCoordClientInterface) ListPolicy(ctx context.Context, in *inter
|
|||
return m.listPolicy(ctx, in)
|
||||
}
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -514,7 +514,7 @@ func TestMetaCache_GetShards(t *testing.T) {
|
|||
|
||||
t.Run("without shardLeaders in collection info", func(t *testing.T) {
|
||||
qc.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
|
@ -571,7 +571,7 @@ func TestMetaCache_ClearShards(t *testing.T) {
|
|||
|
||||
t.Run("Clear valid collection valid cache", func(t *testing.T) {
|
||||
qc.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
|
@ -617,7 +617,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
|
|||
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{"policy1", "policy2", "policy3"},
|
||||
}, nil
|
||||
}
|
||||
|
@ -628,7 +628,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
|
|||
t.Run("GetPrivilegeInfo", func(t *testing.T) {
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{"policy1", "policy2", "policy3"},
|
||||
UserRoles: []string{funcutil.EncodeUserRoleCache("foo", "role1"), funcutil.EncodeUserRoleCache("foo", "role2"), funcutil.EncodeUserRoleCache("foo2", "role2")},
|
||||
}, nil
|
||||
|
@ -644,7 +644,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
|
|||
t.Run("GetPrivilegeInfo", func(t *testing.T) {
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{"policy1", "policy2", "policy3"},
|
||||
UserRoles: []string{funcutil.EncodeUserRoleCache("foo", "role1"), funcutil.EncodeUserRoleCache("foo", "role2"), funcutil.EncodeUserRoleCache("foo2", "role2")},
|
||||
}, nil
|
||||
|
@ -681,7 +681,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
|
|||
t.Run("Delete user or drop role", func(t *testing.T) {
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{"policy1", "policy2", "policy3"},
|
||||
UserRoles: []string{funcutil.EncodeUserRoleCache("foo", "role1"), funcutil.EncodeUserRoleCache("foo", "role2"), funcutil.EncodeUserRoleCache("foo2", "role2"), funcutil.EncodeUserRoleCache("foo2", "role3")},
|
||||
}, nil
|
||||
|
@ -706,7 +706,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
|
|||
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{"policy1", "policy2", "policy3"},
|
||||
UserRoles: []string{funcutil.EncodeUserRoleCache("foo", "role1"), funcutil.EncodeUserRoleCache("foo", "role2"), funcutil.EncodeUserRoleCache("foo2", "role2"), funcutil.EncodeUserRoleCache("foo2", "role3")},
|
||||
}, nil
|
||||
|
@ -727,7 +727,7 @@ func TestMetaCache_RemoveCollection(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
queryCoord.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionIDs: []UniqueID{1, 2},
|
||||
InMemoryPercentages: []int64{100, 50},
|
||||
}, nil)
|
||||
|
@ -776,12 +776,12 @@ func TestMetaCache_ExpireShardLeaderCache(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
queryCoord.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionIDs: []UniqueID{1},
|
||||
InMemoryPercentages: []int64{100},
|
||||
}, nil)
|
||||
queryCoord.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
|
@ -796,7 +796,7 @@ func TestMetaCache_ExpireShardLeaderCache(t *testing.T) {
|
|||
|
||||
queryCoord.ExpectedCalls = nil
|
||||
queryCoord.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
|
@ -814,7 +814,7 @@ func TestMetaCache_ExpireShardLeaderCache(t *testing.T) {
|
|||
|
||||
queryCoord.ExpectedCalls = nil
|
||||
queryCoord.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
|
@ -832,7 +832,7 @@ func TestMetaCache_ExpireShardLeaderCache(t *testing.T) {
|
|||
|
||||
queryCoord.ExpectedCalls = nil
|
||||
queryCoord.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Shards: []*querypb.ShardLeadersList{
|
||||
{
|
||||
ChannelName: "channel-1",
|
||||
@ -109,7 +109,7 @@ func getProxyMetrics(ctx context.Context, request *milvuspb.GetMetricsRequest, n
|
|||
}
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
|
||||
}, nil
|
||||
|
@ -426,7 +426,7 @@ func getSystemInfoMetrics(
|
|||
}
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
|
||||
}, nil
|
||||
@ -83,7 +83,7 @@ func TestProxy_metrics(t *testing.T) {
|
|||
resp, _ := metricsinfo.MarshalTopology(rootCoordTopology)
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.RootCoordRole, id),
|
||||
}, nil
|
||||
|
@ -132,7 +132,7 @@ func TestProxy_metrics(t *testing.T) {
|
|||
resp, _ := metricsinfo.MarshalTopology(coordTopology)
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, id),
|
||||
}, nil
|
||||
|
@ -189,7 +189,7 @@ func TestProxy_metrics(t *testing.T) {
|
|||
resp, _ := metricsinfo.MarshalTopology(coordTopology)
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, id),
|
||||
}, nil
|
||||
@ -52,7 +52,7 @@ func (tso *mockTimestampAllocatorInterface) AllocTimestamp(ctx context.Context,
|
|||
|
||||
tso.lastTs = ts
|
||||
return &rootcoordpb.AllocTimestampResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Timestamp: ts,
|
||||
Count: req.Count,
|
||||
}, nil
|
||||
@ -52,7 +52,7 @@ func TestPrivilegeInterceptor(t *testing.T) {
|
|||
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{
|
||||
funcutil.PolicyForPrivilege("role1", commonpb.ObjectType_Collection.String(), "col1", commonpb.ObjectPrivilege_PrivilegeLoad.String(), "default"),
|
||||
funcutil.PolicyForPrivilege("role1", commonpb.ObjectType_Collection.String(), "col1", commonpb.ObjectPrivilege_PrivilegeGetLoadState.String(), "default"),
|
||||
|
@ -180,7 +180,7 @@ func TestResourceGroupPrivilege(t *testing.T) {
|
|||
|
||||
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
return &internalpb.ListPolicyResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PolicyInfos: []string{
|
||||
funcutil.PolicyForPrivilege("role1", commonpb.ObjectType_Global.String(), "*", commonpb.ObjectPrivilege_PrivilegeCreateResourceGroup.String(), "default"),
|
||||
funcutil.PolicyForPrivilege("role1", commonpb.ObjectType_Global.String(), "*", commonpb.ObjectPrivilege_PrivilegeDropResourceGroup.String(), "default"),
|
||||
|
|
|
@ -4237,7 +4237,7 @@ func TestProxy_GetLoadState(t *testing.T) {
|
|||
{
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady("initialization")),
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(paramtable.GetRole(), paramtable.GetNodeID(), "initialization")),
|
||||
CollectionIDs: nil,
|
||||
InMemoryPercentages: []int64{},
|
||||
}, nil)
|
||||
|
@ -4290,7 +4290,7 @@ func TestProxy_GetLoadState(t *testing.T) {
|
|||
ExtraInfo: nil,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
|
||||
|
@ -4325,7 +4325,7 @@ func TestProxy_GetLoadState(t *testing.T) {
|
|||
ExtraInfo: nil,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
|
||||
|
@ -4356,7 +4356,7 @@ func TestProxy_GetLoadState(t *testing.T) {
|
|||
ExtraInfo: nil,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
|
||||
mockErr := merr.WrapErrServiceMemoryLimitExceeded(110, 100)
|
||||
|
|
|
@ -199,7 +199,7 @@ func TestRateLimitInterceptor(t *testing.T) {
|
|||
limiter := limiterMock{rate: 100}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
serverInfo := &grpc.UnaryServerInfo{FullMethod: "MockFullMethod"}
|
||||
|
|
|
@ -136,7 +136,7 @@ func (coord *RootCoordMock) CreateAlias(ctx context.Context, req *milvuspb.Creat
|
|||
}
|
||||
|
||||
coord.collAlias2ID[req.Alias] = collID
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) DropAlias(ctx context.Context, req *milvuspb.DropAliasRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
|
@ -159,7 +159,7 @@ func (coord *RootCoordMock) DropAlias(ctx context.Context, req *milvuspb.DropAli
|
|||
}
|
||||
|
||||
delete(coord.collAlias2ID, req.Alias)
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) AlterAlias(ctx context.Context, req *milvuspb.AlterAliasRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
|
@ -188,7 +188,7 @@ func (coord *RootCoordMock) AlterAlias(ctx context.Context, req *milvuspb.AlterA
|
|||
}, nil
|
||||
}
|
||||
coord.collAlias2ID[req.Alias] = collID
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) updateState(state commonpb.StateCode) {
|
||||
|
@ -217,7 +217,7 @@ func (coord *RootCoordMock) GetComponentStates(ctx context.Context, req *milvusp
|
|||
ExtraInfo: nil,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -232,7 +232,7 @@ func (coord *RootCoordMock) GetStatisticsChannel(ctx context.Context, req *inter
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: coord.statisticsChannel,
|
||||
}, nil
|
||||
}
|
||||
|
@ -252,7 +252,7 @@ func (coord *RootCoordMock) GetTimeTickChannel(ctx context.Context, req *interna
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: coord.timeTickChannel,
|
||||
}, nil
|
||||
}
|
||||
|
@ -356,7 +356,7 @@ func (coord *RootCoordMock) CreateCollection(ctx context.Context, req *milvuspb.
|
|||
coord.collID2Partitions[collID].partitionID2Meta[id] = partitionMeta{}
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) DropCollection(ctx context.Context, req *milvuspb.DropCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
|
@ -387,7 +387,7 @@ func (coord *RootCoordMock) DropCollection(ctx context.Context, req *milvuspb.Dr
|
|||
|
||||
delete(coord.collID2Partitions, collID)
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) HasCollection(ctx context.Context, req *milvuspb.HasCollectionRequest, opts ...grpc.CallOption) (*milvuspb.BoolResponse, error) {
|
||||
|
@ -407,7 +407,7 @@ func (coord *RootCoordMock) HasCollection(ctx context.Context, req *milvuspb.Has
|
|||
_, exist := coord.collName2ID[req.CollectionName]
|
||||
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: exist,
|
||||
}, nil
|
||||
}
|
||||
|
@ -466,7 +466,7 @@ func (coord *RootCoordMock) DescribeCollection(ctx context.Context, req *milvusp
|
|||
}
|
||||
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: meta.schema,
|
||||
CollectionID: collID,
|
||||
ShardsNum: meta.shardsNum,
|
||||
|
@ -510,7 +510,7 @@ func (coord *RootCoordMock) ShowCollections(ctx context.Context, req *milvuspb.S
|
|||
}
|
||||
|
||||
return &milvuspb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionNames: names,
|
||||
CollectionIds: ids,
|
||||
CreatedTimestamps: createdTimestamps,
|
||||
|
@ -559,7 +559,7 @@ func (coord *RootCoordMock) CreatePartition(ctx context.Context, req *milvuspb.C
|
|||
createdUtcTimestamp: ts,
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) DropPartition(ctx context.Context, req *milvuspb.DropPartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
|
||||
|
@ -595,7 +595,7 @@ func (coord *RootCoordMock) DropPartition(ctx context.Context, req *milvuspb.Dro
|
|||
delete(coord.collID2Partitions[collID].partitionName2ID, req.PartitionName)
|
||||
delete(coord.collID2Partitions[collID].partitionID2Name, partitionID)
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) HasPartition(ctx context.Context, req *milvuspb.HasPartitionRequest, opts ...grpc.CallOption) (*milvuspb.BoolResponse, error) {
|
||||
|
@ -628,7 +628,7 @@ func (coord *RootCoordMock) HasPartition(ctx context.Context, req *milvuspb.HasP
|
|||
|
||||
_, partitionExist := coord.collID2Partitions[collID].partitionName2ID[req.PartitionName]
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: partitionExist,
|
||||
}, nil
|
||||
}
|
||||
|
@ -682,7 +682,7 @@ func (coord *RootCoordMock) ShowPartitions(ctx context.Context, req *milvuspb.Sh
|
|||
}
|
||||
|
||||
return &milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionNames: names,
|
||||
PartitionIDs: ids,
|
||||
CreatedTimestamps: createdTimestamps,
|
||||
|
@ -783,7 +783,7 @@ func (coord *RootCoordMock) AllocTimestamp(ctx context.Context, req *rootcoordpb
|
|||
|
||||
coord.lastTs = ts
|
||||
return &rootcoordpb.AllocTimestampResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Timestamp: ts,
|
||||
Count: req.Count,
|
||||
}, nil
|
||||
|
@ -803,7 +803,7 @@ func (coord *RootCoordMock) AllocID(ctx context.Context, req *rootcoordpb.AllocI
|
|||
}
|
||||
begin, _ := uniquegenerator.GetUniqueIntGeneratorIns().GetInts(int(req.Count))
|
||||
return &rootcoordpb.AllocIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ID: int64(begin),
|
||||
Count: req.Count,
|
||||
}, nil
|
||||
|
@ -817,7 +817,7 @@ func (coord *RootCoordMock) UpdateChannelTimeTick(ctx context.Context, req *inte
|
|||
Reason: fmt.Sprintf("state code = %s", commonpb.StateCode_name[int32(code)]),
|
||||
}, nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) DescribeSegment(ctx context.Context, req *milvuspb.DescribeSegmentRequest, opts ...grpc.CallOption) (*milvuspb.DescribeSegmentResponse, error) {
|
||||
|
@ -832,7 +832,7 @@ func (coord *RootCoordMock) DescribeSegment(ctx context.Context, req *milvuspb.D
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.DescribeSegmentResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IndexID: 0,
|
||||
BuildID: 0,
|
||||
EnableIndex: false,
|
||||
|
@ -851,7 +851,7 @@ func (coord *RootCoordMock) ShowSegments(ctx context.Context, req *milvuspb.Show
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.ShowSegmentsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SegmentIDs: nil,
|
||||
}, nil
|
||||
}
|
||||
|
@ -868,7 +868,7 @@ func (coord *RootCoordMock) InvalidateCollectionMetaCache(ctx context.Context, i
|
|||
Reason: fmt.Sprintf("state code = %s", commonpb.StateCode_name[int32(code)]),
|
||||
}, nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error) {
|
||||
|
@ -879,7 +879,7 @@ func (coord *RootCoordMock) SegmentFlushCompleted(ctx context.Context, in *datap
|
|||
Reason: fmt.Sprintf("state code = %s", commonpb.StateCode_name[int32(code)]),
|
||||
}, nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (coord *RootCoordMock) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
|
@ -940,7 +940,7 @@ func (coord *RootCoordMock) Import(ctx context.Context, req *milvuspb.ImportRequ
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.ImportResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Tasks: make([]int64, 3),
|
||||
}, nil
|
||||
}
|
||||
|
@ -958,7 +958,7 @@ func (coord *RootCoordMock) GetImportState(ctx context.Context, req *milvuspb.Ge
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.GetImportStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
RowCount: 10,
|
||||
IdList: make([]int64, 3),
|
||||
}, nil
|
||||
|
@ -976,7 +976,7 @@ func (coord *RootCoordMock) ListImportTasks(ctx context.Context, in *milvuspb.Li
|
|||
}, nil
|
||||
}
|
||||
return &milvuspb.ListImportTasksResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Tasks: make([]*milvuspb.GetImportStateResponse, 3),
|
||||
}, nil
|
||||
}
|
||||
|
@ -989,7 +989,7 @@ func (coord *RootCoordMock) ReportImport(ctx context.Context, req *rootcoordpb.I
|
|||
Reason: fmt.Sprintf("state code = %s", commonpb.StateCode_name[int32(code)]),
|
||||
}, nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func NewRootCoordMock(opts ...RootCoordMockOption) *RootCoordMock {
|
||||
|
|
|
@ -55,14 +55,14 @@ func (mockD *mockDataCoord) AssignSegmentID(ctx context.Context, req *datapb.Ass
|
|||
PartitionID: r.PartitionID,
|
||||
ExpireTime: mockD.expireTime,
|
||||
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
assigns = append(assigns, result)
|
||||
}
|
||||
}
|
||||
|
||||
return &datapb.AssignSegmentIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SegIDAssignments: assigns,
|
||||
}, nil
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ func (mockD *mockDataCoord3) AssignSegmentID(ctx context.Context, req *datapb.As
|
|||
}
|
||||
|
||||
return &datapb.AssignSegmentIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
SegIDAssignments: assigns,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
@ -511,7 +511,7 @@ func (t *describeCollectionTask) PreExecute(ctx context.Context) error {
|
|||
func (t *describeCollectionTask) Execute(ctx context.Context) error {
|
||||
var err error
|
||||
t.result = &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "",
|
||||
Description: "",
|
||||
|
@ -1324,7 +1324,7 @@ func (t *flushTask) Execute(ctx context.Context) error {
|
|||
coll2FlushTs[collName] = resp.GetFlushTs()
|
||||
}
|
||||
t.result = &milvuspb.FlushResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
DbName: t.GetDbName(),
|
||||
CollSegIDs: coll2Segments,
|
||||
FlushCollSegIDs: flushColl2Segments,
|
||||
|
|
|
@ -157,7 +157,7 @@ func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, termExpr *planpb.
|
|||
|
||||
func (dt *deleteTask) PreExecute(ctx context.Context) error {
|
||||
dt.result = &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
|
|
@ -335,7 +335,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
rc := mocks.NewMockRootCoordClient(t)
|
||||
rc.EXPECT().AllocID(mock.Anything, mock.Anything).Return(
|
||||
&rootcoordpb.AllocIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ID: 0,
|
||||
Count: 1,
|
||||
}, nil)
|
||||
|
@ -376,7 +376,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
vChannels: channels,
|
||||
lb: lb,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -409,7 +409,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
lb := NewMockLBPolicy(t)
|
||||
rc.EXPECT().AllocID(mock.Anything, mock.Anything).Return(
|
||||
&rootcoordpb.AllocIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ID: 0,
|
||||
Count: 1,
|
||||
}, nil)
|
||||
|
@ -426,7 +426,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
idAllocator: allocator,
|
||||
lb: lb,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -450,7 +450,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
server := client.CreateServer()
|
||||
|
||||
server.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{
|
||||
|
@ -482,7 +482,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
lb := NewMockLBPolicy(t)
|
||||
rc.EXPECT().AllocID(mock.Anything, mock.Anything).Return(
|
||||
&rootcoordpb.AllocIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ID: 0,
|
||||
Count: 1,
|
||||
}, nil)
|
||||
|
@ -499,7 +499,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
idAllocator: allocator,
|
||||
lb: lb,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -523,7 +523,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
server := client.CreateServer()
|
||||
|
||||
server.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{
|
||||
|
@ -551,7 +551,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
lb := NewMockLBPolicy(t)
|
||||
rc.EXPECT().AllocID(mock.Anything, mock.Anything).Return(
|
||||
&rootcoordpb.AllocIDResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ID: 0,
|
||||
Count: 1,
|
||||
}, nil)
|
||||
|
@ -568,7 +568,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
idAllocator: allocator,
|
||||
lb: lb,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -592,7 +592,7 @@ func TestDeleteTask_Execute(t *testing.T) {
|
|||
server := client.CreateServer()
|
||||
|
||||
server.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{
|
||||
|
|
|
@ -925,7 +925,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
gist.result = &milvuspb.GetIndexStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
State: state.GetState(),
|
||||
FailReason: state.GetFailReason(),
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ func TestGetIndexStateTask_Execute(t *testing.T) {
|
|||
rootCoord := newMockRootCoord()
|
||||
queryCoord := getMockQueryCoord()
|
||||
queryCoord.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionIDs: []int64{},
|
||||
}, nil)
|
||||
datacoord := NewDataCoordMock()
|
||||
|
@ -88,7 +88,7 @@ func TestGetIndexStateTask_Execute(t *testing.T) {
|
|||
|
||||
rootCoord.DescribeCollectionFunc = func(ctx context.Context, request *milvuspb.DescribeCollectionRequest, opts ...grpc.CallOption) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: newTestSchema(),
|
||||
CollectionID: collectionID,
|
||||
CollectionName: request.CollectionName,
|
||||
|
@ -97,7 +97,7 @@ func TestGetIndexStateTask_Execute(t *testing.T) {
|
|||
|
||||
datacoord.GetIndexStateFunc = func(ctx context.Context, request *indexpb.GetIndexStateRequest, opts ...grpc.CallOption) (*indexpb.GetIndexStateResponse, error) {
|
||||
return &indexpb.GetIndexStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
State: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
}, nil
|
||||
|
@ -116,7 +116,7 @@ func TestDropIndexTask_PreExecute(t *testing.T) {
|
|||
paramtable.Init()
|
||||
qc := getMockQueryCoord()
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionIDs: []int64{},
|
||||
}, nil)
|
||||
dc := NewDataCoordMock()
|
||||
|
@ -177,7 +177,7 @@ func TestDropIndexTask_PreExecute(t *testing.T) {
|
|||
t.Run("coll has been loaded", func(t *testing.T) {
|
||||
qc := getMockQueryCoord()
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
CollectionIDs: []int64{collectionID},
|
||||
}, nil)
|
||||
dit.queryCoord = qc
|
||||
|
|
|
@ -98,7 +98,7 @@ func (it *insertTask) PreExecute(ctx context.Context) error {
|
|||
defer sp.End()
|
||||
|
||||
it.result = &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
|
|
@ -141,7 +141,7 @@ func TestQueryTask_all(t *testing.T) {
|
|||
},
|
||||
ctx: ctx,
|
||||
result: &milvuspb.QueryResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
FieldsData: []*schemapb.FieldData{},
|
||||
},
|
||||
request: &milvuspb.QueryRequest{
|
||||
|
@ -184,7 +184,7 @@ func TestQueryTask_all(t *testing.T) {
|
|||
|
||||
result1 := &internalpb.RetrieveResults{
|
||||
Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_RetrieveResult},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: generateInt64Array(hitNum)},
|
||||
|
|
|
@ -767,7 +767,7 @@ func reduceSearchResultData(ctx context.Context, subSearchResultData []*schemapb
|
|||
zap.String("metricType", metricType))
|
||||
|
||||
ret := &milvuspb.SearchResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Results: &schemapb.SearchResultData{
|
||||
NumQueries: nq,
|
||||
TopK: topk,
|
||||
|
|
|
@ -1689,7 +1689,7 @@ func TestSearchTask_ErrExecute(t *testing.T) {
|
|||
},
|
||||
ctx: ctx,
|
||||
result: &milvuspb.SearchResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
request: &milvuspb.SearchRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
|
@ -1736,7 +1736,7 @@ func TestSearchTask_ErrExecute(t *testing.T) {
|
|||
qn.ExpectedCalls = nil
|
||||
qn.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(nil, nil).Maybe()
|
||||
qn.EXPECT().Search(mock.Anything, mock.Anything).Return(&internalpb.SearchResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
assert.NoError(t, task.Execute(ctx))
|
||||
}
|
||||
|
|
|
@ -216,7 +216,7 @@ func (g *getStatisticsTask) PostExecute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
g.result = &milvuspb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: result,
|
||||
}
|
||||
|
||||
|
@ -248,7 +248,7 @@ func (g *getStatisticsTask) getStatisticsFromDataCoord(ctx context.Context) erro
|
|||
g.resultBuf = typeutil.NewConcurrentSet[*internalpb.GetStatisticsResponse]()
|
||||
}
|
||||
g.resultBuf.Insert(&internalpb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: result.Stats,
|
||||
})
|
||||
return nil
|
||||
|
@ -465,7 +465,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
|
|||
// return merr.Error(result.GetStatus())
|
||||
// }
|
||||
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
|
||||
// Status: merr.Status(nil),
|
||||
// Status: merr.Success(),
|
||||
// Stats: result.Stats,
|
||||
// })
|
||||
// log.Debug("get partition statistics from DataCoord execute done", zap.Int64("msgID", g.ID()))
|
||||
|
@ -480,7 +480,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
|
|||
// return err
|
||||
// }
|
||||
// g.result = &milvuspb.GetPartitionStatisticsResponse{
|
||||
// Status: merr.Status(nil),
|
||||
// Status: merr.Success(),
|
||||
// Stats: g.innerResult,
|
||||
// }
|
||||
// return nil
|
||||
|
@ -537,7 +537,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
|
|||
// return merr.Error(result.GetStatus())
|
||||
// }
|
||||
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
|
||||
// Status: merr.Status(nil),
|
||||
// Status: merr.Success(),
|
||||
// Stats: result.Stats,
|
||||
// })
|
||||
// } else { // some partitions have been loaded, get some partition statistics from datacoord
|
||||
|
@ -560,7 +560,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
|
|||
// return merr.Error(result.GetStatus())
|
||||
// }
|
||||
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
|
||||
// Status: merr.Status(nil),
|
||||
// Status: merr.Success(),
|
||||
// Stats: result.Stats,
|
||||
// })
|
||||
// }
|
||||
|
@ -576,7 +576,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
|
|||
// return err
|
||||
// }
|
||||
// g.result = &milvuspb.GetCollectionStatisticsResponse{
|
||||
// Status: merr.Status(nil),
|
||||
// Status: merr.Success(),
|
||||
// Stats: g.innerResult,
|
||||
// }
|
||||
// return nil
|
||||
|
@ -659,7 +659,7 @@ func (g *getCollectionStatisticsTask) Execute(ctx context.Context) error {
|
|||
return merr.Error(result.GetStatus())
|
||||
}
|
||||
g.result = &milvuspb.GetCollectionStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: result.Stats,
|
||||
}
|
||||
return nil
|
||||
|
@ -749,7 +749,7 @@ func (g *getPartitionStatisticsTask) Execute(ctx context.Context) error {
|
|||
return merr.Error(result.GetStatus())
|
||||
}
|
||||
g.result = &milvuspb.GetPartitionStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: result.Stats,
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -69,7 +69,7 @@ func (s *StatisticTaskSuite) SetupTest() {
|
|||
},
|
||||
}, nil).Maybe()
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{1, 2, 3},
|
||||
}, nil).Maybe()
|
||||
|
||||
|
@ -166,7 +166,7 @@ func (s *StatisticTaskSuite) getStatisticsTask(ctx context.Context) *getStatisti
|
|||
ctx: ctx,
|
||||
collectionName: s.collectionName,
|
||||
result: &milvuspb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
request: &milvuspb.GetStatisticsRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
|
|
|
@ -1272,11 +1272,11 @@ func TestDropPartitionTask(t *testing.T) {
|
|||
partitionName := prefix + funcutil.GenRandomStr()
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{},
|
||||
}, nil)
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
|
||||
mockCache := NewMockCache(t)
|
||||
|
@ -1632,7 +1632,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
Condition: NewTaskCondition(ctx),
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -1672,7 +1672,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
idAllocator: idAllocator,
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -1717,7 +1717,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
idAllocator: idAllocator,
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -1877,7 +1877,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
Condition: NewTaskCondition(ctx),
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -1962,7 +1962,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
},
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -2004,7 +2004,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
idAllocator: idAllocator,
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -2052,7 +2052,7 @@ func TestCreateAlias_all(t *testing.T) {
|
|||
Alias: "alias1",
|
||||
},
|
||||
ctx: ctx,
|
||||
result: merr.Status(nil),
|
||||
result: merr.Success(),
|
||||
rootCoord: rc,
|
||||
}
|
||||
|
||||
|
@ -2088,7 +2088,7 @@ func TestDropAlias_all(t *testing.T) {
|
|||
Alias: "alias1",
|
||||
},
|
||||
ctx: ctx,
|
||||
result: merr.Status(nil),
|
||||
result: merr.Success(),
|
||||
rootCoord: rc,
|
||||
}
|
||||
|
||||
|
@ -2126,7 +2126,7 @@ func TestAlterAlias_all(t *testing.T) {
|
|||
Alias: "alias1",
|
||||
},
|
||||
ctx: ctx,
|
||||
result: merr.Status(nil),
|
||||
result: merr.Success(),
|
||||
rootCoord: rc,
|
||||
}
|
||||
|
||||
|
@ -2515,11 +2515,11 @@ func Test_loadCollectionTask_Execute(t *testing.T) {
|
|||
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{},
|
||||
}, nil)
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
|
||||
dbName := funcutil.GenRandomStr()
|
||||
|
@ -2536,7 +2536,7 @@ func Test_loadCollectionTask_Execute(t *testing.T) {
|
|||
|
||||
rc.DescribeCollectionFunc = func(ctx context.Context, request *milvuspb.DescribeCollectionRequest, opts ...grpc.CallOption) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: newTestSchema(),
|
||||
CollectionID: collectionID,
|
||||
CollectionName: request.CollectionName,
|
||||
|
@ -2585,7 +2585,7 @@ func Test_loadCollectionTask_Execute(t *testing.T) {
|
|||
t.Run("no vector index", func(t *testing.T) {
|
||||
dc.DescribeIndexFunc = func(ctx context.Context, request *indexpb.DescribeIndexRequest, opts ...grpc.CallOption) (*indexpb.DescribeIndexResponse, error) {
|
||||
return &indexpb.DescribeIndexResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IndexInfos: []*indexpb.IndexInfo{
|
||||
{
|
||||
CollectionID: collectionID,
|
||||
|
@ -2616,11 +2616,11 @@ func Test_loadPartitionTask_Execute(t *testing.T) {
|
|||
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{},
|
||||
}, nil)
|
||||
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil)
|
||||
|
||||
dbName := funcutil.GenRandomStr()
|
||||
|
@ -2637,7 +2637,7 @@ func Test_loadPartitionTask_Execute(t *testing.T) {
|
|||
|
||||
rc.DescribeCollectionFunc = func(ctx context.Context, request *milvuspb.DescribeCollectionRequest, opts ...grpc.CallOption) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: newTestSchema(),
|
||||
CollectionID: collectionID,
|
||||
CollectionName: request.CollectionName,
|
||||
|
@ -2686,7 +2686,7 @@ func Test_loadPartitionTask_Execute(t *testing.T) {
|
|||
t.Run("no vector index", func(t *testing.T) {
|
||||
dc.DescribeIndexFunc = func(ctx context.Context, request *indexpb.DescribeIndexRequest, opts ...grpc.CallOption) (*indexpb.DescribeIndexResponse, error) {
|
||||
return &indexpb.DescribeIndexResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IndexInfos: []*indexpb.IndexInfo{
|
||||
{
|
||||
CollectionID: collectionID,
|
||||
|
@ -2716,7 +2716,7 @@ func TestCreateResourceGroupTask(t *testing.T) {
|
|||
|
||||
defer rc.Close()
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().CreateResourceGroup(mock.Anything, mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().CreateResourceGroup(mock.Anything, mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
ctx := context.Background()
|
||||
mgr := newShardClientMgr()
|
||||
|
@ -2755,7 +2755,7 @@ func TestDropResourceGroupTask(t *testing.T) {
|
|||
|
||||
defer rc.Close()
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().DropResourceGroup(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().DropResourceGroup(mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
ctx := context.Background()
|
||||
mgr := newShardClientMgr()
|
||||
|
@ -2794,7 +2794,7 @@ func TestTransferNodeTask(t *testing.T) {
|
|||
|
||||
defer rc.Close()
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().TransferNode(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().TransferNode(mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
ctx := context.Background()
|
||||
mgr := newShardClientMgr()
|
||||
|
@ -2833,7 +2833,7 @@ func TestTransferNodeTask(t *testing.T) {
|
|||
func TestTransferReplicaTask(t *testing.T) {
|
||||
rc := &MockRootCoordClientInterface{}
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().TransferReplica(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().TransferReplica(mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
ctx := context.Background()
|
||||
mgr := newShardClientMgr()
|
||||
|
@ -2876,7 +2876,7 @@ func TestListResourceGroupsTask(t *testing.T) {
|
|||
rc := &MockRootCoordClientInterface{}
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().ListResourceGroups(mock.Anything, mock.Anything).Return(&milvuspb.ListResourceGroupsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ResourceGroups: []string{meta.DefaultResourceGroupName, "rg"},
|
||||
}, nil)
|
||||
|
||||
|
@ -2918,7 +2918,7 @@ func TestDescribeResourceGroupTask(t *testing.T) {
|
|||
rc := &MockRootCoordClientInterface{}
|
||||
qc := getQueryCoordClient()
|
||||
qc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ResourceGroup: &querypb.ResourceGroupInfo{
|
||||
Name: "rg",
|
||||
Capacity: 2,
|
||||
|
@ -3010,7 +3010,7 @@ func TestDescribeResourceGroupTaskFailed(t *testing.T) {
|
|||
|
||||
qc.ExpectedCalls = nil
|
||||
qc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ResourceGroup: &querypb.ResourceGroupInfo{
|
||||
Name: "rg",
|
||||
Capacity: 2,
|
||||
|
@ -3353,7 +3353,7 @@ func TestPartitionKey(t *testing.T) {
|
|||
Condition: NewTaskCondition(ctx),
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
@ -3402,7 +3402,7 @@ func TestPartitionKey(t *testing.T) {
|
|||
},
|
||||
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
@ -3433,7 +3433,7 @@ func TestPartitionKey(t *testing.T) {
|
|||
},
|
||||
ctx: ctx,
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: nil,
|
||||
SuccIndex: nil,
|
||||
ErrIndex: nil,
|
||||
|
|
|
@ -276,7 +276,7 @@ func (it *upsertTask) PreExecute(ctx context.Context) error {
|
|||
log := log.Ctx(ctx).With(zap.String("collectionName", collectionName))
|
||||
|
||||
it.result = &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
IDs: &schemapb.IDs{
|
||||
IdField: nil,
|
||||
},
|
||||
|
|
|
@ -1041,7 +1041,7 @@ func Test_isPartitionIsLoaded(t *testing.T) {
|
|||
},
|
||||
}, nil)
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{partID},
|
||||
}, nil)
|
||||
loaded, err := isPartitionLoaded(ctx, qc, collID, []int64{partID})
|
||||
|
@ -1066,7 +1066,7 @@ func Test_isPartitionIsLoaded(t *testing.T) {
|
|||
},
|
||||
}, nil)
|
||||
qc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: []int64{partID},
|
||||
}, errors.New("error"))
|
||||
loaded, err := isPartitionLoaded(ctx, qc, collID, []int64{partID})
|
||||
|
@ -1404,7 +1404,7 @@ func Test_InsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1446,7 +1446,7 @@ func Test_InsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
_, err = checkPrimaryFieldData(case2.schema, case2.result, case2.insertMsg, true)
|
||||
|
@ -1486,7 +1486,7 @@ func Test_InsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
_, err = checkPrimaryFieldData(case3.schema, case3.result, case3.insertMsg, true)
|
||||
|
@ -1530,7 +1530,7 @@ func Test_InsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
case4.schema.Fields[0].IsPrimaryKey = true
|
||||
|
@ -1569,7 +1569,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
_, err := checkPrimaryFieldData(case1.schema, case1.result, case1.insertMsg, false)
|
||||
|
@ -1612,7 +1612,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
_, err = checkPrimaryFieldData(case2.schema, case2.result, case2.insertMsg, false)
|
||||
|
@ -1652,7 +1652,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
_, err = checkPrimaryFieldData(case3.schema, case3.result, case3.insertMsg, false)
|
||||
|
@ -1695,7 +1695,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
case4.schema.Fields[0].IsPrimaryKey = true
|
||||
|
@ -1739,7 +1739,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
case5.schema.Fields[0].IsPrimaryKey = true
|
||||
|
@ -1788,7 +1788,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
result: &milvuspb.MutationResult{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
},
|
||||
}
|
||||
case6.schema.Fields[0].IsPrimaryKey = true

@@ -87,7 +87,7 @@ func (suite *DistControllerTestSuite) TearDownSuite() {
func (suite *DistControllerTestSuite) TestStart() {
dispatchCalled := atomic.NewBool(false)
suite.mockCluster.EXPECT().GetDataDistribution(mock.Anything, mock.Anything, mock.Anything).Return(
&querypb.GetDataDistributionResponse{Status: merr.Status(nil), NodeID: 1},
&querypb.GetDataDistributionResponse{Status: merr.Success(), NodeID: 1},
nil,
)
suite.mockScheduler.EXPECT().Dispatch(int64(1)).Run(func(node int64) { dispatchCalled.Store(true) })
@@ -115,7 +115,7 @@ func (suite *DistControllerTestSuite) TestStop() {
suite.controller.StartDistInstance(context.TODO(), 1)
called := atomic.NewBool(false)
suite.mockCluster.EXPECT().GetDataDistribution(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(
&querypb.GetDataDistributionResponse{Status: merr.Status(nil), NodeID: 1},
&querypb.GetDataDistributionResponse{Status: merr.Success(), NodeID: 1},
nil,
).Run(func(args mock.Arguments) {
called.Store(true)
@@ -140,7 +140,7 @@ func (suite *DistControllerTestSuite) TestSyncAll() {
suite.mockCluster.EXPECT().GetDataDistribution(mock.Anything, mock.Anything, mock.Anything).Call.Return(
func(ctx context.Context, nodeID int64, req *querypb.GetDataDistributionRequest) *querypb.GetDataDistributionResponse {
return &querypb.GetDataDistributionResponse{
Status: merr.Status(nil),
Status: merr.Success(),
NodeID: nodeID,
}
},

@@ -134,10 +134,10 @@ func (suite *JobSuite) SetupSuite() {
suite.cluster = session.NewMockCluster(suite.T())
suite.cluster.EXPECT().
LoadPartitions(mock.Anything, mock.Anything, mock.Anything).
Return(merr.Status(nil), nil)
Return(merr.Success(), nil)
suite.cluster.EXPECT().
ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).
Return(merr.Status(nil), nil).Maybe()
Return(merr.Success(), nil).Maybe()
}
func (suite *JobSuite) SetupTest() {
@@ -1339,7 +1339,7 @@ func (suite *JobSuite) TestCallReleasePartitionFailed() {
return call.Method != "ReleasePartitions"
})
suite.cluster.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).
Return(merr.Status(nil), nil)
Return(merr.Success(), nil)
}
func (suite *JobSuite) TestSyncNewCreatedPartition() {

@@ -82,7 +82,7 @@ func (node *MockQueryNode) Start() error {
err = node.server.Serve(lis)
}()
successStatus := merr.Status(nil)
successStatus := merr.Success()
node.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{
Status: successStatus,
NodeID: node.ID,

@@ -197,7 +197,7 @@ func (suite *CollectionObserverSuite) SetupTest() {
mockCluster := session.NewMockCluster(suite.T())
suite.leaderObserver = NewLeaderObserver(suite.dist, suite.meta, suite.targetMgr, suite.broker, mockCluster)
mockCluster.EXPECT().SyncDistribution(mock.Anything, mock.Anything, mock.Anything).Return(merr.Status(nil), nil).Maybe()
mockCluster.EXPECT().SyncDistribution(mock.Anything, mock.Anything, mock.Anything).Return(merr.Success(), nil).Maybe()
// Test object
suite.ob = NewCollectionObserver(

@ -532,7 +532,7 @@ func (s *Server) GetComponentStates(ctx context.Context, req *milvuspb.GetCompon
|
|||
}
|
||||
|
||||
return &milvuspb.ComponentStates{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
State: serviceComponentInfo,
|
||||
// SubcomponentStates: subComponentInfos,
|
||||
}, nil
|
||||
|
@ -540,13 +540,13 @@ func (s *Server) GetComponentStates(ctx context.Context, req *milvuspb.GetCompon
|
|||
|
||||
func (s *Server) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: Params.CommonCfg.QueryCoordTimeTick.GetValue(),
|
||||
}, nil
|
||||
}
|
||||
|
|
|
@ -180,7 +180,7 @@ func (suite *ServerSuite) TestRecover() {
|
|||
|
||||
func (suite *ServerSuite) TestNodeUp() {
|
||||
node1 := mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli, 100)
|
||||
node1.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{Status: merr.Status(nil)}, nil)
|
||||
node1.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{Status: merr.Success()}, nil)
|
||||
err := node1.Start()
|
||||
suite.NoError(err)
|
||||
defer node1.Stop()
|
||||
|
@ -204,7 +204,7 @@ func (suite *ServerSuite) TestNodeUp() {
|
|||
suite.server.nodeMgr.Add(session.NewNodeInfo(1001, "localhost"))
|
||||
|
||||
node2 := mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli, 101)
|
||||
node2.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{Status: merr.Status(nil)}, nil).Maybe()
|
||||
node2.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{Status: merr.Success()}, nil).Maybe()
|
||||
err = node2.Start()
|
||||
suite.NoError(err)
|
||||
defer node2.Stop()
|
||||
|
@ -308,7 +308,7 @@ func (suite *ServerSuite) TestEnableActiveStandby() {
|
|||
mockDataCoord := coordMocks.NewMockDataCoordClient(suite.T())
|
||||
|
||||
mockRootCoord.EXPECT().DescribeCollection(mock.Anything, mock.Anything).Return(&milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: &schemapb.CollectionSchema{},
|
||||
}, nil).Maybe()
|
||||
for _, collection := range suite.collections {
|
||||
|
@ -319,7 +319,7 @@ func (suite *ServerSuite) TestEnableActiveStandby() {
|
|||
CollectionID: collection,
|
||||
}
|
||||
mockRootCoord.EXPECT().ShowPartitions(mock.Anything, req).Return(&milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: suite.partitions[collection],
|
||||
}, nil).Maybe()
|
||||
suite.expectGetRecoverInfoByMockDataCoord(collection, mockDataCoord)
|
||||
|
@ -408,8 +408,8 @@ func (suite *ServerSuite) expectGetRecoverInfo(collection int64) {
|
|||
}
|
||||
|
||||
func (suite *ServerSuite) expectLoadAndReleasePartitions(querynode *mocks.MockQueryNode) {
|
||||
querynode.EXPECT().LoadPartitions(mock.Anything, mock.Anything).Return(merr.Status(nil), nil).Maybe()
|
||||
querynode.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(merr.Status(nil), nil).Maybe()
|
||||
querynode.EXPECT().LoadPartitions(mock.Anything, mock.Anything).Return(merr.Success(), nil).Maybe()
|
||||
querynode.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(merr.Success(), nil).Maybe()
|
||||
}
|
||||
|
||||
func (suite *ServerSuite) expectGetRecoverInfoByMockDataCoord(collection int64, dataCoord *coordMocks.MockDataCoordClient) {
|
||||
|
@ -443,7 +443,7 @@ func (suite *ServerSuite) expectGetRecoverInfoByMockDataCoord(collection int64,
|
|||
}
|
||||
}
|
||||
dataCoord.EXPECT().GetRecoveryInfoV2(mock.Anything, getRecoveryInfoRequest).Return(&datapb.GetRecoveryInfoResponseV2{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Channels: vChannels,
|
||||
Segments: segmentInfos,
|
||||
}, nil).Maybe()
|
||||
|
@ -532,7 +532,7 @@ func (suite *ServerSuite) hackBroker(server *Server) {
|
|||
|
||||
for _, collection := range suite.collections {
|
||||
mockRootCoord.EXPECT().DescribeCollection(mock.Anything, mock.Anything).Return(&milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Schema: &schemapb.CollectionSchema{},
|
||||
}, nil).Maybe()
|
||||
req := &milvuspb.ShowPartitionsRequest{
|
||||
|
@ -542,7 +542,7 @@ func (suite *ServerSuite) hackBroker(server *Server) {
|
|||
CollectionID: collection,
|
||||
}
|
||||
mockRootCoord.EXPECT().ShowPartitions(mock.Anything, req).Return(&milvuspb.ShowPartitionsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionIDs: suite.partitions[collection],
|
||||
}, nil).Maybe()
|
||||
}
|

@@ -36,7 +36,6 @@ import (
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/errorutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
@@ -180,7 +179,7 @@ func (s *Server) ShowPartitions(ctx context.Context, req *querypb.ShowPartitions
}
return &querypb.ShowPartitionsResponse{
Status: merr.Status(nil),
Status: merr.Success(),
PartitionIDs: partitions,
InMemoryPercentages: percentages,
RefreshProgress: refreshProgresses,
@ -244,7 +243,7 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
|
|||
}
|
||||
|
||||
metrics.QueryCoordLoadCount.WithLabelValues(metrics.SuccessLabel).Inc()
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
|
@ -285,7 +284,7 @@ func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
|
|||
metrics.QueryCoordReleaseLatency.WithLabelValues().Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
meta.GlobalFailedLoadCache.Remove(req.GetCollectionID())
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
|
||||
|
@ -344,7 +343,7 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
|
|||
}
|
||||
|
||||
metrics.QueryCoordLoadCount.WithLabelValues(metrics.SuccessLabel).Inc()
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) checkResourceGroup(collectionID int64, resourceGroups []string) error {
|
||||
|
@ -410,7 +409,7 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
|
|||
metrics.QueryCoordReleaseLatency.WithLabelValues().Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
|
||||
meta.GlobalFailedLoadCache.Remove(req.GetCollectionID())
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
|
||||
|
@ -476,7 +475,7 @@ func (s *Server) GetPartitionStates(ctx context.Context, req *querypb.GetPartiti
|
|||
}
|
||||
|
||||
return &querypb.GetPartitionStatesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
PartitionDescriptions: states,
|
||||
}, nil
|
||||
}
|
||||
|
@ -517,7 +516,7 @@ func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfo
|
|||
}
|
||||
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Infos: infos,
|
||||
}, nil
|
||||
}
|
||||
|
@ -544,7 +543,7 @@ func (s *Server) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncN
|
|||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// refreshCollection must be called after loading a collection. It looks for new segments that are not loaded yet and
|
||||
|
@ -700,7 +699,7 @@ func (s *Server) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques
|
|||
log.Warn(msg, zap.Error(err))
|
||||
return merr.Status(errors.Wrap(err, msg)), nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
|
@ -725,7 +724,7 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
|
|||
}
|
||||
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Configuations: configList,
|
||||
}, nil
|
||||
}
|
||||
|
@ -745,7 +744,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
|
|||
}
|
||||
|
||||
resp := &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole,
|
||||
paramtable.GetNodeID()),
|
||||
}
|
||||
|
@ -793,7 +792,7 @@ func (s *Server) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReque
|
|||
}
|
||||
|
||||
resp := &milvuspb.GetReplicasResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Replicas: make([]*milvuspb.ReplicaInfo, 0),
|
||||
}
|
||||
|
||||
|
@ -845,7 +844,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
|
|||
}
|
||||
|
||||
resp := &querypb.GetShardLeadersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
|
||||
percentage := s.meta.CollectionManager.CalculateLoadPercentage(req.GetCollectionID())
@@ -970,16 +969,15 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {
if err := merr.CheckHealthy(s.State()); err != nil {
reason := errorutil.UnHealthReason("querycoord", paramtable.GetNodeID(), "querycoord is unhealthy")
return &milvuspb.CheckHealthResponse{Status: merr.Status(err), IsHealthy: false, Reasons: []string{reason}}, nil
return &milvuspb.CheckHealthResponse{Status: merr.Status(err), IsHealthy: false, Reasons: []string{err.Error()}}, nil
}
errReasons, err := s.checkNodeHealth(ctx)
if err != nil || len(errReasons) != 0 {
return &milvuspb.CheckHealthResponse{Status: merr.Status(nil), IsHealthy: false, Reasons: errReasons}, nil
return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: false, Reasons: errReasons}, nil
}
return &milvuspb.CheckHealthResponse{Status: merr.Status(nil), IsHealthy: true, Reasons: errReasons}, nil
return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: true, Reasons: errReasons}, nil
}
func (s *Server) checkNodeHealth(ctx context.Context) ([]string, error) {
@@ -991,13 +989,17 @@ func (s *Server) checkNodeHealth(ctx context.Context) ([]string, error) {
node := node
group.Go(func() error {
resp, err := s.cluster.GetComponentStates(ctx, node.ID())
isHealthy, reason := errorutil.UnHealthReasonWithComponentStatesOrErr("querynode", node.ID(), resp, err)
if !isHealthy {
if err != nil {
return err
}
err = merr.AnalyzeState("QueryNode", node.ID(), resp)
if err != nil {
mu.Lock()
defer mu.Unlock()
errReasons = append(errReasons, reason)
errReasons = append(errReasons, err.Error())
}
return err
return nil
})
}
@ -1022,7 +1024,7 @@ func (s *Server) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateRe
|
|||
log.Warn("failed to create resource group", zap.Error(err))
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) {
|
||||
|
@ -1048,7 +1050,7 @@ func (s *Server) DropResourceGroup(ctx context.Context, req *milvuspb.DropResour
|
|||
log.Warn("failed to drop resource group", zap.Error(err))
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) {
|
||||
|
@ -1104,7 +1106,7 @@ func (s *Server) TransferNode(ctx context.Context, req *milvuspb.TransferNodeReq
|
|||
|
||||
utils.AddNodesToCollectionsInRG(s.meta, req.GetTargetResourceGroup(), nodes...)
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) {
|
||||
|
@ -1164,7 +1166,7 @@ func (s *Server) TransferReplica(ctx context.Context, req *querypb.TransferRepli
|
|||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (s *Server) transferReplica(targetRG string, replicas []*meta.Replica) error {
|
||||
|
@ -1188,7 +1190,7 @@ func (s *Server) ListResourceGroups(ctx context.Context, req *milvuspb.ListResou
|
|||
|
||||
log.Info("list resource group request received")
|
||||
resp := &milvuspb.ListResourceGroupsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(s.State()); err != nil {
|
||||
log.Warn("failed to list resource group", zap.Error(err))
|
||||
|
@ -1207,7 +1209,7 @@ func (s *Server) DescribeResourceGroup(ctx context.Context, req *querypb.Describ
|
|||
|
||||
log.Info("describe resource group request received")
|
||||
resp := &querypb.DescribeResourceGroupResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
if err := merr.CheckHealthy(s.State()); err != nil {
|
||||
log.Warn("failed to describe resource group", zap.Error(err))
|
||||
@ -876,7 +876,7 @@ func (suite *ServiceSuite) TestReleaseCollection() {
|
|||
server := suite.server
|
||||
|
||||
suite.cluster.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
|
||||
// Test release all collections
|
||||
for _, collection := range suite.collections {
|
||||
|
@ -916,7 +916,7 @@ func (suite *ServiceSuite) TestReleasePartition() {
|
|||
|
||||
// Test release all partitions
|
||||
suite.cluster.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
for _, collection := range suite.collections {
|
||||
req := &querypb.ReleasePartitionsRequest{
|
||||
CollectionID: collection,
|
||||
|
@ -1319,7 +1319,7 @@ func (suite *ServiceSuite) TestGetMetrics() {
|
|||
|
||||
for _, node := range suite.nodes {
|
||||
suite.cluster.EXPECT().GetMetrics(ctx, node, mock.Anything).Return(&milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
ComponentName: "QueryNode",
|
||||
}, nil)
|
||||
}
|
||||
|
@ -1740,7 +1740,7 @@ func (suite *ServiceSuite) expectLoadPartitions() {
|
|||
suite.broker.EXPECT().DescribeIndex(mock.Anything, mock.Anything).
|
||||
Return(nil, nil)
|
||||
suite.cluster.EXPECT().LoadPartitions(mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), nil)
|
||||
Return(merr.Success(), nil)
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) getAllSegments(collection int64) []int64 {
|
||||
|
|
|
@@ -109,7 +109,7 @@ func (suite *ClusterTestSuite) createTestServers() []querypb.QueryNodeServer {
}

func (suite *ClusterTestSuite) createDefaultMockServer() querypb.QueryNodeServer {
succStatus := merr.Status(nil)
succStatus := merr.Success()
svr := mocks.NewMockQueryNodeServer(suite.T())
// TODO: register more mock methods
svr.EXPECT().LoadSegments(
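The test hunks update the generated mocks the same way, stubbing successful RPCs with Return(merr.Success(), nil). A hedged, self-contained sketch of that stubbing shape using a hand-rolled testify mock instead of the generated MockQueryNodeServer:

package example

import (
	"testing"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/stretchr/testify/mock"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

// fakeCluster is a hand-rolled testify mock standing in for the generated mocks
// used above; only the Return(merr.Success(), nil) pattern is the point here.
type fakeCluster struct {
	mock.Mock
}

func (f *fakeCluster) ReleasePartitions(args ...interface{}) (*commonpb.Status, error) {
	ret := f.Called(args...)
	return ret.Get(0).(*commonpb.Status), ret.Error(1)
}

func TestStubShape(t *testing.T) {
	c := &fakeCluster{}
	// Equivalent of cluster.EXPECT().ReleasePartitions(...).Return(merr.Success(), nil).
	c.On("ReleasePartitions", mock.Anything, mock.Anything, mock.Anything).
		Return(merr.Success(), nil)

	status, err := c.ReleasePartitions("ctx", "node", "req")
	if err != nil || status == nil {
		t.Fatalf("unexpected stub result: %v, %v", status, err)
	}
}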
@ -218,7 +218,7 @@ func (suite *ClusterTestSuite) TestLoadSegments() {
|
|||
Infos: []*querypb.SegmentLoadInfo{{}},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.LoadSegments(ctx, 1, &querypb.LoadSegmentsRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -244,7 +244,7 @@ func (suite *ClusterTestSuite) TestWatchDmChannels() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.WatchDmChannels(ctx, 1, &querypb.WatchDmChannelsRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -262,7 +262,7 @@ func (suite *ClusterTestSuite) TestUnsubDmChannel() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.UnsubDmChannel(ctx, 1, &querypb.UnsubDmChannelRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -280,7 +280,7 @@ func (suite *ClusterTestSuite) TestReleaseSegments() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.ReleaseSegments(ctx, 1, &querypb.ReleaseSegmentsRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -298,7 +298,7 @@ func (suite *ClusterTestSuite) TestLoadAndReleasePartitions() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.LoadPartitions(ctx, 1, &querypb.LoadPartitionsRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -313,7 +313,7 @@ func (suite *ClusterTestSuite) TestLoadAndReleasePartitions() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.ReleasePartitions(ctx, 1, &querypb.ReleasePartitionsRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -331,7 +331,7 @@ func (suite *ClusterTestSuite) TestGetDataDistribution() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), resp.GetStatus())
|
||||
suite.Equal(merr.Success(), resp.GetStatus())
|
||||
|
||||
resp, err = suite.cluster.GetDataDistribution(ctx, 1, &querypb.GetDataDistributionRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
@ -348,7 +348,7 @@ func (suite *ClusterTestSuite) TestGetMetrics() {
|
|||
ctx := context.TODO()
|
||||
resp, err := suite.cluster.GetMetrics(ctx, 0, &milvuspb.GetMetricsRequest{})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), resp.GetStatus())
|
||||
suite.Equal(merr.Success(), resp.GetStatus())
|
||||
|
||||
resp, err = suite.cluster.GetMetrics(ctx, 1, &milvuspb.GetMetricsRequest{})
|
||||
suite.NoError(err)
|
||||
|
@ -364,7 +364,7 @@ func (suite *ClusterTestSuite) TestSyncDistribution() {
|
|||
Base: &commonpb.MsgBase{},
|
||||
})
|
||||
suite.NoError(err)
|
||||
suite.Equal(merr.Status(nil), status)
|
||||
suite.Equal(merr.Success(), status)
|
||||
|
||||
status, err = suite.cluster.SyncDistribution(ctx, 1, &querypb.SyncDistributionRequest{
|
||||
Base: &commonpb.MsgBase{},
|
||||
|
|
|
@ -228,7 +228,7 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {
|
|||
},
|
||||
},
|
||||
}, nil)
|
||||
suite.cluster.EXPECT().WatchDmChannels(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().WatchDmChannels(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test subscribe channel task
|
||||
tasks := []Task{}
|
||||
|
@ -323,7 +323,7 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {
|
|||
targetNode := int64(1)
|
||||
|
||||
// Expect
|
||||
suite.cluster.EXPECT().UnsubDmChannel(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().UnsubDmChannel(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test unsubscribe channel task
|
||||
tasks := []Task{}
|
||||
|
@ -408,7 +408,7 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
|
|||
}, nil)
|
||||
suite.broker.EXPECT().GetIndexInfo(mock.Anything, suite.collection, segment).Return(nil, nil)
|
||||
}
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test load segment task
|
||||
suite.dist.ChannelDistManager.Update(targetNode, meta.DmChannelFromVChannel(&datapb.VchannelInfo{
|
||||
|
@ -504,7 +504,7 @@ func (suite *TaskSuite) TestLoadSegmentTaskNotIndex() {
|
|||
}, nil)
|
||||
suite.broker.EXPECT().GetIndexInfo(mock.Anything, suite.collection, segment).Return(nil, merr.WrapErrIndexNotFoundForSegment(segment))
|
||||
}
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test load segment task
|
||||
suite.dist.ChannelDistManager.Update(targetNode, meta.DmChannelFromVChannel(&datapb.VchannelInfo{
|
||||
|
@ -653,7 +653,7 @@ func (suite *TaskSuite) TestReleaseSegmentTask() {
|
|||
}
|
||||
|
||||
// Expect
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test load segment task
|
||||
view := &meta.LeaderView{
|
||||
|
@ -714,7 +714,7 @@ func (suite *TaskSuite) TestReleaseGrowingSegmentTask() {
|
|||
targetNode := int64(3)
|
||||
|
||||
// Expect
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
tasks := []Task{}
|
||||
for _, segment := range suite.releaseSegments {
|
||||
|
@ -798,8 +798,8 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
|
|||
}, nil)
|
||||
suite.broker.EXPECT().GetIndexInfo(mock.Anything, suite.collection, segment).Return(nil, nil)
|
||||
}
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, leader, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, leader, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, leader, mock.Anything).Return(merr.Success(), nil)
|
||||
suite.cluster.EXPECT().ReleaseSegments(mock.Anything, leader, mock.Anything).Return(merr.Success(), nil)
|
||||
vchannel := &datapb.VchannelInfo{
|
||||
CollectionID: suite.collection,
|
||||
ChannelName: channel.ChannelName,
|
||||
|
@ -968,7 +968,7 @@ func (suite *TaskSuite) TestTaskCanceled() {
|
|||
}, nil)
|
||||
suite.broker.EXPECT().GetIndexInfo(mock.Anything, suite.collection, segment).Return(nil, nil)
|
||||
}
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test load segment task
|
||||
suite.dist.ChannelDistManager.Update(targetNode, meta.DmChannelFromVChannel(&datapb.VchannelInfo{
|
||||
|
@ -1055,7 +1055,7 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
|
|||
}, nil)
|
||||
suite.broker.EXPECT().GetIndexInfo(mock.Anything, suite.collection, segment).Return(nil, nil)
|
||||
}
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Status(nil), nil)
|
||||
suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
// Test load segment task
|
||||
suite.meta.ReplicaManager.Put(createReplica(suite.collection, targetNode))
|
||||
|
|
|
@ -356,7 +356,7 @@ func (s *RemoteWorkerSuite) TestQueryStream() {
|
|||
|
||||
for _, id := range ids {
|
||||
err := server.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: []int64{id}},
|
||||
|
@ -418,7 +418,7 @@ func (s *RemoteWorkerSuite) TestQueryStream() {
|
|||
server := client.CreateServer()
|
||||
|
||||
err := server.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: []int64{id}},
|
||||
@@ -119,11 +119,11 @@ func (sd *shardDelegator) getLogger(ctx context.Context) *log.MLogger {

// Serviceable returns whether delegator is serviceable now.
func (sd *shardDelegator) Serviceable() bool {
return lifetime.IsWorking(sd.lifetime.GetState())
return lifetime.IsWorking(sd.lifetime.GetState()) == nil
}

func (sd *shardDelegator) Stopped() bool {
return !lifetime.NotStopped(sd.lifetime.GetState())
return lifetime.NotStopped(sd.lifetime.GetState()) != nil
}

// Start sets delegator to working state.

@@ -178,8 +178,8 @@ func (sd *shardDelegator) modifyQueryRequest(req *querypb.QueryRequest, scope qu
// Search preforms search operation on shard.
func (sd *shardDelegator) Search(ctx context.Context, req *querypb.SearchRequest) ([]*internalpb.SearchResults, error) {
log := sd.getLogger(ctx)
if !sd.lifetime.Add(lifetime.IsWorking) {
return nil, errors.New("delegator is not serviceable")
if err := sd.lifetime.Add(lifetime.IsWorking); err != nil {
return nil, err
}
defer sd.lifetime.Done()

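Both delegator hunks ride on lifetime.Add now returning an error instead of a bool, so callers can forward the reason directly. A simplified sketch of that guard, using a stand-in type rather than the real pkg/util/lifetime implementation:

package example

import (
	"errors"
	"sync"
)

type state int

const (
	working state = iota
	stopped
)

// lifetimeGuard is a simplified stand-in for the lifetime helper used above.
type lifetimeGuard struct {
	mu    sync.Mutex
	state state
	wg    sync.WaitGroup
}

// Add admits a caller only if the checker accepts the current state.
// Returning an error (instead of a bare bool) lets callers propagate the reason.
func (l *lifetimeGuard) Add(check func(state) error) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if err := check(l.state); err != nil {
		return err
	}
	l.wg.Add(1)
	return nil
}

func (l *lifetimeGuard) Done() { l.wg.Done() }

func isWorking(s state) error {
	if s != working {
		return errors.New("delegator is not serviceable")
	}
	return nil
}

Call sites then read if err := l.Add(isWorking); err != nil { return nil, err }, which is the shape the Search, Query, and GetStatistics hunks converge on.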
@ -306,8 +306,8 @@ func (sd *shardDelegator) QueryStream(ctx context.Context, req *querypb.QueryReq
|
|||
// Query performs query operation on shard.
|
||||
func (sd *shardDelegator) Query(ctx context.Context, req *querypb.QueryRequest) ([]*internalpb.RetrieveResults, error) {
|
||||
log := sd.getLogger(ctx)
|
||||
if !sd.lifetime.Add(lifetime.IsWorking) {
|
||||
return nil, errors.New("delegator is not serviceable")
|
||||
if err := sd.lifetime.Add(lifetime.IsWorking); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sd.lifetime.Done()
|
||||
|
||||
|
@ -371,8 +371,8 @@ func (sd *shardDelegator) Query(ctx context.Context, req *querypb.QueryRequest)
|
|||
// GetStatistics returns statistics aggregated by delegator.
|
||||
func (sd *shardDelegator) GetStatistics(ctx context.Context, req *querypb.GetStatisticsRequest) ([]*internalpb.GetStatisticsResponse, error) {
|
||||
log := sd.getLogger(ctx)
|
||||
if !sd.lifetime.Add(lifetime.IsWorking) {
|
||||
return nil, errors.New("delegator is not serviceable")
|
||||
if err := sd.lifetime.Add(lifetime.IsWorking); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sd.lifetime.Done()
|
||||
|
||||
|
@ -656,7 +656,7 @@ func NewShardDelegator(collectionID UniqueID, replicaID UniqueID, channel string
|
|||
}
|
||||
m := sync.Mutex{}
|
||||
sd.tsCond = sync.NewCond(&m)
|
||||
if sd.lifetime.Add(lifetime.NotStopped) {
|
||||
if sd.lifetime.Add(lifetime.NotStopped) == nil {
|
||||
go sd.watchTSafe()
|
||||
}
|
||||
log.Info("finish build new shardDelegator")
|
||||
|
|
|
@ -659,7 +659,7 @@ func (s *DelegatorSuite) TestQueryStream() {
|
|||
}
|
||||
|
||||
srv.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: req.GetSegmentIDs()},
|
||||
|
@ -676,7 +676,7 @@ func (s *DelegatorSuite) TestQueryStream() {
|
|||
s.EqualValues([]string{s.vchannelName}, req.GetDmlChannels())
|
||||
s.ElementsMatch([]int64{1002, 1003}, req.GetSegmentIDs())
|
||||
srv.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: req.GetSegmentIDs()},
|
||||
|
@ -809,7 +809,7 @@ func (s *DelegatorSuite) TestQueryStream() {
|
|||
s.EqualValues([]string{s.vchannelName}, req.GetDmlChannels())
|
||||
s.ElementsMatch([]int64{1002, 1003}, req.GetSegmentIDs())
|
||||
srv.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{
|
||||
IdField: &schemapb.IDs_IntId{
|
||||
IntId: &schemapb.LongArray{Data: req.GetSegmentIDs()},
|
||||
|
@ -1057,7 +1057,7 @@ func TestDelegatorWatchTsafe(t *testing.T) {
|
|||
|
||||
m := sync.Mutex{}
|
||||
sd.tsCond = sync.NewCond(&m)
|
||||
if sd.lifetime.Add(lifetime.NotStopped) {
|
||||
if sd.lifetime.Add(lifetime.NotStopped) == nil {
|
||||
go sd.watchTSafe()
|
||||
}
|
||||
|
||||
|
@ -1085,7 +1085,7 @@ func TestDelegatorTSafeListenerClosed(t *testing.T) {
|
|||
m := sync.Mutex{}
|
||||
sd.tsCond = sync.NewCond(&m)
|
||||
signal := make(chan struct{})
|
||||
if sd.lifetime.Add(lifetime.NotStopped) {
|
||||
if sd.lifetime.Add(lifetime.NotStopped) == nil {
|
||||
go func() {
|
||||
sd.watchTSafe()
|
||||
close(signal)
|
||||
|
|
|
@ -37,7 +37,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
|
@ -102,7 +101,7 @@ func (node *QueryNode) loadDeltaLogs(ctx context.Context, req *querypb.LoadSegme
|
|||
return merr.Status(finalErr)
|
||||
}
|
||||
|
||||
return merr.Status(nil)
|
||||
return merr.Success()
|
||||
}
|
||||
|
||||
func (node *QueryNode) loadIndex(ctx context.Context, req *querypb.LoadSegmentsRequest) *commonpb.Status {
|
||||
|
@ -111,7 +110,7 @@ func (node *QueryNode) loadIndex(ctx context.Context, req *querypb.LoadSegmentsR
|
|||
zap.Int64s("segmentIDs", lo.Map(req.GetInfos(), func(info *querypb.SegmentLoadInfo, _ int) int64 { return info.GetSegmentID() })),
|
||||
)
|
||||
|
||||
status := merr.Status(nil)
|
||||
status := merr.Success()
|
||||
log.Info("start to load index")
|
||||
|
||||
for _, info := range req.GetInfos() {
|
||||
|
@ -361,8 +360,8 @@ func (node *QueryNode) searchChannel(ctx context.Context, req *querypb.SearchReq
|
|||
)
|
||||
traceID := trace.SpanFromContext(ctx).SpanContext().TraceID()
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
return nil, merr.WrapErrServiceNotReady(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID()))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
||||
|
@ -495,7 +494,7 @@ func segmentStatsResponse(segStats []segments.SegmentStats) *internalpb.GetStati
|
|||
resultMap["row_count"] = strconv.FormatInt(totalRowNum, 10)
|
||||
|
||||
ret := &internalpb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: funcutil.Map2KeyValuePair(resultMap),
|
||||
}
|
||||
return ret
|
||||
|
@ -534,7 +533,7 @@ func reduceStatisticResponse(results []*internalpb.GetStatisticsResponse) (*inte
|
|||
}
|
||||
|
||||
ret := &internalpb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Stats: funcutil.Map2KeyValuePair(stringMap),
|
||||
}
|
||||
return ret, nil
|
||||
|
|
|
@ -204,7 +204,7 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
|
|||
}
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, paramtable.GetNodeID()),
|
||||
}, nil
|
||||
|
|
|
@ -224,7 +224,7 @@ func DecodeSearchResults(searchResults []*internalpb.SearchResults) ([]*schemapb
|
|||
|
||||
func EncodeSearchResultData(searchResultData *schemapb.SearchResultData, nq int64, topk int64, metricType string) (searchResults *internalpb.SearchResults, err error) {
|
||||
searchResults = &internalpb.SearchResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
NumQueries: nq,
|
||||
TopK: topk,
|
||||
MetricType: metricType,
|
||||
|
@ -247,7 +247,7 @@ func MergeInternalRetrieveResult(ctx context.Context, retrieveResults []*interna
|
|||
)
|
||||
var (
|
||||
ret = &internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: &schemapb.IDs{},
|
||||
}
|
||||
skipDupCnt int64
|
||||
|
|
|
@ -103,7 +103,7 @@ func retrieveOnSegmentsWithStream(ctx context.Context, segments []Segment, segTy
|
|||
}
|
||||
|
||||
if err = svr.Send(&internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: result.GetIds(),
|
||||
FieldsData: result.GetFieldsData(),
|
||||
}); err != nil {
|
||||
|
|
|
@ -58,7 +58,7 @@ import (
|
|||
// GetComponentStates returns information about whether the node is healthy
|
||||
func (node *QueryNode) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) {
|
||||
stats := &milvuspb.ComponentStates{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
|
||||
code := node.lifetime.GetState()
|
||||
|
@ -80,7 +80,7 @@ func (node *QueryNode) GetComponentStates(ctx context.Context, req *milvuspb.Get
|
|||
// TimeTickChannel contains many time tick messages, which will be sent by query nodes
|
||||
func (node *QueryNode) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Value: paramtable.Get().CommonCfg.QueryCoordTimeTick.GetValue(),
|
||||
}, nil
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ func (node *QueryNode) GetTimeTickChannel(ctx context.Context, req *internalpb.G
|
|||
// Statistics channel contains statistics infos of query nodes, such as segment infos, memory infos
|
||||
func (node *QueryNode) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
|
||||
return &milvuspb.StringResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -102,9 +102,7 @@ func (node *QueryNode) GetStatistics(ctx context.Context, req *querypb.GetStatis
|
|||
zap.Uint64("guaranteeTimestamp", req.GetReq().GetGuaranteeTimestamp()),
|
||||
zap.Uint64("timeTravel", req.GetReq().GetTravelTimestamp()))
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return &internalpb.GetStatisticsResponse{
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
|
@ -119,7 +117,7 @@ func (node *QueryNode) GetStatistics(ctx context.Context, req *querypb.GetStatis
|
|||
}, nil
|
||||
}
|
||||
failRet := &internalpb.GetStatisticsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
|
||||
var toReduceResults []*internalpb.GetStatisticsResponse
|
||||
|
@@ -208,9 +206,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm
)

// check node healthy
if !node.lifetime.Add(commonpbutil.IsHealthy) {
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
err := merr.WrapErrServiceNotReady(msg)
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
return merr.Status(err), nil
}
defer node.lifetime.Done()
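The same guard swap repeats through the QueryNode handlers: three lines that built a WrapErrServiceNotReady message collapse into one check whose error feeds merr.Status. A hedged sketch of the handler shape (guarded is hypothetical and stands in for node.lifetime.Add(merr.IsHealthy)):

package example

import (
	"context"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

// guarded is a hypothetical admission check standing in for node.lifetime.Add(merr.IsHealthy).
func guarded(ctx context.Context) error { return nil }

// watchChannels sketches the new handler shape: the guard error is converted
// into a status with merr.Status instead of being rebuilt from a format string.
func watchChannels(ctx context.Context) (*commonpb.Status, error) {
	if err := guarded(ctx); err != nil {
		return merr.Status(err), nil
	}
	// real work elided
	return merr.Success(), nil
}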
@ -229,7 +225,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm
|
|||
if !node.subscribingChannels.Insert(channel.GetChannelName()) {
|
||||
msg := "channel subscribing..."
|
||||
log.Warn(msg)
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
defer node.subscribingChannels.Remove(channel.GetChannelName())
|
||||
|
||||
|
@ -243,7 +239,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm
|
|||
_, exist := node.delegators.Get(channel.GetChannelName())
|
||||
if exist {
|
||||
log.Info("channel already subscribed")
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
node.manager.Collection.PutOrRef(req.GetCollectionID(), req.GetSchema(),
|
||||
|
@ -333,7 +329,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm
|
|||
// delegator after all steps done
|
||||
delegator.Start()
|
||||
log.Info("watch dml channel success")
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmChannelRequest) (*commonpb.Status, error) {
|
||||
|
@ -346,9 +342,7 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC
|
|||
log.Info("received unsubscribe channel request")
|
||||
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -373,7 +367,7 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC
|
|||
}
|
||||
log.Info("unsubscribed channel")
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
func (node *QueryNode) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
|
||||
|
@ -384,9 +378,7 @@ func (node *QueryNode) LoadPartitions(ctx context.Context, req *querypb.LoadPart
|
|||
|
||||
log.Info("received load partitions request")
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -397,7 +389,7 @@ func (node *QueryNode) LoadPartitions(ctx context.Context, req *querypb.LoadPart
|
|||
}
|
||||
|
||||
log.Info("load partitions done")
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// LoadSegments load historical data into query node, historical data can be vector data or index
|
||||
|
@ -417,9 +409,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, req *querypb.LoadSegmen
|
|||
zap.Bool("needTransfer", req.GetNeedTransfer()),
|
||||
)
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
node.lifetime.Done()
|
||||
|
@ -446,7 +436,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, req *querypb.LoadSegmen
|
|||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
if req.GetLoadScope() == querypb.LoadScope_Delta {
|
||||
|
@ -477,19 +467,17 @@ func (node *QueryNode) LoadSegments(ctx context.Context, req *querypb.LoadSegmen
|
|||
log.Info("load segments done...",
|
||||
zap.Int64s("segments", lo.Map(loaded, func(s segments.Segment, _ int) int64 { return s.ID() })))
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// ReleaseCollection clears all data related to this collection on the querynode
|
||||
func (node *QueryNode) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// ReleasePartitions clears all data related to this partition on the querynode
|
||||
|
@ -502,9 +490,7 @@ func (node *QueryNode) ReleasePartitions(ctx context.Context, req *querypb.Relea
|
|||
log.Info("received release partitions request")
|
||||
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -517,7 +503,7 @@ func (node *QueryNode) ReleasePartitions(ctx context.Context, req *querypb.Relea
|
|||
}
|
||||
|
||||
log.Info("release partitions done")
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// ReleaseSegments remove the specified segments from query node according segmentIDs, partitionIDs, and collectionID
|
||||
|
@ -535,9 +521,7 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, req *querypb.Release
|
|||
)
|
||||
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthyOrStopping); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -578,7 +562,7 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, req *querypb.Release
|
|||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
log.Info("start to release segments")
|
||||
|
@ -589,14 +573,12 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, req *querypb.Release
|
|||
}
|
||||
node.manager.Collection.Unref(req.GetCollectionID(), uint32(sealedCount))
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// GetSegmentInfo returns segment information of the collection on the queryNode, and the information includes memSize, numRow, indexName, indexID ...
|
||||
func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
|
@ -648,7 +630,7 @@ func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *querypb.GetSegmen
|
|||
}
|
||||
|
||||
return &querypb.GetSegmentInfoResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Infos: segmentInfos,
|
||||
}, nil
|
||||
}
|
||||
|
@ -664,8 +646,8 @@ func (node *QueryNode) SearchSegments(ctx context.Context, req *querypb.SearchRe
|
|||
)
|
||||
|
||||
resp := &internalpb.SearchResults{}
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
resp.Status = merr.Status(merr.WrapErrServiceNotReady(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID())))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
return resp, nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -742,9 +724,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
|
|||
|
||||
tr := timerecord.NewTimeRecorderWithTrace(ctx, "SearchRequest")
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return &internalpb.SearchResults{
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
|
@ -760,7 +740,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
|
|||
}
|
||||
|
||||
failRet := &internalpb.SearchResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
collection := node.manager.Collection.Get(req.GetReq().GetCollectionID())
|
||||
if collection == nil {
|
||||
|
@ -840,7 +820,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
|
|||
// only used for delegator query segments from worker
|
||||
func (node *QueryNode) QuerySegments(ctx context.Context, req *querypb.QueryRequest) (*internalpb.RetrieveResults, error) {
|
||||
resp := &internalpb.RetrieveResults{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}
|
||||
msgID := req.Req.Base.GetMsgID()
|
||||
traceID := trace.SpanFromContext(ctx).SpanContext().TraceID()
|
||||
|
@ -852,8 +832,7 @@ func (node *QueryNode) QuerySegments(ctx context.Context, req *querypb.QueryRequ
|
|||
zap.String("scope", req.GetScope().String()),
|
||||
)
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
err := merr.WrapErrServiceUnavailable(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID()))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
return resp, nil
|
||||
}
|
||||
|
@ -933,9 +912,7 @@ func (node *QueryNode) Query(ctx context.Context, req *querypb.QueryRequest) (*i
|
|||
)
|
||||
tr := timerecord.NewTimeRecorderWithTrace(ctx, "QueryRequest")
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return &internalpb.RetrieveResults{
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
|
@ -1021,9 +998,7 @@ func (node *QueryNode) QueryStream(req *querypb.QueryRequest, srv querypb.QueryN
|
|||
zap.Bool("isCount", req.GetReq().GetIsCount()),
|
||||
)
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
concurrentSrv.Send(&internalpb.RetrieveResults{Status: merr.Status(err)})
|
||||
return nil
|
||||
}
|
||||
|
@ -1090,8 +1065,8 @@ func (node *QueryNode) QueryStreamSegments(req *querypb.QueryRequest, srv queryp
|
|||
}
|
||||
}()
|
||||
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
resp.Status = merr.Status(merr.WrapErrServiceUnavailable(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID())))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
resp.Status = merr.Status(err)
|
||||
concurrentSrv.Send(resp)
|
||||
return nil
|
||||
}
|
||||
|
@ -1127,13 +1102,12 @@ func (node *QueryNode) QueryStreamSegments(req *querypb.QueryRequest, srv queryp
|
|||
|
||||
// SyncReplicaSegments syncs replica node & segments states
|
||||
func (node *QueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.SyncReplicaSegmentsRequest) (*commonpb.Status, error) {
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// ShowConfigurations returns the configurations of queryNode matching req.Pattern
|
||||
func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID()))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
log.Warn("QueryNode.ShowConfigurations failed",
|
||||
zap.Int64("nodeId", paramtable.GetNodeID()),
|
||||
zap.String("req", req.Pattern),
|
||||
|
@ -1156,15 +1130,14 @@ func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.S
|
|||
}
|
||||
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Configuations: configList,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetMetrics return system infos of the query node, such as total memory, memory usage, cpu usage ...
|
||||
func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID()))
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
log.Warn("QueryNode.GetMetrics failed",
|
||||
zap.Int64("nodeId", paramtable.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
|
@ -1225,9 +1198,8 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
|
|||
zap.Int64("msgID", req.GetBase().GetMsgID()),
|
||||
zap.Int64("nodeID", paramtable.GetNodeID()),
|
||||
)
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
err := merr.WrapErrServiceNotReady(fmt.Sprintf("node id: %d is unhealthy", paramtable.GetNodeID()))
|
||||
log.Warn("QueryNode.GetMetrics failed",
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
log.Warn("QueryNode.GetDataDistribution failed",
|
||||
zap.Error(err))
|
||||
|
||||
return &querypb.GetDataDistributionResponse{
|
||||
|
@ -1305,7 +1277,7 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
|
|||
})
|
||||
|
||||
return &querypb.GetDataDistributionResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
NodeID: paramtable.GetNodeID(),
|
||||
Segments: segmentVersionInfos,
|
||||
Channels: channelVersionInfos,
|
||||
|
@ -1317,9 +1289,7 @@ func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDi
|
|||
log := log.Ctx(ctx).With(zap.Int64("collectionID", req.GetCollectionID()),
|
||||
zap.String("channel", req.GetChannel()), zap.Int64("currentNodeID", paramtable.GetNodeID()))
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -1405,7 +1375,7 @@ func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDi
|
|||
}, true)
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// Delete is used to forward delete message between delegator and workers.
|
||||
|
@ -1417,9 +1387,7 @@ func (node *QueryNode) Delete(ctx context.Context, req *querypb.DeleteRequest) (
|
|||
)
|
||||
|
||||
// check node healthy
|
||||
if !node.lifetime.Add(commonpbutil.IsHealthy) {
|
||||
msg := fmt.Sprintf("query node %d is not ready", paramtable.GetNodeID())
|
||||
err := merr.WrapErrServiceNotReady(msg)
|
||||
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
defer node.lifetime.Done()
|
||||
|
@ -1451,5 +1419,5 @@ func (node *QueryNode) Delete(ctx context.Context, req *querypb.DeleteRequest) (
|
|||
}
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/conc"
|
||||
"github.com/milvus-io/milvus/pkg/util/lifetime"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
)
|
||||
|
@ -59,8 +58,8 @@ type scheduler struct {
|
|||
// Add a new task into scheduler,
|
||||
// error will be returned if scheduler reaches some limit.
|
||||
func (s *scheduler) Add(task Task) (err error) {
|
||||
if !s.lifetime.Add(lifetime.IsWorking) {
|
||||
return merr.WrapErrServiceUnavailable("scheduler closed")
|
||||
if err := s.lifetime.Add(lifetime.IsWorking); err != nil {
|
||||
return err
|
||||
}
|
||||
defer s.lifetime.Done()
|
||||
|
||||
|
|
|
@ -114,7 +114,7 @@ func (t *QueryTask) Execute() error {
|
|||
Base: &commonpb.MsgBase{
|
||||
SourceID: paramtable.GetNodeID(),
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Ids: reducedResult.Ids,
|
||||
FieldsData: reducedResult.FieldsData,
|
||||
CostAggregation: &internalpb.CostAggregation{
|
||||
|
|
|
@ -166,7 +166,7 @@ func (t *SearchTask) Execute() error {
|
|||
Base: &commonpb.MsgBase{
|
||||
SourceID: paramtable.GetNodeID(),
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
MetricType: req.GetReq().GetMetricType(),
|
||||
NumQueries: t.originNqs[i],
|
||||
TopK: t.originTopks[i],
|
||||
|
@ -219,7 +219,7 @@ func (t *SearchTask) Execute() error {
|
|||
Base: &commonpb.MsgBase{
|
||||
SourceID: paramtable.GetNodeID(),
|
||||
},
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
MetricType: req.GetReq().GetMetricType(),
|
||||
NumQueries: t.originNqs[i],
|
||||
TopK: t.originTopks[i],
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
||||
mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
)
|
||||
|
||||
func TestServerBroker_ReleaseCollection(t *testing.T) {
|
||||
|
@ -228,7 +229,7 @@ func TestServerBroker_GetSegmentIndexState(t *testing.T) {
|
|||
c := newTestCore(withValidDataCoord())
|
||||
c.dataCoord.(*mockDataCoord).GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
|
||||
return &indexpb.GetSegmentIndexStateResponse{
|
||||
Status: succStatus(),
|
||||
Status: merr.Success(),
|
||||
States: []*indexpb.SegmentIndexState{
|
||||
{
|
||||
SegmentID: 1,
|
||||
|
@ -353,12 +354,13 @@ func TestServerBroker_GcConfirm(t *testing.T) {
|
|||
|
||||
t.Run("non success", func(t *testing.T) {
|
||||
dc := mocks.NewMockDataCoordClient(t)
|
||||
err := errors.New("mock error")
|
||||
dc.On("GcConfirm",
|
||||
mock.Anything, // context.Context
|
||||
mock.Anything, // *datapb.GcConfirmRequest
|
||||
mock.Anything,
|
||||
).Return(
|
||||
&datapb.GcConfirmResponse{Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GcConfirm")},
|
||||
&datapb.GcConfirmResponse{Status: merr.Status(err)},
|
||||
nil)
|
||||
c := newTestCore(withDataCoord(dc))
|
||||
broker := newServerBroker(c)
|
||||
|
@ -372,7 +374,7 @@ func TestServerBroker_GcConfirm(t *testing.T) {
|
|||
mock.Anything, // *datapb.GcConfirmRequest
|
||||
mock.Anything,
|
||||
).Return(
|
||||
&datapb.GcConfirmResponse{Status: succStatus(), GcFinished: true},
|
||||
&datapb.GcConfirmResponse{Status: merr.Success(), GcFinished: true},
|
||||
nil)
|
||||
c := newTestCore(withDataCoord(dc))
|
||||
broker := newServerBroker(c)
|
||||
|
|
|
@ -708,11 +708,11 @@ func Test_createCollectionTask_Execute(t *testing.T) {
|
|||
StateCode: commonpb.StateCode_Healthy,
|
||||
},
|
||||
SubcomponentStates: nil,
|
||||
Status: succStatus(),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
|
||||
return &datapb.WatchChannelsResponse{Status: succStatus()}, nil
|
||||
return &datapb.WatchChannelsResponse{Status: merr.Success()}, nil
|
||||
}
|
||||
|
||||
core := newTestCore(withValidIDAllocator(),
|
||||
|
|
|
@ -462,7 +462,7 @@ func TestGarbageCollector_RemoveCreatingPartition(t *testing.T) {
|
|||
})
|
||||
|
||||
qc := mocks.NewMockQueryCoordClient(t)
|
||||
qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
core := newTestCore(withTtSynchronizer(ticker),
|
||||
withMeta(meta),
|
||||
|
@ -488,7 +488,7 @@ func TestGarbageCollector_RemoveCreatingPartition(t *testing.T) {
|
|||
|
||||
qc := mocks.NewMockQueryCoordClient(t)
|
||||
qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(merr.Status(nil), fmt.Errorf("mock err")).
|
||||
Return(merr.Success(), fmt.Errorf("mock err")).
|
||||
Run(func(ctx context.Context, req *querypb.ReleasePartitionsRequest, opts ...grpc.CallOption) {
|
||||
signal <- struct{}{}
|
||||
})
|
||||
|
@ -521,7 +521,7 @@ func TestGarbageCollector_RemoveCreatingPartition(t *testing.T) {
|
|||
})
|
||||
|
||||
qc := mocks.NewMockQueryCoordClient(t)
|
||||
qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
|
||||
qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything).Return(merr.Success(), nil)
|
||||
|
||||
core := newTestCore(withTtSynchronizer(ticker),
|
||||
withMeta(meta),
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
)
|
||||
|
||||
// hasCollectionTask has collection request task
|
||||
|
@ -39,7 +40,7 @@ func (t *hasCollectionTask) Prepare(ctx context.Context) error {
|
|||
|
||||
// Execute task execution
|
||||
func (t *hasCollectionTask) Execute(ctx context.Context) error {
|
||||
t.Rsp.Status = succStatus()
|
||||
t.Rsp.Status = merr.Success()
|
||||
ts := getTravelTs(t.Req)
|
||||
// TODO: what if err != nil && common.IsCollectionNotExistError == false, should we consider this RPC as failure?
|
||||
_, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.GetCollectionName(), ts)
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
@@ -40,12 +41,12 @@ func (t *hasPartitionTask) Prepare(ctx context.Context) error {

// Execute task execution
func (t *hasPartitionTask) Execute(ctx context.Context) error {
t.Rsp.Status = succStatus()
t.Rsp.Status = merr.Success()
t.Rsp.Value = false
// TODO: why HasPartitionRequest doesn't contain Timestamp but other requests do.
coll, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.CollectionName, typeutil.MaxTimestamp)
if err != nil {
t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
t.Rsp.Status = merr.Status(err)
return err
}
for _, part := range coll.Partitions {
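In the rootcoord tasks the hand-built failStatus(code, msg) calls give way to merr.Status(err), which derives the code from the wrapped error; the matching test below asserts merr.ErrCollectionNotFound instead of a raw ErrorCode. A small sketch of that mapping; statusFor is an illustrative helper, not a function in the repository:

package example

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

// statusFor sketches the replacement of failStatus(code, msg) with merr.Status(err):
// a wrapped error such as merr.WrapErrCollectionNotFound("coll") carries its own
// code, so callers stop hand-picking commonpb.ErrorCode values.
func statusFor(err error) *commonpb.Status {
	if err != nil {
		return merr.Status(err)
	}
	return merr.Success()
}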
@ -27,6 +27,7 @@ import (
|
|||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
)
|
||||
|
||||
func Test_hasPartitionTask_Prepare(t *testing.T) {
|
||||
|
@ -57,7 +58,9 @@ func Test_hasPartitionTask_Prepare(t *testing.T) {
|
|||
|
||||
func Test_hasPartitionTask_Execute(t *testing.T) {
|
||||
t.Run("fail to get collection", func(t *testing.T) {
|
||||
core := newTestCore(withInvalidMeta())
|
||||
metaTable := mockrootcoord.NewIMetaTable(t)
|
||||
metaTable.EXPECT().GetCollectionByName(mock.Anything, mock.Anything, "test coll", mock.Anything).Return(nil, merr.WrapErrCollectionNotFound("test coll"))
|
||||
core := newTestCore(withMeta(metaTable))
|
||||
task := &hasPartitionTask{
|
||||
baseTask: newBaseTask(context.Background(), core),
|
||||
Req: &milvuspb.HasPartitionRequest{
|
||||
|
@ -70,7 +73,8 @@ func Test_hasPartitionTask_Execute(t *testing.T) {
|
|||
}
|
||||
err := task.Execute(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
|
||||
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
|
||||
assert.ErrorIs(t, merr.Error(task.Rsp.GetStatus()), merr.ErrCollectionNotFound)
|
||||
assert.False(t, task.Rsp.GetValue())
|
||||
})
|
||||
|
||||
|
|
|
@ -432,7 +432,7 @@ func (m *importManager) importJob(ctx context.Context, req *milvuspb.ImportReque
|
|||
}
|
||||
|
||||
resp := &milvuspb.ImportResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Tasks: make([]int64, 0),
|
||||
}
|
||||
|
||||
|
@ -730,7 +730,7 @@ func (m *importManager) setCollectionPartitionName(dbName string, colID, partID
|
|||
}
|
||||
|
||||
func (m *importManager) copyTaskInfo(input *datapb.ImportTaskInfo, output *milvuspb.GetImportStateResponse) {
|
||||
output.Status = merr.Status(nil)
|
||||
output.Status = merr.Success()
|
||||
|
||||
output.Id = input.GetId()
|
||||
output.CollectionId = input.GetCollectionId()
|
||||
|
@ -752,7 +752,7 @@ func (m *importManager) copyTaskInfo(input *datapb.ImportTaskInfo, output *milvu
|
|||
// getTaskState looks for task with the given ID and returns its import state.
|
||||
func (m *importManager) getTaskState(tID int64) *milvuspb.GetImportStateResponse {
|
||||
resp := &milvuspb.GetImportStateResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Infos: make([]*commonpb.KeyValuePair, 0),
|
||||
}
|
||||
// (1) Search in pending tasks list.
|
||||
|
|
|
@ -92,16 +92,16 @@ func TestImportManager_NewImportManager(t *testing.T) {
|
|||
callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
|
||||
if mockCallImportServiceErr {
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, errors.New("mock err")
|
||||
}
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
|
||||
return &datapb.GetSegmentStatesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
|
@ -346,17 +346,17 @@ func TestImportManager_TestEtcdCleanUp(t *testing.T) {
|
|||
callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
|
||||
if mockCallImportServiceErr {
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, errors.New("mock err")
|
||||
}
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
|
||||
return &datapb.GetSegmentStatesResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
|
@ -432,22 +432,22 @@ func TestImportManager_TestFlipTaskStateLoop(t *testing.T) {
|
|||
callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
|
||||
if mockCallImportServiceErr {
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, errors.New("mock err")
|
||||
}
|
||||
return &datapb.ImportTaskResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

callUnsetIsImportingState := func(context.Context, *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
return merr.Status(nil), nil
return merr.Success(), nil
}

flipPersistedTaskInterval = 20
@@ -517,7 +517,7 @@ func TestImportManager_ImportJob(t *testing.T) {
mockKv := memkv.NewMemoryKV()
callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
// nil request
@@ -572,7 +572,7 @@ func TestImportManager_ImportJob(t *testing.T) {

importServiceFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -601,7 +601,7 @@ func TestImportManager_ImportJob(t *testing.T) {
}
count++
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -665,7 +665,7 @@ func TestImportManager_AllDataNodesBusy(t *testing.T) {
if count < len(dnList) {
count++
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
DatanodeId: dnList[count-1],
}, nil
}
@@ -678,7 +678,7 @@ func TestImportManager_AllDataNodesBusy(t *testing.T) {

callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -738,7 +738,7 @@ func TestImportManager_TaskState(t *testing.T) {
mockKv := memkv.NewMemoryKV()
importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -749,7 +749,7 @@ func TestImportManager_TaskState(t *testing.T) {
}
callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -785,7 +785,7 @@ func TestImportManager_TaskState(t *testing.T) {
}

mgr.callUnsetIsImportingState = func(context.Context, *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
return merr.Status(nil), nil
return merr.Success(), nil
}
// index doesn't exist, the persist task will be set to completed
ti, err := mgr.updateTaskInfo(info)
@@ -842,7 +842,7 @@ func TestImportManager_AllocFail(t *testing.T) {
mockKv := memkv.NewMemoryKV()
importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -854,7 +854,7 @@ func TestImportManager_AllocFail(t *testing.T) {

callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callGetSegmentStates, nil, nil)
@@ -887,7 +887,7 @@ func TestImportManager_ListAllTasks(t *testing.T) {

callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

@@ -996,7 +996,7 @@ func TestImportManager_ListAllTasks(t *testing.T) {
// accept tasks to working list
mgr.callImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
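Note: every hunk above swaps merr.Status(nil) for merr.Success() in the mocked callbacks. Below is a minimal standalone sketch of why the two are interchangeable on the success path; it is not part of the commit and assumes only that the public module paths github.com/milvus-io/milvus/pkg/util/merr and github.com/cockroachdb/errors resolve.

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

func main() {
	// merr.Success() is the explicit spelling of the old merr.Status(nil): an OK status.
	ok := merr.Success()
	fmt.Println(ok.GetErrorCode(), ok.GetReason())

	// A non-nil error still goes through merr.Status, which fills ErrorCode and Reason
	// instead of hand-building a commonpb.Status literal.
	bad := merr.Status(errors.New("mock error"))
	fmt.Println(bad.GetErrorCode(), bad.GetReason())
}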
@@ -19,8 +19,8 @@ package rootcoord
import (
"context"

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/util/merr"
)

type listDatabaseTask struct {
@@ -34,10 +34,10 @@ func (t *listDatabaseTask) Prepare(ctx context.Context) error {
}

func (t *listDatabaseTask) Execute(ctx context.Context) error {
t.Resp.Status = succStatus()
t.Resp.Status = merr.Success()
ret, err := t.core.meta.ListDatabases(ctx, t.GetTs())
if err != nil {
t.Resp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
t.Resp.Status = merr.Status(err)
return err
}

@@ -75,7 +75,7 @@ func (c *Core) getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetric
}

return &milvuspb.GetMetricsResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.RootCoordRole, c.session.ServerID),
}, nil

@@ -43,6 +43,7 @@ import (
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/retry"
@@ -383,7 +384,7 @@ func (m mockProxy) GetComponentStates(ctx context.Context, req *milvuspb.GetComp
func newMockProxy() *mockProxy {
r := &mockProxy{}
r.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
return r
}
@@ -412,7 +413,7 @@ func withValidProxyManager() Opt {
}
p := newMockProxy()
p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
p.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
@@ -431,7 +432,7 @@ func withInvalidProxyManager() Opt {
}
p := newMockProxy()
p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock InvalidateCollectionMetaCache")
return merr.Success(), errors.New("error mock InvalidateCollectionMetaCache")
}
p.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
@@ -569,10 +570,11 @@ func withQueryCoord(qc types.QueryCoordClient) Opt {

func withUnhealthyQueryCoord() Opt {
qc := &mocks.MockQueryCoordClient{}
err := errors.New("mock error")
qc.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(
&milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Abnormal},
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"),
Status: merr.Status(err),
}, retry.Unrecoverable(errors.New("error mock GetComponentStates")),
)
return withQueryCoord(qc)
@@ -583,7 +585,7 @@ func withInvalidQueryCoord() Opt {
qc.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(
&milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil,
)
qc.EXPECT().ReleaseCollection(mock.Anything, mock.Anything).Return(
@@ -602,16 +604,17 @@ func withFailedQueryCoord() Opt {
qc.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(
&milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil,
)
err := errors.New("mock error")
qc.EXPECT().ReleaseCollection(mock.Anything, mock.Anything).Return(
failStatus(commonpb.ErrorCode_UnexpectedError, "mock release collection error"), nil,
merr.Status(err), nil,
)

qc.EXPECT().GetSegmentInfo(mock.Anything, mock.Anything).Return(
&querypb.GetSegmentInfoResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock get segment info error"),
Status: merr.Status(err),
}, nil,
)

@@ -623,25 +626,25 @@ func withValidQueryCoord() Opt {
qc.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(
&milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil,
)
qc.EXPECT().ReleaseCollection(mock.Anything, mock.Anything).Return(
succStatus(), nil,
merr.Success(), nil,
)

qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(
succStatus(), nil,
merr.Success(), nil,
)

qc.EXPECT().GetSegmentInfo(mock.Anything, mock.Anything).Return(
&querypb.GetSegmentInfoResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil,
)

qc.EXPECT().SyncNewCreatedPartition(mock.Anything, mock.Anything).Return(
succStatus(), nil,
merr.Success(), nil,
)

return withQueryCoord(qc)
@@ -686,10 +689,11 @@ func withDataCoord(dc types.DataCoordClient) Opt {

func withUnhealthyDataCoord() Opt {
dc := newMockDataCoord()
err := errors.New("mock error")
dc.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Abnormal},
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"),
Status: merr.Status(err),
}, retry.Unrecoverable(errors.New("error mock GetComponentStates"))
}
return withDataCoord(dc)
@@ -700,7 +704,7 @@ func withInvalidDataCoord() Opt {
dc.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
@@ -735,22 +739,23 @@ func withFailedDataCoord() Opt {
dc.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil
}
err := errors.New("mock error")
dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
return &datapb.WatchChannelsResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock watch channels error"),
Status: merr.Status(err),
}, nil
}
dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
return &datapb.FlushResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock flush error"),
Status: merr.Status(err),
}, nil
}
dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock import error"),
Status: merr.Status(err),
}, nil
}
dc.UnsetIsImportingStateFunc = func(ctx context.Context, req *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
@@ -760,15 +765,15 @@ func withFailedDataCoord() Opt {
}, nil
}
dc.broadCastAlteredCollectionFunc = func(ctx context.Context, req *datapb.AlterCollectionRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "mock broadcast altered collection error"), nil
return merr.Status(err), nil
}
dc.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
return &indexpb.GetSegmentIndexStateResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock GetSegmentIndexStateFunc fail"),
Status: merr.Status(err),
}, nil
}
dc.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "mock DropIndexFunc fail"), nil
return merr.Status(err), nil
}
return withDataCoord(dc)
}
@@ -778,37 +783,37 @@ func withValidDataCoord() Opt {
dc.GetComponentStatesFunc = func(ctx context.Context) (*milvuspb.ComponentStates, error) {
return &milvuspb.ComponentStates{
State: &milvuspb.ComponentInfo{StateCode: commonpb.StateCode_Healthy},
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
return &datapb.WatchChannelsResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
return &datapb.FlushResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.UnsetIsImportingStateFunc = func(ctx context.Context, req *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
dc.broadCastAlteredCollectionFunc = func(ctx context.Context, req *datapb.AlterCollectionRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
dc.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
return &indexpb.GetSegmentIndexStateResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
return withDataCoord(dc)
}
@@ -61,7 +61,7 @@ func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *
}
p.collArray = append(p.collArray, request.CollectionName)
p.collIDs = append(p.collIDs, request.CollectionID)
return merr.Status(nil), nil
return merr.Success(), nil
}

func (p *proxyMock) GetCollArray() []string {
@@ -88,11 +88,11 @@ func (p *proxyMock) InvalidateCredentialCache(ctx context.Context, request *prox
if p.returnGrpcError {
return nil, fmt.Errorf("grpc error")
}
return merr.Status(nil), nil
return merr.Success(), nil
}

func (p *proxyMock) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return merr.Status(nil), nil
return merr.Success(), nil
}

func TestProxyClientManager_GetProxyClients(t *testing.T) {
@@ -173,7 +173,7 @@ func TestProxyClientManager_InvalidateCollectionMetaCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock InvalidateCollectionMetaCache")
return merr.Success(), errors.New("error mock InvalidateCollectionMetaCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -185,8 +185,9 @@ func TestProxyClientManager_InvalidateCollectionMetaCache(t *testing.T) {
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
mockErr := errors.New("mock error")
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
return merr.Status(mockErr), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -199,7 +200,7 @@ func TestProxyClientManager_InvalidateCollectionMetaCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -221,7 +222,7 @@ func TestProxyClientManager_InvalidateCredentialCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock InvalidateCredentialCache")
return merr.Success(), errors.New("error mock InvalidateCredentialCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -233,8 +234,9 @@ func TestProxyClientManager_InvalidateCredentialCache(t *testing.T) {
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
mockErr := errors.New("mock error")
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
return merr.Status(mockErr), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -247,7 +249,7 @@ func TestProxyClientManager_InvalidateCredentialCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -269,7 +271,7 @@ func TestProxyClientManager_RefreshPolicyInfoCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock RefreshPolicyInfoCache")
return merr.Success(), errors.New("error mock RefreshPolicyInfoCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -281,8 +283,9 @@ func TestProxyClientManager_RefreshPolicyInfoCache(t *testing.T) {
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
mockErr := errors.New("mock error")
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
return merr.Status(mockErr), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,
@@ -295,7 +298,7 @@ func TestProxyClientManager_RefreshPolicyInfoCache(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
return merr.Success(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.ProxyClient{
TestProxyID: p1,

@@ -23,6 +23,7 @@ import (
"testing"
"time"

"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"google.golang.org/grpc"
@@ -50,16 +51,17 @@ type dataCoordMockForQuota struct {
}

func (d *dataCoordMockForQuota) GetMetrics(ctx context.Context, request *milvuspb.GetMetricsRequest, opts ...grpc.CallOption) (*milvuspb.GetMetricsResponse, error) {
mockErr := errors.New("mock error")
if d.retErr {
return nil, fmt.Errorf("mock err")
return nil, mockErr
}
if d.retFailStatus {
return &milvuspb.GetMetricsResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock failure status"),
Status: merr.Status(mockErr),
}, nil
}
return &milvuspb.GetMetricsResponse{
Status: succStatus(),
Status: merr.Success(),
}, nil
}

@@ -87,7 +89,7 @@ func TestQuotaCenter(t *testing.T) {
qc := mocks.NewMockQueryCoordClient(t)
meta := mockrootcoord.NewIMetaTable(t)
meta.EXPECT().GetCollectionByID(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, merr.ErrCollectionNotFound).Maybe()
qc.EXPECT().GetMetrics(mock.Anything, mock.Anything).Return(&milvuspb.GetMetricsResponse{Status: succStatus()}, nil)
qc.EXPECT().GetMetrics(mock.Anything, mock.Anything).Return(&milvuspb.GetMetricsResponse{Status: merr.Success()}, nil)
quotaCenter := NewQuotaCenter(pcm, qc, &dataCoordMockForQuota{}, core.tsoAllocator, meta)
err = quotaCenter.syncMetrics()
assert.Error(t, err) // for empty response
@@ -106,7 +108,7 @@ func TestQuotaCenter(t *testing.T) {
assert.Error(t, err)

qc.EXPECT().GetMetrics(mock.Anything, mock.Anything).Return(&milvuspb.GetMetricsResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock failure status"),
Status: merr.Status(err),
}, nil)
quotaCenter = NewQuotaCenter(pcm, qc, &dataCoordMockForQuota{}, core.tsoAllocator, meta)
err = quotaCenter.syncMetrics()

@@ -22,7 +22,6 @@ import (
"math/rand"
"os"
"sync"
"sync/atomic"
"syscall"
"time"

@@ -30,6 +29,7 @@ import (
"github.com/samber/lo"
"github.com/tikv/client-go/v2/txnkv"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/atomic"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"

@@ -59,7 +59,6 @@ import (
"github.com/milvus-io/milvus/pkg/util"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/crypto"
"github.com/milvus-io/milvus/pkg/util/errorutil"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/logutil"
"github.com/milvus-io/milvus/pkg/util/merr"
@@ -118,7 +117,7 @@ type Core struct {

quotaCenter *QuotaCenter

stateCode atomic.Value
stateCode atomic.Int32
initOnce sync.Once
startOnce sync.Once
session *sessionutil.Session
@@ -152,14 +151,12 @@ func NewCore(c context.Context, factory dependency.Factory) (*Core, error) {

// UpdateStateCode update state code
func (c *Core) UpdateStateCode(code commonpb.StateCode) {
c.stateCode.Store(code)
c.stateCode.Store(int32(code))
log.Info("update rootcoord state", zap.String("state", code.String()))
}

func (c *Core) checkHealthy() (commonpb.StateCode, bool) {
code := c.stateCode.Load().(commonpb.StateCode)
ok := code == commonpb.StateCode_Healthy
return code, ok
func (c *Core) GetStateCode() commonpb.StateCode {
return commonpb.StateCode(c.stateCode.Load())
}

func (c *Core) sendTimeTick(t Timestamp, reason string) error {
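Note: the two hunks above replace the atomic.Value-based state code with an atomic.Int32 plus a GetStateCode accessor, so Load no longer needs a type assertion. A small self-contained sketch of the same shape follows; it uses the standard library sync/atomic.Int32 and a local StateCode stand-in rather than go.uber.org/atomic and commonpb.StateCode, and is illustrative only.

package main

import (
	"fmt"
	"sync/atomic" // the real code imports go.uber.org/atomic; the Int32 Load/Store shape is the same
)

// StateCode stands in for commonpb.StateCode in this sketch.
type StateCode int32

const (
	Abnormal StateCode = iota
	Healthy
)

type core struct {
	stateCode atomic.Int32 // previously an atomic.Value holding a StateCode
}

func (c *core) UpdateStateCode(code StateCode) { c.stateCode.Store(int32(code)) }
func (c *core) GetStateCode() StateCode        { return StateCode(c.stateCode.Load()) }

func main() {
	c := &core{}
	c.UpdateStateCode(Healthy)
	fmt.Println(c.GetStateCode() == Healthy) // true, with no interface conversion on Load
}

Storing the int32 directly also avoids the panic an atomic.Value type assertion could raise before the first Store.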
@@ -182,7 +179,7 @@ func (c *Core) sendTimeTick(t Timestamp, reason string) error {
}

func (c *Core) sendMinDdlTsAsTt() {
code := c.stateCode.Load().(commonpb.StateCode)
code := c.GetStateCode()
if code != commonpb.StateCode_Healthy {
log.Warn("rootCoord is not healthy, skip send timetick")
return
@@ -757,7 +754,7 @@ func (c *Core) Stop() error {

// GetComponentStates get states of components
func (c *Core) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) {
code := c.stateCode.Load().(commonpb.StateCode)
code := c.GetStateCode()

nodeID := common.NotRegisteredID
if c.session != nil && c.session.Registered() {
@@ -772,7 +769,7 @@ func (c *Core) GetComponentStates(ctx context.Context, req *milvuspb.GetComponen
StateCode: code,
ExtraInfo: nil,
},
Status: merr.Status(nil),
Status: merr.Success(),
SubcomponentStates: []*milvuspb.ComponentInfo{
{
NodeID: nodeID,
@@ -787,7 +784,7 @@ func (c *Core) GetComponentStates(ctx context.Context, req *milvuspb.GetComponen
// GetTimeTickChannel get timetick channel name
func (c *Core) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Value: Params.CommonCfg.RootCoordTimeTick.GetValue(),
}, nil
}
@@ -795,14 +792,14 @@ func (c *Core) GetTimeTickChannel(ctx context.Context, req *internalpb.GetTimeTi
// GetStatisticsChannel get statistics channel name
func (c *Core) GetStatisticsChannel(ctx context.Context, req *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Value: Params.CommonCfg.RootCoordStatistics.GetValue(),
}, nil
}

func (c *Core) CreateDatabase(ctx context.Context, in *milvuspb.CreateDatabaseRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

method := "CreateDatabase"
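Note: from CreateDatabase onward, every handler swaps the checkHealthy()/WrapErrServiceNotReady pair for a single merr.CheckHealthy guard on GetStateCode. The sketch below shows the resulting call shape against a hypothetical stand-in interface for *Core; it assumes only the merr helpers already used in the hunks above and is not code from the commit.

package rootcoordsketch

import (
	"context"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

// service is a hypothetical stand-in for *Core: anything exposing GetStateCode.
type service interface {
	GetStateCode() commonpb.StateCode
}

// guardedOp shows the guard shape used by the RPC handlers: the gRPC error stays
// nil and any failure travels inside the returned status.
func guardedOp(ctx context.Context, s service) (*commonpb.Status, error) {
	if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
		return merr.Status(err), nil
	}
	// ... the real handler work would run here ...
	return merr.Success(), nil
}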
@@ -824,7 +821,7 @@ func (c *Core) CreateDatabase(ctx context.Context, in *milvuspb.CreateDatabaseRe
zap.String("dbName", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
return merr.Status(err), nil
}

if err := t.WaitToFinish(); err != nil {
@@ -835,7 +832,7 @@ func (c *Core) CreateDatabase(ctx context.Context, in *milvuspb.CreateDatabaseRe
zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@@ -844,12 +841,12 @@ func (c *Core) CreateDatabase(ctx context.Context, in *milvuspb.CreateDatabaseRe
log.Ctx(ctx).Info("done to create database", zap.String("role", typeutil.RootCoordRole),
zap.String("dbName", in.GetDbName()),
zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))
return succStatus(), nil
return merr.Success(), nil
}

func (c *Core) DropDatabase(ctx context.Context, in *milvuspb.DropDatabaseRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

method := "DropDatabase"
@@ -870,7 +867,7 @@ func (c *Core) DropDatabase(ctx context.Context, in *milvuspb.DropDatabaseReques
zap.String("dbName", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
return merr.Status(err), nil
}

if err := t.WaitToFinish(); err != nil {
@@ -880,7 +877,7 @@ func (c *Core) DropDatabase(ctx context.Context, in *milvuspb.DropDatabaseReques
zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@@ -889,12 +886,12 @@ func (c *Core) DropDatabase(ctx context.Context, in *milvuspb.DropDatabaseReques
log.Ctx(ctx).Info("done to drop database", zap.String("role", typeutil.RootCoordRole),
zap.String("dbName", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID()),
zap.Uint64("ts", t.GetTs()))
return succStatus(), nil
return merr.Success(), nil
}

func (c *Core) ListDatabases(ctx context.Context, in *milvuspb.ListDatabasesRequest) (*milvuspb.ListDatabasesResponse, error) {
if code, ok := c.checkHealthy(); !ok {
ret := &milvuspb.ListDatabasesResponse{Status: merr.Status(merr.WrapErrServiceNotReady(code.String()))}
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
ret := &milvuspb.ListDatabasesResponse{Status: merr.Status(err)}
return ret, nil
}
method := "ListDatabases"
@@ -914,7 +911,7 @@ func (c *Core) ListDatabases(ctx context.Context, in *milvuspb.ListDatabasesRequ
log.Info("failed to enqueue request to list databases", zap.Error(err))
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return &milvuspb.ListDatabasesResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ListDatabases failed: "+err.Error()),
Status: merr.Status(err),
}, nil
}

@@ -922,7 +919,7 @@ func (c *Core) ListDatabases(ctx context.Context, in *milvuspb.ListDatabasesRequ
log.Info("failed to list databases", zap.Error(err))
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return &milvuspb.ListDatabasesResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ListDatabases failed: "+err.Error()),
Status: merr.Status(err),
}, nil
}

@@ -934,8 +931,8 @@ func (c *Core) ListDatabases(ctx context.Context, in *milvuspb.ListDatabasesRequ

// CreateCollection create collection
func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.TotalLabel).Inc()
@@ -981,13 +978,13 @@ func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollecti
zap.String("role", typeutil.RootCoordRole),
zap.String("name", in.GetCollectionName()),
zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// DropCollection drop collection
func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.TotalLabel).Inc()
@@ -1030,14 +1027,14 @@ func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRe
log.Ctx(ctx).Info("done to drop collection", zap.String("role", typeutil.RootCoordRole),
zap.String("name", in.GetCollectionName()),
zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// HasCollection check collection existence
func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.BoolResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1090,7 +1087,7 @@ func (c *Core) describeCollection(ctx context.Context, in *milvuspb.DescribeColl
}

func convertModelToDesc(collInfo *model.Collection, aliases []string) *milvuspb.DescribeCollectionResponse {
resp := &milvuspb.DescribeCollectionResponse{Status: merr.Status(nil)}
resp := &milvuspb.DescribeCollectionResponse{Status: merr.Success()}

resp.Schema = &schemapb.CollectionSchema{
Name: collInfo.Name,
@@ -1120,9 +1117,9 @@ func convertModelToDesc(collInfo *model.Collection, aliases []string) *milvuspb.
}

func (c *Core) describeCollectionImpl(ctx context.Context, in *milvuspb.DescribeCollectionRequest, allowUnavailable bool) (*milvuspb.DescribeCollectionResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1143,7 +1140,7 @@ func (c *Core) describeCollectionImpl(ctx context.Context, in *milvuspb.Describe
t := &describeCollectionTask{
baseTask: newBaseTask(ctx, c),
Req: in,
Rsp: &milvuspb.DescribeCollectionResponse{Status: merr.Status(nil)},
Rsp: &milvuspb.DescribeCollectionResponse{Status: merr.Success()},
allowUnavailable: allowUnavailable,
}

@@ -1189,9 +1186,9 @@ func (c *Core) DescribeCollectionInternal(ctx context.Context, in *milvuspb.Desc

// ShowCollections list all collection names
func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1236,8 +1233,8 @@ func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollections
}

func (c *Core) AlterCollection(ctx context.Context, in *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues("AlterCollection", metrics.TotalLabel).Inc()
@@ -1282,13 +1279,13 @@ func (c *Core) AlterCollection(ctx context.Context, in *milvuspb.AlterCollection
zap.String("role", typeutil.RootCoordRole),
zap.String("name", in.GetCollectionName()),
zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// CreatePartition create partition
func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.TotalLabel).Inc()
@@ -1336,13 +1333,13 @@ func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartition
zap.String("collection", in.GetCollectionName()),
zap.String("partition", in.GetPartitionName()),
zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// DropPartition drop partition
func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.TotalLabel).Inc()
@@ -1389,14 +1386,14 @@ func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequ
zap.String("collection", in.GetCollectionName()),
zap.String("partition", in.GetPartitionName()),
zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// HasPartition check partition existence
func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.BoolResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1443,9 +1440,9 @@ func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionReques
}

func (c *Core) showPartitionsImpl(ctx context.Context, in *milvuspb.ShowPartitionsRequest, allowUnavailable bool) (*milvuspb.ShowPartitionsResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.ShowPartitionsResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1507,14 +1504,14 @@ func (c *Core) ShowPartitionsInternal(ctx context.Context, in *milvuspb.ShowPart
func (c *Core) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) {
// ShowSegments Only used in GetPersistentSegmentInfo, it's already deprecated for a long time.
// Though we continue to keep current logic, it's not right enough since RootCoord only contains indexed segments.
return &milvuspb.ShowSegmentsResponse{Status: merr.Status(nil)}, nil
return &milvuspb.ShowSegmentsResponse{Status: merr.Success()}, nil
}

// AllocTimestamp alloc timestamp
func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestampRequest) (*rootcoordpb.AllocTimestampResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &rootcoordpb.AllocTimestampResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -1532,7 +1529,7 @@ func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestam
ts = ts - uint64(in.GetCount()) + 1
metrics.RootCoordTimestamp.Set(float64(ts))
return &rootcoordpb.AllocTimestampResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Timestamp: ts,
Count: in.GetCount(),
}, nil
@@ -1540,9 +1537,9 @@ func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestam

// AllocID alloc ids
func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*rootcoordpb.AllocIDResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &rootcoordpb.AllocIDResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}
start, _, err := c.idAllocator.Alloc(in.Count)
@@ -1559,7 +1556,7 @@ func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*ro

metrics.RootCoordIDAllocCounter.Add(float64(in.Count))
return &rootcoordpb.AllocIDResponse{
Status: merr.Status(nil),
Status: merr.Success(),
ID: start,
Count: in.Count,
}, nil
@@ -1568,9 +1565,9 @@ func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*ro
// UpdateChannelTimeTick used to handle ChannelTimeTickMsg
func (c *Core) UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) {
log := log.Ctx(ctx)
if code, ok := c.checkHealthy(); !ok {
log.Warn("failed to updateTimeTick because rootcoord is not healthy", zap.Any("state", code))
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
log.Warn("failed to updateTimeTick because rootcoord is not healthy", zap.Error(err))
return merr.Status(err), nil
}
if in.Base.MsgType != commonpb.MsgType_TimeTick {
log.Warn("failed to updateTimeTick because base messasge is not timetick, state", zap.Any("base message type", in.Base.MsgType))
@@ -1583,26 +1580,26 @@ func (c *Core) UpdateChannelTimeTick(ctx context.Context, in *internalpb.Channel
zap.Error(err))
return merr.Status(err), nil
}
return merr.Status(nil), nil
return merr.Success(), nil
}

// InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies.
func (c *Core) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}
err := c.proxyClientManager.InvalidateCollectionMetaCache(ctx, in)
if err != nil {
return merr.Status(err), nil
}
return merr.Status(nil), nil
return merr.Success(), nil
}

// ShowConfigurations returns the configurations of RootCoord matching req.Pattern
func (c *Core) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &internalpb.ShowConfigurationsResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
Configuations: nil,
}, nil
}
@ -1617,16 +1614,16 @@ func (c *Core) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfi
|
|||
}
|
||||
|
||||
return &internalpb.ShowConfigurationsResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Configuations: configList,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetMetrics get metrics
|
||||
func (c *Core) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
|
||||
Status: merr.Status(err),
|
||||
Response: "",
|
||||
}, nil
|
||||
}
|
||||
|
@ -1673,8 +1670,8 @@ func (c *Core) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) (
|
|||
|
||||
// CreateAlias create collection alias
|
||||
func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest) (*commonpb.Status, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.TotalLabel).Inc()
|
||||
|
@ -1722,13 +1719,13 @@ func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest)
|
|||
zap.String("alias", in.GetAlias()),
|
||||
zap.String("collection", in.GetCollectionName()),
|
||||
zap.Uint64("ts", t.GetTs()))
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// DropAlias drop collection alias
|
||||
func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc()
|
||||
|
@ -1772,13 +1769,13 @@ func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*c
|
|||
zap.String("role", typeutil.RootCoordRole),
|
||||
zap.String("alias", in.GetAlias()),
|
||||
zap.Uint64("ts", t.GetTs()))
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// AlterAlias alter collection alias
|
||||
func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (*commonpb.Status, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc()
|
||||
|
@ -1826,14 +1823,14 @@ func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (
|
|||
zap.String("alias", in.GetAlias()),
|
||||
zap.String("collection", in.GetCollectionName()),
|
||||
zap.Uint64("ts", t.GetTs()))
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// Import imports large files (json, numpy, etc.) on MinIO/S3 storage into Milvus storage.
|
||||
func (c *Core) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &milvuspb.ImportResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -1915,9 +1912,9 @@ func (c *Core) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvus
|
|||
|
||||
// GetImportState returns the current state of an import task.
|
||||
func (c *Core) GetImportState(ctx context.Context, req *milvuspb.GetImportStateRequest) (*milvuspb.GetImportStateResponse, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &milvuspb.GetImportStateResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
return c.importManager.getTaskState(req.GetTask()), nil
|
||||
|
@ -1925,9 +1922,9 @@ func (c *Core) GetImportState(ctx context.Context, req *milvuspb.GetImportStateR
|
|||
|
||||
// ListImportTasks returns id array of all import tasks.
|
||||
func (c *Core) ListImportTasks(ctx context.Context, req *milvuspb.ListImportTasksRequest) (*milvuspb.ListImportTasksResponse, error) {
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &milvuspb.ListImportTasksResponse{
|
||||
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
|
||||
Status: merr.Status(err),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -1959,7 +1956,7 @@ func (c *Core) ListImportTasks(ctx context.Context, req *milvuspb.ListImportTask
|
|||
}
|
||||
|
||||
resp := &milvuspb.ListImportTasksResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Tasks: tasks,
|
||||
}
|
||||
return resp, nil
|
||||
|
@ -1970,8 +1967,8 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
|
|||
log.Info("RootCoord receive import state report",
|
||||
zap.Int64("task ID", ir.GetTaskId()),
|
||||
zap.Any("import state", ir.GetState()))
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
// This method update a busy node to idle node, and send import task to idle node
|
||||
|
@ -2023,7 +2020,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
|
|||
}
|
||||
}
|
||||
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// ExpireCredCache will call invalidate credential cache
|
||||
|
@ -2063,8 +2060,8 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden
|
|||
tr := timerecord.NewTimeRecorder(method)
|
||||
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.String("username", credInfo.Username))
|
||||
ctxLog.Debug(method)
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
|
||||
// insert to db
|
||||
|
@ -2086,7 +2083,7 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden
|
|||
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
|
||||
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
metrics.RootCoordNumOfCredentials.Inc()
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// GetCredential get credential by username
|
||||
|
@ -2096,8 +2093,8 @@ func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialR
|
|||
tr := timerecord.NewTimeRecorder(method)
|
||||
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.String("username", in.Username))
|
||||
ctxLog.Debug(method)
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return &rootcoordpb.GetCredentialResponse{Status: merr.Status(merr.WrapErrServiceNotReady(code.String()))}, nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &rootcoordpb.GetCredentialResponse{Status: merr.Status(err)}, nil
|
||||
}
|
||||
|
||||
credInfo, err := c.meta.GetCredential(in.Username)
|
||||
|
@ -2113,7 +2110,7 @@ func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialR
|
|||
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
|
||||
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
return &rootcoordpb.GetCredentialResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Username: credInfo.Username,
|
||||
Password: credInfo.EncryptedPassword,
|
||||
}, nil
|
||||
|
@ -2126,8 +2123,8 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden
|
|||
tr := timerecord.NewTimeRecorder(method)
|
||||
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.String("username", credInfo.Username))
|
||||
ctxLog.Debug(method)
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
// update data on storage
|
||||
err := c.meta.AlterCredential(credInfo)
|
||||
|
@ -2147,7 +2144,7 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden
|
|||
|
||||
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
|
||||
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
return merr.Status(nil), nil
|
||||
return merr.Success(), nil
|
||||
}
|
||||
|
||||
// DeleteCredential delete a user
|
||||
|
@ -2157,8 +2154,8 @@ func (c *Core) DeleteCredential(ctx context.Context, in *milvuspb.DeleteCredenti
|
|||
tr := timerecord.NewTimeRecorder(method)
|
||||
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.String("username", in.Username))
|
||||
ctxLog.Debug(method)
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return merr.Status(err), nil
|
||||
}
|
||||
var status *commonpb.Status
|
||||
defer func() {
|
||||
|
@ -2205,7 +2202,7 @@ func (c *Core) DeleteCredential(ctx context.Context, in *milvuspb.DeleteCredenti
|
|||
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
|
||||
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
metrics.RootCoordNumOfCredentials.Dec()
|
||||
status = merr.Status(nil)
|
||||
status = merr.Success()
|
||||
return status, nil
|
||||
}
|
||||
|
||||
|
@ -2216,8 +2213,8 @@ func (c *Core) ListCredUsers(ctx context.Context, in *milvuspb.ListCredUsersRequ
|
|||
tr := timerecord.NewTimeRecorder(method)
|
||||
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole))
|
||||
ctxLog.Debug(method)
|
||||
if code, ok := c.checkHealthy(); !ok {
|
||||
return &milvuspb.ListCredUsersResponse{Status: merr.Status(merr.WrapErrServiceNotReady(code.String()))}, nil
|
||||
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
|
||||
return &milvuspb.ListCredUsersResponse{Status: merr.Status(err)}, nil
|
||||
}
|
||||
|
||||
credInfo, err := c.meta.ListCredentialUsernames()
|
||||
|
@ -2233,7 +2230,7 @@ func (c *Core) ListCredUsers(ctx context.Context, in *milvuspb.ListCredUsersRequ
|
|||
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
|
||||
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
return &milvuspb.ListCredUsersResponse{
|
||||
Status: merr.Status(nil),
|
||||
Status: merr.Success(),
|
||||
Usernames: credInfo.Usernames,
|
||||
}, nil
|
||||
}
|
||||
|
@@ -2250,8 +2247,8 @@ func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) (
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method + " begin")

if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}
entity := in.Entity

@@ -2267,7 +2264,7 @@ func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) (
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.RootCoordNumOfRoles.Inc()

return merr.Status(nil), nil
return merr.Success(), nil
}

// DropRole drop role

@@ -2284,8 +2281,8 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.String("role_name", in.RoleName))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}
if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil {
errMsg := "not found the role, maybe the role isn't existed or internal system error"

@@ -2330,7 +2327,7 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.RootCoordNumOfRoles.Dec()
return merr.Status(nil), nil
return merr.Success(), nil
}

// OperateUserRole operate the relationship between a user and a role

@@ -2346,8 +2343,8 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil {

@@ -2403,7 +2400,7 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole
ctxLog.Debug(method + " success")
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// SelectRole select role

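Each RPC entry point above migrates the same health guard: the checkHealthy plus WrapErrServiceNotReady pair becomes a single merr.CheckHealthy call on the current state code. A sketch of the new guard in isolation, assuming the surrounding rootcoord package; the method name exampleGuard is illustrative and the handler body is elided:

func (c *Core) exampleGuard(ctx context.Context, in *milvuspb.CreateRoleRequest) (*commonpb.Status, error) {
	// CheckHealthy returns a service-not-ready error when the coordinator
	// is not serving; merr.Status converts it into the RPC status.
	if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
		return merr.Status(err), nil
	}
	// ... handler body ...
	return merr.Success(), nil
}
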
@@ -2417,15 +2414,15 @@ func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) (
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
return &milvuspb.SelectRoleResponse{Status: merr.Status(merr.WrapErrServiceNotReady(code.String()))}, nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.SelectRoleResponse{Status: merr.Status(err)}, nil
}

if in.Role != nil {
if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.Role.Name}, false); err != nil {
if common.IsKeyNotExistError(err) {
return &milvuspb.SelectRoleResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
errMsg := "fail to select the role to check the role name"

@@ -2448,7 +2445,7 @@ func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) (
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return &milvuspb.SelectRoleResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Results: roleResults,
}, nil
}

@@ -2464,15 +2461,15 @@ func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) (
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
return &milvuspb.SelectUserResponse{Status: merr.Status(merr.WrapErrServiceNotReady(code.String()))}, nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.SelectUserResponse{Status: merr.Status(err)}, nil
}

if in.User != nil {
if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.User.Name}, false); err != nil {
if common.IsKeyNotExistError(err) {
return &milvuspb.SelectUserResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
errMsg := "fail to select the user to check the username"

@@ -2495,7 +2492,7 @@ func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) (
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return &milvuspb.SelectUserResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Results: userResults,
}, nil
}

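SelectRole and SelectUser keep their special case: a lookup that fails with a key-not-exist error still answers with a success status and an empty result set. A condensed sketch of that branch, assuming the rootcoord package; the lookup closure stands in for c.meta.SelectRole/SelectUser, and the final error branch is an assumption since the hunk cuts off after errMsg:

func selectSketch(lookup func() error) (*milvuspb.SelectRoleResponse, error) {
	if err := lookup(); err != nil {
		if common.IsKeyNotExistError(err) {
			// A missing role or user is not an RPC failure: reply success, no results.
			return &milvuspb.SelectRoleResponse{Status: merr.Success()}, nil
		}
		// Assumed: other lookup errors surface as a failed status.
		return &milvuspb.SelectRoleResponse{Status: merr.Status(err)}, nil
	}
	return &milvuspb.SelectRoleResponse{Status: merr.Success()}, nil
}
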
@@ -2573,8 +2570,8 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}
if in.Type != milvuspb.OperatePrivilegeType_Grant && in.Type != milvuspb.OperatePrivilegeType_Revoke {
errMsg := fmt.Sprintf("invalid operate privilege type, current type: %s, valid value: [%s, %s]", in.Type, milvuspb.OperatePrivilegeType_Grant, milvuspb.OperatePrivilegeType_Revoke)

@@ -2648,7 +2645,7 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile
ctxLog.Debug(method + " success")
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return merr.Status(nil), nil
return merr.Success(), nil
}

// SelectGrant select grant

@@ -2663,9 +2660,9 @@ func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest)
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.SelectGrantResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}
if in.Entity == nil {

@@ -2693,7 +2690,7 @@ func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest)
grantEntities, err := c.meta.SelectGrant(util.DefaultTenant, in.Entity)
if common.IsKeyNotExistError(err) {
return &milvuspb.SelectGrantResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}
if err != nil {

@@ -2708,7 +2705,7 @@ func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest)
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return &milvuspb.SelectGrantResponse{
Status: merr.Status(nil),
Status: merr.Success(),
Entities: grantEntities,
}, nil
}

@@ -2720,9 +2717,9 @@ func (c *Core) ListPolicy(ctx context.Context, in *internalpb.ListPolicyRequest)
ctxLog := log.Ctx(ctx).With(zap.String("role", typeutil.RootCoordRole), zap.Any("in", in))
ctxLog.Debug(method)

if code, ok := c.checkHealthy(); !ok {
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &internalpb.ListPolicyResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
}, nil
}

@@ -2747,15 +2744,15 @@ func (c *Core) ListPolicy(ctx context.Context, in *internalpb.ListPolicyRequest)
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return &internalpb.ListPolicyResponse{
Status: merr.Status(nil),
Status: merr.Success(),
PolicyInfos: policies,
UserRoles: userRoles,
}, nil
}

func (c *Core) RenameCollection(ctx context.Context, req *milvuspb.RenameCollectionRequest) (*commonpb.Status, error) {
if code, ok := c.checkHealthy(); !ok {
return merr.Status(merr.WrapErrServiceNotReady(code.String())), nil
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return merr.Status(err), nil
}

log := log.Ctx(ctx).With(zap.String("oldCollectionName", req.GetOldName()), zap.String("newCollectionName", req.GetNewName()))

@@ -2784,16 +2781,15 @@ func (c *Core) RenameCollection(ctx context.Context, req *milvuspb.RenameCollect
metrics.RootCoordDDLReqLatency.WithLabelValues("RenameCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))

log.Info("done to rename collection", zap.Uint64("ts", t.GetTs()))
return merr.Status(nil), nil
return merr.Success(), nil
}

func (c *Core) CheckHealth(ctx context.Context, in *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {
if code, ok := c.checkHealthy(); !ok {
reason := errorutil.UnHealthReason("rootcoord", c.session.ServerID, "rootcoord is unhealthy")
if err := merr.CheckHealthy(c.GetStateCode()); err != nil {
return &milvuspb.CheckHealthResponse{
Status: merr.Status(merr.WrapErrServiceNotReady(code.String())),
Status: merr.Status(err),
IsHealthy: false,
Reasons: []string{reason},
Reasons: []string{fmt.Sprintf("serverID=%d: %v", c.session.ServerID, err)},
}, nil
}

@@ -2806,24 +2802,28 @@ func (c *Core) CheckHealth(ctx context.Context, in *milvuspb.CheckHealthRequest)
proxyClient := proxyClient
group.Go(func() error {
sta, err := proxyClient.GetComponentStates(ctx, &milvuspb.GetComponentStatesRequest{})
isHealthy, reason := errorutil.UnHealthReasonWithComponentStatesOrErr("proxy", nodeID, sta, err)
if !isHealthy {
if err != nil {
return err
}

err = merr.AnalyzeState("Proxy", nodeID, sta)
if err != nil {
mu.Lock()
defer mu.Unlock()
errReasons = append(errReasons, reason)
errReasons = append(errReasons, err.Error())
}
return err
return nil
})
}

err := group.Wait()
if err != nil || len(errReasons) != 0 {
return &milvuspb.CheckHealthResponse{
Status: merr.Status(nil),
Status: merr.Success(),
IsHealthy: false,
Reasons: errReasons,
}, nil
}

return &milvuspb.CheckHealthResponse{Status: merr.Status(nil), IsHealthy: true, Reasons: errReasons}, nil
return &milvuspb.CheckHealthResponse{Status: merr.Success(), IsHealthy: true, Reasons: errReasons}, nil
}

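In CheckHealth the errorutil helpers disappear: a failed GetComponentStates call now fails the errgroup directly, while an unhealthy component state is turned into a reason string via merr.AnalyzeState and collected without aborting the other probes. A sketch of one worker goroutine, assuming group, mu, errReasons, nodeID and proxyClient are the surrounding declarations from the hunk above:

group.Go(func() error {
	sta, err := proxyClient.GetComponentStates(ctx, &milvuspb.GetComponentStatesRequest{})
	if err != nil {
		// Transport-level failure: abort the whole health check.
		return err
	}
	if err := merr.AnalyzeState("Proxy", nodeID, sta); err != nil {
		// Unhealthy state: record the reason but keep probing the other proxies.
		mu.Lock()
		defer mu.Unlock()
		errReasons = append(errReasons, err.Error())
	}
	return nil
})
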
@@ -971,7 +971,7 @@ func TestRootCoord_GetMetrics(t *testing.T) {
c := newTestCore(withHealthyCode(),
withMetricsCacheManager())
c.metricsCacheManager.UpdateSystemInfoMetrics(&milvuspb.GetMetricsResponse{
Status: succStatus(),
Status: merr.Success(),
Response: "cached response",
ComponentName: "cached component",
})

@@ -1451,36 +1451,36 @@ func TestCore_ReportImport(t *testing.T) {
StateCode: commonpb.StateCode_Healthy,
},
SubcomponentStates: nil,
Status: succStatus(),
Status: merr.Success(),
}, nil
}
dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
return &datapb.WatchChannelsResponse{Status: succStatus()}, nil
return &datapb.WatchChannelsResponse{Status: merr.Success()}, nil
}
dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
return &datapb.FlushResponse{Status: succStatus()}, nil
return &datapb.FlushResponse{Status: merr.Success()}, nil
}

mockCallImportServiceErr := false
callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
if mockCallImportServiceErr {
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, errors.New("mock err")
}
return &datapb.ImportTaskResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

callGetSegmentStates := func(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
return &datapb.GetSegmentStatesResponse{
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
}

callUnsetIsImportingState := func(context.Context, *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
return merr.Status(nil), nil
return merr.Success(), nil
}

t.Run("not healthy", func(t *testing.T) {

@@ -1551,7 +1551,7 @@ func TestCore_Rbac(t *testing.T) {
}

// not healthy.
c.stateCode.Store(commonpb.StateCode_Abnormal)
c.UpdateStateCode(commonpb.StateCode_Abnormal)

{
resp, err := c.CreateCredential(ctx, &internalpb.CredentialInfo{})

@@ -1647,7 +1647,7 @@ func TestCore_sendMinDdlTsAsTt(t *testing.T) {
withDdlTsLockManager(ddlManager),
withScheduler(sched))

c.stateCode.Store(commonpb.StateCode_Healthy)
c.UpdateStateCode(commonpb.StateCode_Healthy)
c.session.ServerID = TestRootCoordID
c.sendMinDdlTsAsTt() // no session.
ticker.addSession(&sessionutil.Session{SessionRaw: sessionutil.SessionRaw{ServerID: TestRootCoordID}})

@@ -1721,13 +1721,13 @@ func TestRootcoord_EnableActiveStandby(t *testing.T) {

err = core.Init()
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_StandBy, core.stateCode.Load().(commonpb.StateCode))
assert.Equal(t, commonpb.StateCode_StandBy, core.GetStateCode())
err = core.Start()
assert.NoError(t, err)
core.session.TriggerKill = false
err = core.Register()
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, core.stateCode.Load().(commonpb.StateCode))
assert.Equal(t, commonpb.StateCode_Healthy, core.GetStateCode())
resp, err := core.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeCollection,

@@ -1772,13 +1772,13 @@ func TestRootcoord_DisableActiveStandby(t *testing.T) {

err = core.Init()
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Initializing, core.stateCode.Load().(commonpb.StateCode))
assert.Equal(t, commonpb.StateCode_Initializing, core.GetStateCode())
err = core.Start()
assert.NoError(t, err)
core.session.TriggerKill = false
err = core.Register()
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, core.stateCode.Load().(commonpb.StateCode))
assert.Equal(t, commonpb.StateCode_Healthy, core.GetStateCode())
resp, err := core.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeCollection,

@@ -2062,8 +2062,7 @@ func TestCore_Stop(t *testing.T) {
c := &Core{}
err := c.Stop()
assert.NoError(t, err)
code, ok := c.stateCode.Load().(commonpb.StateCode)
assert.True(t, ok)
code := c.GetStateCode()
assert.Equal(t, commonpb.StateCode_Abnormal, code)
})

@@ -2073,8 +2072,7 @@ func TestCore_Stop(t *testing.T) {
c.ctx, c.cancel = context.WithCancel(context.Background())
err := c.Stop()
assert.NoError(t, err)
code, ok := c.stateCode.Load().(commonpb.StateCode)
assert.True(t, ok)
code := c.GetStateCode()
assert.Equal(t, commonpb.StateCode_Abnormal, code)
})
}

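The test changes mirror the encapsulation on Core: rather than storing into and type-asserting out of the atomic stateCode field, tests go through the accessors. A before/after sketch, assuming a *Core value named core as in the tests above:

// old: direct access to the atomic field
core.stateCode.Store(commonpb.StateCode_Healthy)
code, _ := core.stateCode.Load().(commonpb.StateCode)

// new: accessors hide the atomic value
core.UpdateStateCode(commonpb.StateCode_Healthy)
code = core.GetStateCode()
_ = code
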
@@ -21,6 +21,7 @@ import (

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

@@ -41,14 +42,14 @@ func (t *showCollectionTask) Prepare(ctx context.Context) error {

// Execute task execution
func (t *showCollectionTask) Execute(ctx context.Context) error {
t.Rsp.Status = succStatus()
t.Rsp.Status = merr.Success()
ts := t.Req.GetTimeStamp()
if ts == 0 {
ts = typeutil.MaxTimestamp
}
colls, err := t.core.meta.ListCollections(ctx, t.Req.GetDbName(), ts, true)
if err != nil {
t.Rsp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
t.Rsp.Status = merr.Status(err)
return err
}
for _, meta := range colls {

@@ -22,6 +22,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

@@ -45,14 +46,14 @@ func (t *showPartitionTask) Prepare(ctx context.Context) error {
func (t *showPartitionTask) Execute(ctx context.Context) error {
var coll *model.Collection
var err error
t.Rsp.Status = succStatus()
t.Rsp.Status = merr.Success()
if t.Req.GetCollectionName() == "" {
coll, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetDbName(), t.Req.GetCollectionID(), typeutil.MaxTimestamp, t.allowUnavailable)
} else {
coll, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.GetCollectionName(), typeutil.MaxTimestamp)
}
if err != nil {
t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
t.Rsp.Status = merr.Status(err)
return err
}

@@ -21,10 +21,13 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/metastore/model"
mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

@@ -56,7 +59,9 @@ func Test_showPartitionTask_Prepare(t *testing.T) {

func Test_showPartitionTask_Execute(t *testing.T) {
t.Run("failed to list collections by name", func(t *testing.T) {
core := newTestCore(withInvalidMeta())
metaTable := mockrootcoord.NewIMetaTable(t)
metaTable.EXPECT().GetCollectionByName(mock.Anything, mock.Anything, "test coll", mock.Anything).Return(nil, merr.WrapErrCollectionNotFound("test coll"))
core := newTestCore(withMeta(metaTable))
task := &showPartitionTask{
baseTask: newBaseTask(context.Background(), core),
Req: &milvuspb.ShowPartitionsRequest{

@@ -68,12 +73,14 @@ func Test_showPartitionTask_Execute(t *testing.T) {
Rsp: &milvuspb.ShowPartitionsResponse{},
}
err := task.Execute(context.Background())
assert.Error(t, err)
assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
assert.ErrorIs(t, merr.Error(task.Rsp.GetStatus()), merr.ErrCollectionNotFound)
})

t.Run("failed to list collections by id", func(t *testing.T) {
core := newTestCore(withInvalidMeta())
metaTable := mockrootcoord.NewIMetaTable(t)
metaTable.EXPECT().GetCollectionByID(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, merr.WrapErrCollectionNotFound(1))
core := newTestCore(withMeta(metaTable))
task := &showPartitionTask{
baseTask: newBaseTask(context.Background(), core),
Req: &milvuspb.ShowPartitionsRequest{

@@ -85,8 +92,8 @@ func Test_showPartitionTask_Execute(t *testing.T) {
Rsp: &milvuspb.ShowPartitionsResponse{},
}
err := task.Execute(context.Background())
assert.Error(t, err)
assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
assert.ErrorIs(t, merr.Error(task.Rsp.GetStatus()), merr.ErrCollectionNotFound)
})

t.Run("success", func(t *testing.T) {

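The assertions switch from comparing commonpb error codes to unwrapping the status back into an error, so the standard errors.Is semantics apply. A sketch of the new checks, assuming task.Rsp carries a *commonpb.Status as in the tests above and that merr.Error converts a status back into its wrapped error:

err := task.Execute(context.Background())
assert.Error(t, err)
// Both the returned error and the status embedded in the response
// should unwrap to merr.ErrCollectionNotFound.
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
assert.ErrorIs(t, merr.Error(task.Rsp.GetStatus()), merr.ErrCollectionNotFound)
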
@@ -24,11 +24,9 @@ import (
"go.uber.org/zap"

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

@@ -53,16 +51,6 @@ func EqualKeyPairArray(p1 []*commonpb.KeyValuePair, p2 []*commonpb.KeyValuePair)
return true
}

// GetFieldSchemaByID return field schema by id
func GetFieldSchemaByID(coll *model.Collection, fieldID typeutil.UniqueID) (*model.Field, error) {
for _, f := range coll.Fields {
if f.FieldID == fieldID {
return f, nil
}
}
return nil, fmt.Errorf("field id = %d not found", fieldID)
}

// EncodeMsgPositions serialize []*MsgPosition into string
func EncodeMsgPositions(msgPositions []*msgstream.MsgPosition) (string, error) {
if len(msgPositions) == 0 {

@@ -106,18 +94,6 @@ func CheckMsgType(got, expect commonpb.MsgType) error {
return nil
}

// Deprecated: use merr.StatusWithErrorCode or merr.Status instead
func failStatus(code commonpb.ErrorCode, reason string) *commonpb.Status {
return &commonpb.Status{
ErrorCode: code,
Reason: reason,
}
}

func succStatus() *commonpb.Status {
return merr.Status(nil)
}

type TimeTravelRequest interface {
GetBase() *commonpb.MsgBase
GetTimeStamp() Timestamp

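With this hunk the package-local helpers go away: succStatus was only a wrapper over merr.Status(nil), and failStatus assembled a raw commonpb.Status from an error code and reason. Callers use merr directly instead; a sketch of the replacements, assuming err is whatever error a caller previously flattened into the reason string:

// before: succStatus() and failStatus(commonpb.ErrorCode_UnexpectedError, err.Error())
// after:
ok := merr.Success()       // success path
failed := merr.Status(err) // failure path, keeps the typed error
_, _ = ok, failed
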
@@ -23,7 +23,6 @@ import (

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/typeutil"

@@ -62,20 +61,6 @@ func Test_EqualKeyPairArray(t *testing.T) {
assert.True(t, EqualKeyPairArray(p1, p2))
}

func Test_GetFieldSchemaByID(t *testing.T) {
coll := &model.Collection{
Fields: []*model.Field{
{
FieldID: 1,
},
},
}
_, err := GetFieldSchemaByID(coll, 1)
assert.NoError(t, err)
_, err = GetFieldSchemaByID(coll, 2)
assert.Error(t, err)
}

func Test_EncodeMsgPositions(t *testing.T) {
mp := &msgstream.MsgPosition{
ChannelName: "test",

@@ -445,7 +445,7 @@ func TestClientBase_Compression(t *testing.T) {
State: &milvuspb.ComponentInfo{
NodeID: randID,
},
Status: merr.Status(nil),
Status: merr.Success(),
}, nil
})
assert.NoError(t, err)

@@ -267,7 +267,7 @@ func Test_ImportWrapperRowBased(t *testing.T) {
assignSegmentFunc, flushFunc, saveSegmentFunc := createMockCallbackFunctions(t, rowCounter)

importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -345,7 +345,7 @@ func Test_ImportWrapperColumnBased_numpy(t *testing.T) {
assignSegmentFunc, flushFunc, saveSegmentFunc := createMockCallbackFunctions(t, rowCounter)

importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -498,7 +498,7 @@ func Test_ImportWrapperRowBased_perf(t *testing.T) {
schema := perfSchema(dim)

importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -672,7 +672,7 @@ func Test_ImportWrapperReportFailRowBased(t *testing.T) {

// success case
importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -719,7 +719,7 @@ func Test_ImportWrapperReportFailColumnBased_numpy(t *testing.T) {

// success case
importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -862,7 +862,7 @@ func Test_ImportWrapperDoBinlogImport(t *testing.T) {
return nil
}
wrapper.importResult = &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -881,7 +881,7 @@ func Test_ImportWrapperReportPersisted(t *testing.T) {
tr := timerecord.NewTimeRecorder("test")

importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

@@ -961,7 +961,7 @@ func Test_ImportWrapperFlushFunc(t *testing.T) {
assignSegmentFunc, flushFunc, saveSegmentFunc := createMockCallbackFunctions(t, rowCounter)

importResult := &rootcoordpb.ImportResult{
Status: merr.Status(nil),
Status: merr.Success(),
TaskId: 1,
DatanodeId: 1,
State: commonpb.ImportState_ImportStarted,

Some files were not shown because too many files have changed in this diff.