mirror of https://github.com/milvus-io/milvus.git
Return newly defined merr instead of grpc unimplemented err (#27751)
Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
pull/27766/head
parent 2815c84639
commit 49b3a12804
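The change in a nutshell: instead of letting gRPC's codes.Unimplemented leak through the call chain and having every call site probe it with funcutil.IsGrpcErr, the client layer wraps it once into the new merr.ErrServiceUnimplemented sentinel and callers match it with errors.Is. Below is a minimal, self-contained sketch of that pattern using the standard library's error wrapping rather than the real merr package (the names are illustrative):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Local stand-in for merr.ErrServiceUnimplemented.
var errServiceUnimplemented = errors.New("service unimplemented")

// wrapUnimplemented translates a transport-level Unimplemented status into the
// sentinel while keeping the original gRPC message for logging.
func wrapUnimplemented(grpcErr error) error {
	return fmt.Errorf("%w: %s", errServiceUnimplemented, grpcErr.Error())
}

func main() {
	raw := status.Error(codes.Unimplemented, "mock grpc unimplemented error")

	// Old style: every caller inspects the gRPC status code.
	fmt.Println(status.Code(raw) == codes.Unimplemented) // true

	// New style: wrap once at the client boundary, match with errors.Is everywhere else.
	wrapped := wrapUnimplemented(raw)
	fmt.Println(errors.Is(wrapped, errServiceUnimplemented)) // true
	fmt.Println(errors.Is(raw, errServiceUnimplemented))     // false: an unwrapped status no longer matches
}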
@@ -339,7 +339,7 @@ func TestFlush(t *testing.T) {
 	datanodeClient := mocks.NewMockDataNodeClient(t)
 	datanodeClient.EXPECT().FlushChannels(mock.Anything, mock.Anything).Return(nil,
-		grpcStatus.Error(codes.Unimplemented, "mock grpc unimplemented error"))
+		merr.WrapErrServiceUnimplemented(grpcStatus.Error(codes.Unimplemented, "mock grpc unimplemented error")))

 	sm.sessions = struct {
 		sync.RWMutex
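A side effect visible in the test hunk above: a mock standing in for the DataNode client now has to return the already-wrapped error, because the code under test matches the sentinel with errors.Is, and a bare gRPC status would no longer trigger the unimplemented fallback. A rough sketch of that difference (flushChannels and the fake clients are illustrative stand-ins, not the real mock types):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Local stand-in for merr.ErrServiceUnimplemented.
var errServiceUnimplemented = errors.New("service unimplemented")

// flushChannels mimics the caller's fallback: the RPC is treated as unsupported
// only when the sentinel is found in the error chain.
func flushChannels(flush func() error) (unimplemented bool, err error) {
	if err := flush(); err != nil {
		if errors.Is(err, errServiceUnimplemented) {
			return true, nil // older server: skip the new RPC silently
		}
		return false, err
	}
	return false, nil
}

func main() {
	rawStatus := func() error {
		return status.Error(codes.Unimplemented, "mock grpc unimplemented error")
	}
	wrapped := func() error {
		return fmt.Errorf("%w: mock grpc unimplemented error", errServiceUnimplemented)
	}

	fmt.Println(flushChannels(rawStatus)) // false + error: a raw status no longer takes the fallback
	fmt.Println(flushChannels(wrapped))   // true <nil>: the mock must hand back the wrapped sentinel
}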
@@ -28,7 +28,6 @@ import (
 	"go.opentelemetry.io/otel"
 	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
-	"google.golang.org/grpc/codes"

 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
@@ -40,7 +39,6 @@ import (
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/metrics"
 	"github.com/milvus-io/milvus/pkg/util/commonpbutil"
-	"github.com/milvus-io/milvus/pkg/util/funcutil"
 	"github.com/milvus-io/milvus/pkg/util/merr"
 	"github.com/milvus-io/milvus/pkg/util/metricsinfo"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
@@ -126,7 +124,7 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
 		return channel.Name
 	})
 	err = s.cluster.FlushChannels(ctx, nodeID, ts, channelNames)
-	if err != nil && funcutil.IsGrpcErr(err, codes.Unimplemented) {
+	if err != nil && errors.Is(err, merr.ErrServiceUnimplemented) {
 		isUnimplemented = true
 		return nil
 	}
@@ -20,13 +20,13 @@ import (
 	"context"
 	"sync"

+	"github.com/cockroachdb/errors"
 	"github.com/samber/lo"
 	"go.uber.org/zap"
-	"google.golang.org/grpc/codes"

 	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/pkg/log"
-	"github.com/milvus-io/milvus/pkg/util/funcutil"
+	"github.com/milvus-io/milvus/pkg/util/merr"
 	"github.com/milvus-io/milvus/pkg/util/retry"
 	"github.com/milvus-io/milvus/pkg/util/typeutil"
 )
@@ -192,7 +192,7 @@ func (mgr *TargetManager) PullNextTargetV2(broker Broker, collectionID int64, ch
 	vChannelInfos, segmentInfos, err := broker.GetRecoveryInfoV2(context.TODO(), collectionID)
 	if err != nil {
 		// if meet rpc error, for compatibility with previous versions, try pull next target v1
-		if funcutil.IsGrpcErr(err, codes.Unimplemented) {
+		if errors.Is(err, merr.ErrServiceUnimplemented) {
 			segments, dmChannels, err = mgr.PullNextTargetV1(broker, collectionID, chosenPartitionIDs...)
 			return err
 		}
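The hunk above is the rolling-upgrade path: when the broker call reaches an older node that does not implement GetRecoveryInfoV2, the target manager falls back to the V1 pull; any other error is surfaced as-is. A small sketch of that control flow, with fetchV2/fetchV1 as illustrative stand-ins for the real broker calls:

package main

import (
	"errors"
	"fmt"
)

// Local stand-in for merr.ErrServiceUnimplemented.
var errServiceUnimplemented = errors.New("service unimplemented")

type target struct{ source string }

// pullTarget prefers the V2 RPC and falls back to V1 only when the peer
// reports the method as unimplemented.
func pullTarget(fetchV2, fetchV1 func() (target, error)) (target, error) {
	t, err := fetchV2()
	if err == nil {
		return t, nil
	}
	if errors.Is(err, errServiceUnimplemented) {
		// older peer: for compatibility, retry with the V1 API
		return fetchV1()
	}
	return target{}, err
}

func main() {
	v1 := func() (target, error) { return target{source: "v1"}, nil }
	oldPeerV2 := func() (target, error) {
		return target{}, fmt.Errorf("%w: peer too old", errServiceUnimplemented)
	}
	brokenV2 := func() (target, error) { return target{}, errors.New("network down") }

	fmt.Println(pullTarget(oldPeerV2, v1)) // {v1} <nil>  -> fell back to V1
	fmt.Println(pullTarget(brokenV2, v1))  // {} network down -> no fallback
}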
@@ -35,6 +35,7 @@ import (
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/pkg/util/etcd"
+	"github.com/milvus-io/milvus/pkg/util/merr"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
 	"github.com/milvus-io/milvus/pkg/util/typeutil"
 )
@@ -238,7 +239,8 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() {
 	suite.broker.ExpectedCalls = nil
 	// test getRecoveryInfoV2 failed , then back to getRecoveryInfo succeed
-	suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nil, nil, status.Errorf(codes.Unimplemented, "fake not found"))
+	suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(
+		nil, nil, merr.WrapErrServiceUnimplemented(status.Errorf(codes.Unimplemented, "fake not found")))
 	suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{1}, nil)
 	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collectionID, int64(1)).Return(nextTargetChannels, nextTargetBinlogs, nil)
 	err := suite.mgr.UpdateCollectionNextTarget(collectionID)
@@ -359,7 +359,7 @@ func (c *ClientBase[T]) needResetCancel() (needReset bool) {
 	return false
 }

-func (c *ClientBase[T]) checkErr(ctx context.Context, err error) (needRetry, needReset bool) {
+func (c *ClientBase[T]) checkErr(ctx context.Context, err error) (needRetry, needReset bool, retErr error) {
 	log := log.Ctx(ctx).With(zap.String("clientRole", c.GetRole()))
 	switch {
 	case funcutil.IsGrpcErr(err):
@@ -367,21 +367,21 @@ func (c *ClientBase[T]) checkErr(ctx context.Context, err error) (needRetry, nee
 		log.Warn("call received grpc error", zap.Error(err))
 		if funcutil.IsGrpcErr(err, codes.Canceled, codes.DeadlineExceeded) {
 			// canceled or deadline exceeded
-			return true, c.needResetCancel()
+			return true, c.needResetCancel(), err
 		}

 		if funcutil.IsGrpcErr(err, codes.Unimplemented) {
-			return false, false
+			return false, false, merr.WrapErrServiceUnimplemented(err)
 		}
-		return true, true
+		return true, true, err
 	case IsServerIDMismatchErr(err):
 		fallthrough
 	case IsCrossClusterRoutingErr(err):
-		return true, true
+		return true, true, err
 	default:
 		log.Warn("fail to grpc call because of unknown error", zap.Error(err))
 		// Unknown err
-		return false, false
+		return false, false, err
 	}
 }

@@ -418,7 +418,8 @@ func (c *ClientBase[T]) call(ctx context.Context, caller func(client T) (any, er
 		var err error
 		ret, err = caller(client)
 		if err != nil {
-			needRetry, needReset := c.checkErr(ctx, err)
+			var needRetry, needReset bool
+			needRetry, needReset, err = c.checkErr(ctx, err)
 			if !needRetry {
 				// stop retry
 				err = retry.Unrecoverable(err)
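Since checkErr now returns the (possibly translated) error as a third value, call overwrites its local err with it, so whatever finally escapes the retry loop already carries the merr sentinel. A rough sketch of that shape, with a toy classifier and loop standing in for checkErr and the retry package:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Local stand-in for merr.ErrServiceUnimplemented.
var errServiceUnimplemented = errors.New("service unimplemented")

// classify mirrors the new checkErr shape: decide whether to retry and hand
// back the error the caller should keep, translated where appropriate.
func classify(err error) (needRetry bool, retErr error) {
	switch status.Code(err) {
	case codes.Canceled, codes.DeadlineExceeded:
		return true, err
	case codes.Unimplemented:
		return false, fmt.Errorf("%w: %s", errServiceUnimplemented, err.Error())
	default:
		return true, err
	}
}

// callWithRetry keeps the translated error, so the sentinel survives the loop.
func callWithRetry(attempts int, do func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = do(); err == nil {
			return nil
		}
		var needRetry bool
		needRetry, err = classify(err)
		if !needRetry {
			break // unrecoverable: stop retrying, keep the translated error
		}
	}
	return err
}

func main() {
	err := callWithRetry(3, func() error {
		return status.Error(codes.Unimplemented, "FlushChannels not implemented")
	})
	fmt.Println(errors.Is(err, errServiceUnimplemented)) // true
}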
@@ -315,11 +315,11 @@ func TestClientBase_CheckError(t *testing.T) {
 	base.MaxAttempts = 1

 	ctx := context.Background()
-	retry, reset := base.checkErr(ctx, status.Errorf(codes.Canceled, "fake context canceled"))
+	retry, reset, _ := base.checkErr(ctx, status.Errorf(codes.Canceled, "fake context canceled"))
 	assert.True(t, retry)
 	assert.True(t, reset)

-	retry, reset = base.checkErr(ctx, status.Errorf(codes.Unimplemented, "fake context canceled"))
+	retry, reset, _ = base.checkErr(ctx, status.Errorf(codes.Unimplemented, "fake context canceled"))
 	assert.False(t, retry)
 	assert.False(t, reset)
 }
@@ -42,6 +42,7 @@ var (
 	ErrServiceDiskLimitExceeded = newMilvusError("disk limit exceeded", 7, false)
 	ErrServiceRateLimit         = newMilvusError("rate limit exceeded", 8, true)
 	ErrServiceForceDeny         = newMilvusError("force deny", 9, false)
+	ErrServiceUnimplemented     = newMilvusError("service unimplemented", 10, false)

 	// Collection related
 	ErrCollectionNotFound = newMilvusError("collection not found", 100, false)
@@ -78,6 +78,7 @@ func (s *ErrSuite) TestWrap() {
 	s.ErrorIs(WrapErrServiceCrossClusterRouting("ins-0", "ins-1"), ErrServiceCrossClusterRouting)
 	s.ErrorIs(WrapErrServiceDiskLimitExceeded(110, 100, "DLE"), ErrServiceDiskLimitExceeded)
 	s.ErrorIs(WrapErrNodeNotMatch(0, 1, "SIM"), ErrNodeNotMatch)
+	s.ErrorIs(WrapErrServiceUnimplemented(errors.New("mock grpc err")), ErrServiceUnimplemented)

 	// Collection related
 	s.ErrorIs(WrapErrCollectionNotFound("test_collection", "failed to get collection"), ErrCollectionNotFound)
@@ -338,6 +338,11 @@ func WrapErrServiceForceDeny(op string, reason error, method string) error {
 	return err
 }

+func WrapErrServiceUnimplemented(grpcErr error) error {
+	err := errors.Wrapf(ErrServiceUnimplemented, "err: %s", grpcErr.Error())
+	return err
+}
+
 // database related
 func WrapErrDatabaseNotFound(database any, msg ...string) error {
 	err := wrapWithField(ErrDatabaseNotFound, "database", database)
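The helper above leans on github.com/cockroachdb/errors: Wrapf keeps ErrServiceUnimplemented in the error chain (so errors.Is still matches) while appending the original gRPC message. A small sketch of that property under the same library, with a local sentinel standing in for the real one:

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Local stand-in for ErrServiceUnimplemented.
var errServiceUnimplemented = errors.New("service unimplemented")

// wrapErrServiceUnimplemented mirrors the shape of the new merr helper.
func wrapErrServiceUnimplemented(grpcErr error) error {
	return errors.Wrapf(errServiceUnimplemented, "err: %s", grpcErr.Error())
}

func main() {
	grpcErr := status.Error(codes.Unimplemented, "mock grpc err")
	err := wrapErrServiceUnimplemented(grpcErr)

	// The sentinel stays in the chain, and the message keeps both the wrapper
	// text and the original gRPC error text.
	fmt.Println(errors.Is(err, errServiceUnimplemented)) // true
	fmt.Println(err)
}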