mirror of https://github.com/milvus-io/milvus.git
Signed-off-by: zhuwenxing <wenxing.zhu@zilliz.com>
Co-authored-by: zhuwenxing <wenxing.zhu@zilliz.com>
pull/19892/head
parent
a752682de0
commit
108e51b2f0

@@ -26,6 +26,7 @@ import (
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
	"github.com/milvus-io/milvus/internal/types"
	"github.com/milvus-io/milvus/internal/util/commonpbutil"
	"github.com/milvus-io/milvus/internal/util/funcutil"
	"github.com/milvus-io/milvus/internal/util/grpcclient"
	"github.com/milvus-io/milvus/internal/util/paramtable"

@@ -161,6 +162,8 @@ func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResp

// Flush flushes a collection's data
func (c *Client) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()
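
Every client method touched in this change now runs the same three-step preamble before the RPC goes out: clone the incoming request, stamp the coordinator session's ServerID into the message base as the target, and issue the call through the retrying wrapper, which bails out as soon as the caller's context is no longer valid. The following is a minimal, self-contained sketch of that flow; cloneRequest, setTargetID and reCall are hypothetical stand-ins for typeutil.Clone, commonpbutil.SetTargetID and grpcClient.ReCall, not the real Milvus APIs.

package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins for the generated protobuf types in the diff.
type MsgBase struct{ TargetID int64 }
type FlushRequest struct{ Base *MsgBase }

// cloneRequest plays the role of typeutil.Clone: copy the request so the
// caller's original message is never mutated when routing metadata is set.
func cloneRequest(req *FlushRequest) *FlushRequest {
	base := *req.Base
	return &FlushRequest{Base: &base}
}

// setTargetID plays the role of commonpbutil.SetTargetID: record which
// server the request is intended for.
func setTargetID(base *MsgBase, serverID int64) {
	base.TargetID = serverID
}

// reCall plays the role of grpcClient.ReCall: refuse to start work once the
// caller's context is already cancelled or past its deadline.
func reCall(ctx context.Context, work func() (any, error)) (any, error) {
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}
	return work()
}

func main() {
	ctx := context.Background()
	req := &FlushRequest{Base: &MsgBase{}}

	// The per-method preamble added throughout this change.
	req = cloneRequest(req)
	setTargetID(req.Base, 42) // 42 stands in for c.sess.ServerID
	ret, err := reCall(ctx, func() (any, error) {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		return "flush accepted", nil
	})
	fmt.Println(ret, err)
}

Cloning before mutating matters because the caller may still hold the same request value while the call is retried; stamping the target ID onto a copy leaves that shared value untouched.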

@@ -205,10 +208,14 @@ func (c *Client) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentI
// req contains the list of segment id to query
//
// response struct `GetSegmentStatesResponse` contains the list of each state query result
// when the segment is not found, the state entry will have the field `Status` to identify failure
// otherwise the Segment State and Start position information will be returned
//
// error is returned only when some communication issue occurs
func (c *Client) GetSegmentStates(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()
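
As the comment above states, a missing segment does not fail the whole call: each entry in the returned state list carries its own status, so callers need to check the RPC error and then the per-segment status. The sketch below illustrates that two-level check with hypothetical field names (States, ErrorCode, Reason); the real GetSegmentStatesResponse layout may differ.

package example

import "fmt"

// Hypothetical, simplified mirror of the response shape described above.
type segmentState struct {
	SegmentID int64
	ErrorCode int32 // 0 means success in this sketch
	Reason    string
	State     string
}

type getSegmentStatesResponse struct {
	States []segmentState
}

// reportSegmentStates shows the two-level error handling the doc comment
// implies: an RPC-level error first, then a per-segment status check.
func reportSegmentStates(resp *getSegmentStatesResponse, rpcErr error) {
	if rpcErr != nil {
		fmt.Println("communication failure:", rpcErr)
		return
	}
	for _, s := range resp.States {
		if s.ErrorCode != 0 {
			fmt.Printf("segment %d not found: %s\n", s.SegmentID, s.Reason)
			continue
		}
		fmt.Printf("segment %d is in state %s\n", s.SegmentID, s.State)
	}
}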

@@ -227,9 +234,13 @@ func (c *Client) GetSegmentStates(ctx context.Context, req *datapb.GetSegmentSta
// req contains the segment id to query
//
// response struct `GetInsertBinlogPathsResponse` contains the fields list
// and corresponding binlog path list
//
// error is returned only when some communication issue occurs
func (c *Client) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsertBinlogPathsRequest) (*datapb.GetInsertBinlogPathsResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()

@@ -248,9 +259,13 @@ func (c *Client) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsert
// req contains the collection id to query
//
// response struct `GetCollectionStatisticsResponse` contains the key-value list fields returning related data
// only row count for now
//
// error is returned only when some communication issue occurs
func (c *Client) GetCollectionStatistics(ctx context.Context, req *datapb.GetCollectionStatisticsRequest) (*datapb.GetCollectionStatisticsResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()

@@ -269,9 +284,13 @@ func (c *Client) GetCollectionStatistics(ctx context.Context, req *datapb.GetCol
// req contains the collection and partition id to query
//
// response struct `GetPartitionStatisticsResponse` contains the key-value list fields returning related data
// only row count for now
//
// error is returned only when some communication issue occurs
func (c *Client) GetPartitionStatistics(ctx context.Context, req *datapb.GetPartitionStatisticsRequest) (*datapb.GetPartitionStatisticsResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()

@@ -307,6 +326,8 @@ func (c *Client) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringRes
// response struct `GetSegmentInfoResponse` contains the list of segment info
// error is returned only when some communication issue occurs
func (c *Client) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoRequest) (*datapb.GetSegmentInfoResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()

@@ -320,7 +341,8 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoR
}

// SaveBinlogPaths updates segments binlogs(including insert binlogs, stats logs and delta logs)
// and related message stream positions
//
// ctx is the context to control request deadline and cancellation
// req contains the collection/partition id to query

@@ -329,10 +351,13 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoR
// error is returned only when some communication issue occurs
//
// there is a constraint that the `SaveBinlogPaths` requests of the same segment shall be passed in sequence
// the root reason is that each `SaveBinlogPaths` call will overwrite the checkpoint position
// if the constraint is broken, the checkpoint position will not be monotonically increasing and the integrity will be compromised
func (c *Client) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPathsRequest) (*commonpb.Status, error) {
	// use Call here on purpose
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.Call(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()
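
The constraint spelled out above is a caller-side ordering requirement: each SaveBinlogPaths call overwrites the segment's checkpoint position, so two in-flight calls for the same segment can land out of order and move the checkpoint backwards. One way to honour it is to serialize calls per segment, as in the hedged sketch below; the lock-per-segment helper is illustrative and not part of the Milvus codebase.

package example

import (
	"context"
	"sync"
)

// segmentSerializer hands out one mutex per segment so that saves for the
// same segment are issued strictly one after another, while saves for
// different segments can still proceed in parallel.
type segmentSerializer struct {
	mu    sync.Mutex
	locks map[int64]*sync.Mutex
}

func newSegmentSerializer() *segmentSerializer {
	return &segmentSerializer{locks: make(map[int64]*sync.Mutex)}
}

func (s *segmentSerializer) lockFor(segmentID int64) *sync.Mutex {
	s.mu.Lock()
	defer s.mu.Unlock()
	l, ok := s.locks[segmentID]
	if !ok {
		l = &sync.Mutex{}
		s.locks[segmentID] = l
	}
	return l
}

// save wraps issuing the real SaveBinlogPaths client call (represented here
// by the call argument); the ordering discipline around it is the point.
func (s *segmentSerializer) save(ctx context.Context, segmentID int64, call func(ctx context.Context) error) error {
	l := s.lockFor(segmentID)
	l.Lock() // requests for the same segment are passed in sequence
	defer l.Unlock()
	return call(ctx)
}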

@@ -353,6 +378,8 @@ func (c *Client) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
// response struct `GetRecoveryInfoResponse` contains the list of segments info and corresponding vchannel info
// error is returned only when some communication issue occurs
func (c *Client) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInfoRequest) (*datapb.GetRecoveryInfoResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()

@@ -369,11 +396,14 @@ func (c *Client) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
//
// ctx is the context to control request deadline and cancellation
// req contains the collection/partition id to query
// when partition is less than or equal to 0, all flushed segments of the collection will be returned
//
// response struct `GetFlushedSegmentsResponse` contains flushed segment id list
// error is returned only when some communication issue occurs
func (c *Client) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedSegmentsRequest) (*datapb.GetFlushedSegmentsResponse, error) {
	req = typeutil.Clone(req)
	commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
	ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()
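
As the comment notes, a partition id less than or equal to zero asks for the flushed segments of the whole collection. The sketch below shows that usage against the request type visible in this diff; treat the field names and the -1 sentinel as assumptions about the datapb message layout rather than confirmed API details.

package example

import (
	"context"

	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/types"
)

// allFlushedSegments asks DataCoord for every flushed segment of a
// collection by leaving the partition unspecified (<= 0), as described in
// the doc comment above. Field names beyond what the diff shows are assumed.
func allFlushedSegments(ctx context.Context, dc types.DataCoord, collectionID int64) (*datapb.GetFlushedSegmentsResponse, error) {
	return dc.GetFlushedSegments(ctx, &datapb.GetFlushedSegmentsRequest{
		CollectionID: collectionID,
		PartitionID:  -1, // <= 0 means "all partitions of the collection"
	})
}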
@ -395,6 +425,8 @@ func (c *Client) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedS
|
|||
// response struct `GetSegmentsByStatesResponse` contains segment id list
|
||||
// error is returned only when some communication issue occurs
|
||||
func (c *Client) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegmentsByStatesRequest) (*datapb.GetSegmentsByStatesResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -409,6 +441,8 @@ func (c *Client) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegment
|
|||
|
||||
// ShowConfigurations gets specified configurations para of DataCoord
|
||||
func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -424,6 +458,8 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
|
|||
|
||||
// GetMetrics gets all metrics of datacoord
|
||||
func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -508,6 +544,8 @@ func (c *Client) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateR
|
|||
|
||||
// DropVirtualChannel drops virtual channel in datacoord.
|
||||
func (c *Client) DropVirtualChannel(ctx context.Context, req *datapb.DropVirtualChannelRequest) (*datapb.DropVirtualChannelResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -522,6 +560,8 @@ func (c *Client) DropVirtualChannel(ctx context.Context, req *datapb.DropVirtual
|
|||
|
||||
// SetSegmentState sets the state of a given segment.
|
||||
func (c *Client) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStateRequest) (*datapb.SetSegmentStateResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -536,6 +576,8 @@ func (c *Client) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStat
|
|||
|
||||
// Import data files(json, numpy, etc.) on MinIO/S3 storage, read and parse them into sealed segments
|
||||
func (c *Client) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -550,6 +592,8 @@ func (c *Client) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*da
|
|||
|
||||
// UpdateSegmentStatistics is the client side caller of UpdateSegmentStatistics.
|
||||
func (c *Client) UpdateSegmentStatistics(ctx context.Context, req *datapb.UpdateSegmentStatisticsRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -564,6 +608,8 @@ func (c *Client) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
|
|||
|
||||
// AcquireSegmentLock acquire the reference lock of the segments.
|
||||
func (c *Client) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -578,6 +624,8 @@ func (c *Client) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegm
|
|||
|
||||
// ReleaseSegmentLock release the reference lock of the segments.
|
||||
func (c *Client) ReleaseSegmentLock(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -592,6 +640,8 @@ func (c *Client) ReleaseSegmentLock(ctx context.Context, req *datapb.ReleaseSegm
|
|||
|
||||
// SaveImportSegment is the DataCoord client side code for SaveImportSegment call.
|
||||
func (c *Client) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSegmentRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -605,6 +655,8 @@ func (c *Client) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
|
|||
}
|
||||
|
||||
func (c *Client) UnsetIsImportingState(ctx context.Context, req *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -618,6 +670,8 @@ func (c *Client) UnsetIsImportingState(ctx context.Context, req *datapb.UnsetIsI
|
|||
}
|
||||
|
||||
func (c *Client) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmentsDroppedRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -632,6 +686,8 @@ func (c *Client) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmen
|
|||
|
||||
// BroadcastAlteredCollection is the DataCoord client side code for BroadcastAlteredCollection call.
|
||||
func (c *Client) BroadcastAlteredCollection(ctx context.Context, req *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/grpcclient"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
|
@ -244,6 +245,8 @@ func (c *Client) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
|
|||
|
||||
// ShowConfigurations gets specified configurations para of IndexCoord
|
||||
func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -259,6 +262,8 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
|
|||
|
||||
// GetMetrics gets the metrics info of IndexCoord.
|
||||
func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/proxypb"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/grpcclient"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
|
@@ -108,7 +109,7 @@ func (c *Client) GetComponentStates(ctx context.Context) (*milvuspb.ComponentSta
	return ret.(*milvuspb.ComponentStates), err
}

-//GetStatisticsChannel return the statistics channel in string
+// GetStatisticsChannel return the statistics channel in string
func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
	ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
		if !funcutil.CheckCtxValid(ctx) {
@ -124,6 +125,8 @@ func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResp
|
|||
|
||||
// InvalidateCollectionMetaCache invalidate collection meta cache
|
||||
func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -137,6 +140,8 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb
|
|||
}
|
||||
|
||||
func (c *Client) InvalidateCredentialCache(ctx context.Context, req *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -150,6 +155,8 @@ func (c *Client) InvalidateCredentialCache(ctx context.Context, req *proxypb.Inv
|
|||
}
|
||||
|
||||
func (c *Client) UpdateCredentialCache(ctx context.Context, req *proxypb.UpdateCredCacheRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -163,6 +170,8 @@ func (c *Client) UpdateCredentialCache(ctx context.Context, req *proxypb.UpdateC
|
|||
}
|
||||
|
||||
func (c *Client) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -178,6 +187,8 @@ func (c *Client) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.Refres
|
|||
// GetProxyMetrics gets the metrics of proxy, it's an internal interface which is different from GetMetrics interface,
|
||||
// because it only obtains the metrics of Proxy, not including the topological metrics of Query cluster and Data cluster.
|
||||
func (c *Client) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -192,6 +203,8 @@ func (c *Client) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
|
|||
|
||||
// SetRates notifies Proxy to limit rates of requests.
|
||||
func (c *Client) SetRates(ctx context.Context, req *proxypb.SetRatesRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.grpcClient.GetNodeID())
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/grpcclient"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
|
@ -157,6 +158,8 @@ func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResp
|
|||
|
||||
// ShowCollections shows the collections in the QueryCoord.
|
||||
func (c *Client) ShowCollections(ctx context.Context, req *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -171,6 +174,8 @@ func (c *Client) ShowCollections(ctx context.Context, req *querypb.ShowCollectio
|
|||
|
||||
// LoadCollection loads the data of the specified collections in the QueryCoord.
|
||||
func (c *Client) LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -185,6 +190,8 @@ func (c *Client) LoadCollection(ctx context.Context, req *querypb.LoadCollection
|
|||
|
||||
// ReleaseCollection release the data of the specified collections in the QueryCoord.
|
||||
func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -199,6 +206,8 @@ func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
|
|||
|
||||
// ShowPartitions shows the partitions in the QueryCoord.
|
||||
func (c *Client) ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -213,6 +222,8 @@ func (c *Client) ShowPartitions(ctx context.Context, req *querypb.ShowPartitions
|
|||
|
||||
// LoadPartitions loads the data of the specified partitions in the QueryCoord.
|
||||
func (c *Client) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -227,6 +238,8 @@ func (c *Client) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
|
|||
|
||||
// ReleasePartitions release the data of the specified partitions in the QueryCoord.
|
||||
func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -241,6 +254,8 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
|
|||
|
||||
// GetPartitionStates gets the states of the specified partition.
|
||||
func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -255,6 +270,8 @@ func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.GetPartiti
|
|||
|
||||
// GetSegmentInfo gets the information of the specified segment from QueryCoord.
|
||||
func (c *Client) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -269,6 +286,8 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfo
|
|||
|
||||
// LoadBalance migrate the sealed segments on the source node to the dst nodes.
|
||||
func (c *Client) LoadBalance(ctx context.Context, req *querypb.LoadBalanceRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -283,6 +302,8 @@ func (c *Client) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques
|
|||
|
||||
// ShowConfigurations gets specified configurations para of QueryCoord
|
||||
func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -298,6 +319,8 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
|
|||
|
||||
// GetMetrics gets the metrics information of QueryCoord.
|
||||
func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -312,6 +335,8 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
|
|||
|
||||
// GetReplicas gets the replicas of a certain collection.
|
||||
func (c *Client) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasRequest) (*milvuspb.GetReplicasResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -326,6 +351,8 @@ func (c *Client) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReque
|
|||
|
||||
// GetShardLeaders gets the shard leaders of a certain collection.
|
||||
func (c *Client) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeadersRequest) (*querypb.GetShardLeadersResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/proxypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/grpcclient"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
|
@ -163,6 +164,8 @@ func (c *Client) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResp
|
|||
|
||||
// CreateCollection create collection
|
||||
func (c *Client) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -177,6 +180,8 @@ func (c *Client) CreateCollection(ctx context.Context, in *milvuspb.CreateCollec
|
|||
|
||||
// DropCollection drop collection
|
||||
func (c *Client) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -191,6 +196,8 @@ func (c *Client) DropCollection(ctx context.Context, in *milvuspb.DropCollection
|
|||
|
||||
// HasCollection check collection existence
|
||||
func (c *Client) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -205,6 +212,8 @@ func (c *Client) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRe
|
|||
|
||||
// DescribeCollection return collection info
|
||||
func (c *Client) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -219,6 +228,8 @@ func (c *Client) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCo
|
|||
|
||||
// ShowCollections list all collection names
|
||||
func (c *Client) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -232,6 +243,8 @@ func (c *Client) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectio
|
|||
}
|
||||
|
||||
func (c *Client) AlterCollection(ctx context.Context, request *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) {
|
||||
request = typeutil.Clone(request)
|
||||
commonpbutil.SetTargetID(request.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -246,6 +259,8 @@ func (c *Client) AlterCollection(ctx context.Context, request *milvuspb.AlterCol
|
|||
|
||||
// CreatePartition create partition
|
||||
func (c *Client) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -260,6 +275,8 @@ func (c *Client) CreatePartition(ctx context.Context, in *milvuspb.CreatePartiti
|
|||
|
||||
// DropPartition drop partition
|
||||
func (c *Client) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -274,6 +291,8 @@ func (c *Client) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRe
|
|||
|
||||
// HasPartition check partition existence
|
||||
func (c *Client) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -288,6 +307,8 @@ func (c *Client) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequ
|
|||
|
||||
// ShowPartitions list all partitions in collection
|
||||
func (c *Client) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -302,6 +323,8 @@ func (c *Client) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitions
|
|||
|
||||
// AllocTimestamp global timestamp allocator
|
||||
func (c *Client) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestampRequest) (*rootcoordpb.AllocTimestampResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -316,6 +339,8 @@ func (c *Client) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimest
|
|||
|
||||
// AllocID global ID allocator
|
||||
func (c *Client) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*rootcoordpb.AllocIDResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -330,6 +355,8 @@ func (c *Client) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*
|
|||
|
||||
// UpdateChannelTimeTick used to handle ChannelTimeTickMsg
|
||||
func (c *Client) UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -344,6 +371,8 @@ func (c *Client) UpdateChannelTimeTick(ctx context.Context, in *internalpb.Chann
|
|||
|
||||
// ShowSegments list all segments
|
||||
func (c *Client) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -358,6 +387,8 @@ func (c *Client) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ
|
|||
|
||||
// InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies.
|
||||
func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -372,6 +403,8 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.
|
|||
|
||||
// ShowConfigurations gets specified configurations para of RootCoord
|
||||
func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -387,6 +420,8 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
|
|||
|
||||
// GetMetrics get metrics
|
||||
func (c *Client) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
in = typeutil.Clone(in)
|
||||
commonpbutil.SetTargetID(in.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -401,6 +436,8 @@ func (c *Client) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest)
|
|||
|
||||
// CreateAlias create collection alias
|
||||
func (c *Client) CreateAlias(ctx context.Context, req *milvuspb.CreateAliasRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -415,6 +452,8 @@ func (c *Client) CreateAlias(ctx context.Context, req *milvuspb.CreateAliasReque
|
|||
|
||||
// DropAlias drop collection alias
|
||||
func (c *Client) DropAlias(ctx context.Context, req *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -429,6 +468,8 @@ func (c *Client) DropAlias(ctx context.Context, req *milvuspb.DropAliasRequest)
|
|||
|
||||
// AlterAlias alter collection alias
|
||||
func (c *Client) AlterAlias(ctx context.Context, req *milvuspb.AlterAliasRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -511,6 +552,8 @@ func (c *Client) CreateCredential(ctx context.Context, req *internalpb.Credentia
|
|||
}
|
||||
|
||||
func (c *Client) GetCredential(ctx context.Context, req *rootcoordpb.GetCredentialRequest) (*rootcoordpb.GetCredentialResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -537,6 +580,8 @@ func (c *Client) UpdateCredential(ctx context.Context, req *internalpb.Credentia
|
|||
}
|
||||
|
||||
func (c *Client) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCredentialRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -550,6 +595,8 @@ func (c *Client) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCrede
|
|||
}
|
||||
|
||||
func (c *Client) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUsersRequest) (*milvuspb.ListCredUsersResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -563,6 +610,8 @@ func (c *Client) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUsersR
|
|||
}
|
||||
|
||||
func (c *Client) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -576,6 +625,8 @@ func (c *Client) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest
|
|||
}
|
||||
|
||||
func (c *Client) DropRole(ctx context.Context, req *milvuspb.DropRoleRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -589,6 +640,8 @@ func (c *Client) DropRole(ctx context.Context, req *milvuspb.DropRoleRequest) (*
|
|||
}
|
||||
|
||||
func (c *Client) OperateUserRole(ctx context.Context, req *milvuspb.OperateUserRoleRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -602,6 +655,8 @@ func (c *Client) OperateUserRole(ctx context.Context, req *milvuspb.OperateUserR
|
|||
}
|
||||
|
||||
func (c *Client) SelectRole(ctx context.Context, req *milvuspb.SelectRoleRequest) (*milvuspb.SelectRoleResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -615,6 +670,8 @@ func (c *Client) SelectRole(ctx context.Context, req *milvuspb.SelectRoleRequest
|
|||
}
|
||||
|
||||
func (c *Client) SelectUser(ctx context.Context, req *milvuspb.SelectUserRequest) (*milvuspb.SelectUserResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -628,6 +685,8 @@ func (c *Client) SelectUser(ctx context.Context, req *milvuspb.SelectUserRequest
|
|||
}
|
||||
|
||||
func (c *Client) OperatePrivilege(ctx context.Context, req *milvuspb.OperatePrivilegeRequest) (*commonpb.Status, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -641,6 +700,8 @@ func (c *Client) OperatePrivilege(ctx context.Context, req *milvuspb.OperatePriv
|
|||
}
|
||||
|
||||
func (c *Client) SelectGrant(ctx context.Context, req *milvuspb.SelectGrantRequest) (*milvuspb.SelectGrantResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
@ -654,6 +715,8 @@ func (c *Client) SelectGrant(ctx context.Context, req *milvuspb.SelectGrantReque
|
|||
}
|
||||
|
||||
func (c *Client) ListPolicy(ctx context.Context, req *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
|
||||
req = typeutil.Clone(req)
|
||||
commonpbutil.SetTargetID(req.GetBase(), c.sess.ServerID)
|
||||
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
|
||||
if !funcutil.CheckCtxValid(ctx) {
|
||||
return nil, ctx.Err()
|
||||
|
|
|
@@ -30,6 +30,7 @@ import (
	"github.com/milvus-io/milvus/internal/metrics"
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/types"
	"github.com/milvus-io/milvus/internal/util/commonpbutil"

	"go.uber.org/zap"
)

@@ -89,7 +90,7 @@ type repackFuncType = func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[in
func getDmlChannelsFunc(ctx context.Context, rc types.RootCoord) getChannelsFuncType {
	return func(collectionID UniqueID) (channelInfos, error) {
		req := &milvuspb.DescribeCollectionRequest{
-			Base:         &commonpb.MsgBase{MsgType: commonpb.MsgType_DescribeCollection},
+			Base:         commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection)),
			CollectionID: collectionID,
		}
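
The hunk above, and the proxy hunks further down, replace hand-written commonpb.MsgBase literals with commonpbutil.NewMsgBase plus WithMsgType / WithMsgID / WithTimeStamp / WithSourceID options. The sketch below shows the functional-options shape this implies, using local stand-in types rather than the real commonpbutil package, whose full option set is not visible in this diff.

package example

// msgBase is a stand-in for commonpb.MsgBase.
type msgBase struct {
	MsgType   int32
	MsgID     int64
	Timestamp uint64
	SourceID  int64
}

// msgBaseOpt mirrors the option functions used in the diff
// (WithMsgType, WithMsgID, WithTimeStamp, WithSourceID).
type msgBaseOpt func(*msgBase)

func withMsgType(t int32) msgBaseOpt     { return func(b *msgBase) { b.MsgType = t } }
func withMsgID(id int64) msgBaseOpt      { return func(b *msgBase) { b.MsgID = id } }
func withTimeStamp(ts uint64) msgBaseOpt { return func(b *msgBase) { b.Timestamp = ts } }
func withSourceID(id int64) msgBaseOpt   { return func(b *msgBase) { b.SourceID = id } }

// newMsgBase builds a base with defaults, then applies each option in order,
// which is what lets call sites spell out only the fields they care about.
func newMsgBase(opts ...msgBaseOpt) *msgBase {
	b := &msgBase{}
	for _, opt := range opts {
		opt(b)
	}
	return b
}

// Example call site, shaped like the replacement in the hunk above.
var describeCollectionBase = newMsgBase(withMsgType(100 /* stand-in for MsgType_DescribeCollection */))

The gain over the literal struct is that defaults live in one constructor instead of being repeated at every call site, and call sites only spell out the fields they actually care about.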
@ -38,6 +38,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/proxypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/crypto"
|
||||
"github.com/milvus-io/milvus/internal/util/errorutil"
|
||||
"github.com/milvus-io/milvus/internal/util/logutil"
|
||||
|

@@ -1775,12 +1776,10 @@ func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPar

func (node *Proxy) getCollectionProgress(ctx context.Context, request *milvuspb.GetLoadingProgressRequest, collectionID int64) (int64, error) {
	resp, err := node.queryCoord.ShowCollections(ctx, &querypb.ShowCollectionsRequest{
-		Base: &commonpb.MsgBase{
-			MsgType:   commonpb.MsgType_ShowCollections,
-			MsgID:     request.Base.MsgID,
-			Timestamp: request.Base.Timestamp,
-			SourceID:  request.Base.SourceID,
-		},
+		Base: commonpbutil.NewMsgBaseCopy(
+			request.Base,
+			commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection),
+		),
		CollectionIDs: []int64{collectionID},
	})
	if err != nil {

@@ -1804,12 +1803,10 @@ func (node *Proxy) getPartitionProgress(ctx context.Context, request *milvuspb.G
		partitionIDs = append(partitionIDs, partitionID)
	}
	resp, err := node.queryCoord.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
-		Base: &commonpb.MsgBase{
-			MsgType:   commonpb.MsgType_ShowPartitions,
-			MsgID:     request.Base.MsgID,
-			Timestamp: request.Base.Timestamp,
-			SourceID:  request.Base.SourceID,
-		},
+		Base: commonpbutil.NewMsgBaseCopy(
+			request.Base,
+			commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
+		),
		CollectionID: collectionID,
		PartitionIDs: partitionIDs,
	})
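
getCollectionProgress and getPartitionProgress also stop copying MsgID, Timestamp and SourceID by hand and use commonpbutil.NewMsgBaseCopy, which starts from an existing base and then applies options. A hedged stand-in for that helper, reusing the msgBase and msgBaseOpt types from the earlier sketch, could look like this:

package example

// newMsgBaseCopy mirrors the shape of commonpbutil.NewMsgBaseCopy as used in
// the diff: start from a copy of the incoming request's base, then let the
// options override individual fields (here, the message type).
// msgBase and msgBaseOpt come from the stand-in sketch above.
func newMsgBaseCopy(src *msgBase, opts ...msgBaseOpt) *msgBase {
	b := &msgBase{}
	if src != nil {
		*b = *src // inherit MsgID, Timestamp, SourceID from the caller
	}
	for _, opt := range opts {
		opt(b)
	}
	return b
}

This keeps the caller's tracing fields intact while still letting the proxy override the message type it actually sends, which is exactly what the replaced field-by-field copy was doing.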
@ -1860,12 +1857,12 @@ func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.Get
|
|||
if err != nil {
|
||||
return getErrResponse(err), nil
|
||||
}
|
||||
msgBase := &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_SystemInfo,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
}
|
||||
msgBase := commonpbutil.NewMsgBase(
|
||||
commonpbutil.WithMsgType(commonpb.MsgType_SystemInfo),
|
||||
commonpbutil.WithMsgID(0),
|
||||
commonpbutil.WithTimeStamp(0),
|
||||
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
|
||||
)
|
||||
if request.Base == nil {
|
||||
request.Base = msgBase
|
||||
} else {
|
||||
|
@ -2472,11 +2469,11 @@ func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest)
|
|||
HashValues: request.HashKeys,
|
||||
},
|
||||
InsertRequest: internalpb.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Insert,
|
||||
MsgID: 0,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
Base: commonpbutil.NewMsgBase(
|
||||
commonpbutil.WithMsgType(commonpb.MsgType_Insert),
|
||||
commonpbutil.WithMsgID(0),
|
||||
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
|
||||
),
|
||||
CollectionName: request.CollectionName,
|
||||
PartitionName: request.PartitionName,
|
||||
FieldsData: request.FieldsData,
|
||||
|
@ -2603,10 +2600,10 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
|
|||
HashValues: request.HashKeys,
|
||||
},
|
||||
DeleteRequest: internalpb.DeleteRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Delete,
|
||||
MsgID: 0,
|
||||
},
|
||||
Base: commonpbutil.NewMsgBase(
|
||||
commonpbutil.WithMsgType(commonpb.MsgType_Delete),
|
||||
commonpbutil.WithMsgID(0),
|
||||
),
|
||||
DbName: request.DbName,
|
||||
CollectionName: request.CollectionName,
|
||||
PartitionName: request.PartitionName,
|
||||
|
@@ -2691,10 +2688,10 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
         ctx: ctx,
         Condition: NewTaskCondition(ctx),
         SearchRequest: &internalpb.SearchRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_Search,
-                SourceID: Params.ProxyCfg.GetNodeID(),
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_Search),
+                commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+            ),
             ReqID: Params.ProxyCfg.GetNodeID(),
         },
         request: request,

@@ -2937,10 +2934,10 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*
         ctx: ctx,
         Condition: NewTaskCondition(ctx),
         RetrieveRequest: &internalpb.RetrieveRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_Retrieve,
-                SourceID: Params.ProxyCfg.GetNodeID(),
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_Retrieve),
+                commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+            ),
             ReqID: Params.ProxyCfg.GetNodeID(),
         },
         request: request,

@@ -3347,10 +3344,10 @@ func (node *Proxy) CalcDistance(ctx context.Context, request *milvuspb.CalcDista
         ctx: ctx,
         Condition: NewTaskCondition(ctx),
         RetrieveRequest: &internalpb.RetrieveRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_Retrieve,
-                SourceID: Params.ProxyCfg.GetNodeID(),
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_Retrieve),
+                commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+            ),
             ReqID: Params.ProxyCfg.GetNodeID(),
         },
         request: queryRequest,

@@ -3458,12 +3455,12 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
 
     // get Segment info
     infoResp, err := node.dataCoord.GetSegmentInfo(ctx, &datapb.GetSegmentInfoRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_SegmentInfo,
-            MsgID: 0,
-            Timestamp: 0,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_SegmentInfo),
+            commonpbutil.WithMsgID(0),
+            commonpbutil.WithTimeStamp(0),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         SegmentIDs: getSegmentsByStatesResponse.Segments,
     })
     if err != nil {

@@ -3527,12 +3524,12 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
         return resp, nil
     }
     infoResp, err := node.queryCoord.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_SegmentInfo,
-            MsgID: 0,
-            Timestamp: 0,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_SegmentInfo),
+            commonpbutil.WithMsgID(0),
+            commonpbutil.WithTimeStamp(0),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         CollectionID: collID,
     })
     if err != nil {

@@ -3679,13 +3676,12 @@ func (node *Proxy) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsReque
     log.Debug("Proxy.GetMetrics",
         zap.String("metric_type", metricType))
 
-    req.Base = &commonpb.MsgBase{
-        MsgType: commonpb.MsgType_SystemInfo,
-        MsgID: 0,
-        Timestamp: 0,
-        SourceID: Params.ProxyCfg.GetNodeID(),
-    }
-
+    req.Base = commonpbutil.NewMsgBase(
+        commonpbutil.WithMsgType(commonpb.MsgType_SystemInfo),
+        commonpbutil.WithMsgID(0),
+        commonpbutil.WithTimeStamp(0),
+        commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+    )
     if metricType == metricsinfo.SystemInfoMetrics {
         ret, err := node.metricsCacheManager.GetSystemInfoMetrics()
         if err == nil && ret != nil {

@@ -3761,12 +3757,12 @@ func (node *Proxy) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetrics
     log.Debug("Proxy.GetProxyMetrics",
         zap.String("metric_type", metricType))
 
-    req.Base = &commonpb.MsgBase{
-        MsgType: commonpb.MsgType_SystemInfo,
-        MsgID: 0,
-        Timestamp: 0,
-        SourceID: Params.ProxyCfg.GetNodeID(),
-    }
+    req.Base = commonpbutil.NewMsgBase(
+        commonpbutil.WithMsgType(commonpb.MsgType_SystemInfo),
+        commonpbutil.WithMsgID(0),
+        commonpbutil.WithTimeStamp(0),
+        commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+    )
 
     if metricType == metricsinfo.SystemInfoMetrics {
         proxyMetrics, err := getProxyMetrics(ctx, req, node)

@@ -3827,12 +3823,12 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
         return status, nil
     }
     infoResp, err := node.queryCoord.LoadBalance(ctx, &querypb.LoadBalanceRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_LoadBalanceSegments,
-            MsgID: 0,
-            Timestamp: 0,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_LoadBalanceSegments),
+            commonpbutil.WithMsgID(0),
+            commonpbutil.WithTimeStamp(0),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         SourceNodeIDs: []int64{req.SrcNodeID},
         DstNodeIDs: req.DstNodeIDs,
         BalanceReason: querypb.TriggerCondition_GrpcRequest,

@@ -3880,7 +3876,7 @@ func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReq
     return resp, nil
 }
 
-//GetCompactionState gets the compaction state of multiple segments
+// GetCompactionState gets the compaction state of multiple segments
 func (node *Proxy) GetCompactionState(ctx context.Context, req *milvuspb.GetCompactionStateRequest) (*milvuspb.GetCompactionStateResponse, error) {
     log.Info("received GetCompactionState request", zap.Int64("compactionID", req.GetCompactionID()))
     resp := &milvuspb.GetCompactionStateResponse{}

@@ -3953,7 +3949,7 @@ func (node *Proxy) checkHealthyAndReturnCode() (commonpb.StateCode, bool) {
     return code, code == commonpb.StateCode_Healthy
 }
 
-//unhealthyStatus returns the proxy not healthy status
+// unhealthyStatus returns the proxy not healthy status
 func unhealthyStatus() *commonpb.Status {
     return &commonpb.Status{
         ErrorCode: commonpb.ErrorCode_UnexpectedError,

@@ -4246,9 +4242,9 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
         return &milvuspb.ListCredUsersResponse{Status: unhealthyStatus()}, nil
     }
     rootCoordReq := &milvuspb.ListCredUsersRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ListCredUsernames,
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_ListCredUsernames),
+        ),
     }
     resp, err := node.rootCoord.ListCredUsers(ctx, rootCoordReq)
     if err != nil {

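The hunks above all apply the same mechanical change: hand-rolled commonpb.MsgBase literals are replaced by the commonpbutil builder introduced later in this diff. A condensed sketch of the pattern, not taken from any single hunk (the function name and the sourceID argument are placeholders; the usual commonpb and commonpbutil imports are assumed):

    // buildSearchBase shows the old literal (kept as a comment) next to the
    // new builder call; the builder supplies MsgID/Timestamp defaults itself.
    func buildSearchBase(sourceID int64) *commonpb.MsgBase {
        // before:
        // return &commonpb.MsgBase{
        //     MsgType:  commonpb.MsgType_Search,
        //     SourceID: sourceID,
        // }
        // after:
        return commonpbutil.NewMsgBase(
            commonpbutil.WithMsgType(commonpb.MsgType_Search),
            commonpbutil.WithSourceID(sourceID),
        )
    }
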
@@ -24,6 +24,7 @@ import (
     "sync"
     "time"
 
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/funcutil"
 
     "go.uber.org/zap"

@@ -208,10 +209,10 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
     if !collInfo.isLoaded {
         // check if collection was loaded
         showResp, err := m.queryCoord.ShowCollections(ctx, &querypb.ShowCollectionsRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_ShowCollections,
-                SourceID: Params.ProxyCfg.GetNodeID(),
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
+                commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+            ),
             CollectionIDs: []int64{collInfo.collID},
         })
         if err != nil {

@@ -396,9 +397,9 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string,
 // Get the collection information from rootcoord.
 func (m *MetaCache) describeCollection(ctx context.Context, collectionName string) (*milvuspb.DescribeCollectionResponse, error) {
     req := &milvuspb.DescribeCollectionRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_DescribeCollection,
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection),
+        ),
         CollectionName: collectionName,
     }
     coll, err := m.rootCoord.DescribeCollection(ctx, req)

@@ -432,9 +433,9 @@ func (m *MetaCache) describeCollection(ctx context.Context, collectionName strin
 
 func (m *MetaCache) showPartitions(ctx context.Context, collectionName string) (*milvuspb.ShowPartitionsResponse, error) {
     req := &milvuspb.ShowPartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ShowPartitions,
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
+        ),
         CollectionName: collectionName,
     }
 

@@ -524,9 +525,9 @@ func (m *MetaCache) GetCredentialInfo(ctx context.Context, username string) (*in
 
     if !ok {
         req := &rootcoordpb.GetCredentialRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_GetCredential,
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_GetCredential),
+            ),
             Username: username,
         }
         resp, err := m.rootCoord.GetCredential(ctx, req)

@@ -583,10 +584,10 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, collectionNam
             zap.String("collectionName", collectionName))
     }
     req := &querypb.GetShardLeadersRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_GetShardLeaders,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_GetShardLeaders),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         CollectionID: info.collID,
     }
 

@@ -37,6 +37,7 @@ import (
     "github.com/milvus-io/milvus/internal/metrics"
     "github.com/milvus-io/milvus/internal/proto/internalpb"
     "github.com/milvus-io/milvus/internal/types"
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/dependency"
     "github.com/milvus-io/milvus/internal/util/logutil"
     "github.com/milvus-io/milvus/internal/util/metricsinfo"

@@ -299,12 +300,12 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
             }
 
             req := &internalpb.ChannelTimeTickMsg{
-                Base: &commonpb.MsgBase{
-                    MsgType: commonpb.MsgType_TimeTick, // todo
-                    MsgID: 0, // todo
-                    Timestamp: 0, // todo
-                    SourceID: node.session.ServerID,
-                },
+                Base: commonpbutil.NewMsgBase(
+                    commonpbutil.WithMsgType(commonpb.MsgType_TimeTick), // todo
+                    commonpbutil.WithMsgID(0), // todo
+                    commonpbutil.WithTimeStamp(0), // todo
+                    commonpbutil.WithSourceID(node.session.ServerID),
+                ),
                 ChannelNames: channels,
                 Timestamps: tss,
                 DefaultTimestamp: maxTs,

@@ -35,7 +35,7 @@ import (
     "github.com/milvus-io/milvus/internal/proto/indexpb"
     "github.com/milvus-io/milvus/internal/proto/querypb"
     "github.com/milvus-io/milvus/internal/types"
-
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/typeutil"
 )
 

@@ -141,7 +141,7 @@ func (cct *createCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (cct *createCollectionTask) OnEnqueue() error {
-    cct.Base = &commonpb.MsgBase{}
+    cct.Base = commonpbutil.NewMsgBase()
     cct.Base.MsgType = commonpb.MsgType_CreateCollection
     cct.Base.SourceID = Params.ProxyCfg.GetNodeID()
     return nil

@@ -278,7 +278,7 @@ func (dct *dropCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (dct *dropCollectionTask) OnEnqueue() error {
-    dct.Base = &commonpb.MsgBase{}
+    dct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -348,7 +348,7 @@ func (hct *hasCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (hct *hasCollectionTask) OnEnqueue() error {
-    hct.Base = &commonpb.MsgBase{}
+    hct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -422,7 +422,7 @@ func (dct *describeCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (dct *describeCollectionTask) OnEnqueue() error {
-    dct.Base = &commonpb.MsgBase{}
+    dct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -540,7 +540,7 @@ func (sct *showCollectionsTask) SetTs(ts Timestamp) {
 }
 
 func (sct *showCollectionsTask) OnEnqueue() error {
-    sct.Base = &commonpb.MsgBase{}
+    sct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -592,12 +592,10 @@ func (sct *showCollectionsTask) Execute(ctx context.Context) error {
     }
 
     resp, err := sct.queryCoord.ShowCollections(ctx, &querypb.ShowCollectionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ShowCollections,
-            MsgID: sct.Base.MsgID,
-            Timestamp: sct.Base.Timestamp,
-            SourceID: sct.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            sct.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
+        ),
         //DbID: sct.ShowCollectionsRequest.DbName,
         CollectionIDs: collectionIDs,
     })

@@ -701,7 +699,7 @@ func (act *alterCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (act *alterCollectionTask) OnEnqueue() error {
-    act.Base = &commonpb.MsgBase{}
+    act.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -763,7 +761,7 @@ func (cpt *createPartitionTask) SetTs(ts Timestamp) {
 }
 
 func (cpt *createPartitionTask) OnEnqueue() error {
-    cpt.Base = &commonpb.MsgBase{}
+    cpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -840,7 +838,7 @@ func (dpt *dropPartitionTask) SetTs(ts Timestamp) {
 }
 
 func (dpt *dropPartitionTask) OnEnqueue() error {
-    dpt.Base = &commonpb.MsgBase{}
+    dpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -917,7 +915,7 @@ func (hpt *hasPartitionTask) SetTs(ts Timestamp) {
 }
 
 func (hpt *hasPartitionTask) OnEnqueue() error {
-    hpt.Base = &commonpb.MsgBase{}
+    hpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -994,7 +992,7 @@ func (spt *showPartitionsTask) SetTs(ts Timestamp) {
 }
 
 func (spt *showPartitionsTask) OnEnqueue() error {
-    spt.Base = &commonpb.MsgBase{}
+    spt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1056,12 +1054,10 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
         IDs2Names[partitionID] = partitionName
     }
     resp, err := spt.queryCoord.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ShowCollections,
-            MsgID: spt.Base.MsgID,
-            Timestamp: spt.Base.Timestamp,
-            SourceID: spt.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            spt.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
+        ),
         CollectionID: collectionID,
         PartitionIDs: partitionIDs,
     })

@@ -1158,7 +1154,7 @@ func (ft *flushTask) SetTs(ts Timestamp) {
 }
 
 func (ft *flushTask) OnEnqueue() error {
-    ft.Base = &commonpb.MsgBase{}
+    ft.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1178,12 +1174,10 @@ func (ft *flushTask) Execute(ctx context.Context) error {
             return err
         }
         flushReq := &datapb.FlushRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_Flush,
-                MsgID: ft.Base.MsgID,
-                Timestamp: ft.Base.Timestamp,
-                SourceID: ft.Base.SourceID,
-            },
+            Base: commonpbutil.NewMsgBaseCopy(
+                ft.Base,
+                commonpbutil.WithMsgType(commonpb.MsgType_Flush),
+            ),
             DbID: 0,
             CollectionID: collID,
         }

@@ -1259,7 +1253,7 @@ func (lct *loadCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (lct *loadCollectionTask) OnEnqueue() error {
-    lct.Base = &commonpb.MsgBase{}
+    lct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1322,12 +1316,10 @@ func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
         return errors.New(errMsg)
     }
     request := &querypb.LoadCollectionRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_LoadCollection,
-            MsgID: lct.Base.MsgID,
-            Timestamp: lct.Base.Timestamp,
-            SourceID: lct.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            lct.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_LoadCollection),
+        ),
         DbID: 0,
         CollectionID: collID,
         Schema: collSchema,

@@ -1394,7 +1386,7 @@ func (rct *releaseCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (rct *releaseCollectionTask) OnEnqueue() error {
-    rct.Base = &commonpb.MsgBase{}
+    rct.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1418,12 +1410,10 @@ func (rct *releaseCollectionTask) Execute(ctx context.Context) (err error) {
     }
     rct.collectionID = collID
     request := &querypb.ReleaseCollectionRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ReleaseCollection,
-            MsgID: rct.Base.MsgID,
-            Timestamp: rct.Base.Timestamp,
-            SourceID: rct.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            rct.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_ReleaseCollection),
+        ),
         DbID: 0,
         CollectionID: collID,
     }

@@ -1484,7 +1474,7 @@ func (lpt *loadPartitionsTask) SetTs(ts Timestamp) {
 }
 
 func (lpt *loadPartitionsTask) OnEnqueue() error {
-    lpt.Base = &commonpb.MsgBase{}
+    lpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1547,12 +1537,10 @@ func (lpt *loadPartitionsTask) Execute(ctx context.Context) error {
         partitionIDs = append(partitionIDs, partitionID)
     }
     request := &querypb.LoadPartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_LoadPartitions,
-            MsgID: lpt.Base.MsgID,
-            Timestamp: lpt.Base.Timestamp,
-            SourceID: lpt.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            lpt.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_LoadPartitions),
+        ),
         DbID: 0,
         CollectionID: collID,
         PartitionIDs: partitionIDs,

@@ -1611,7 +1599,7 @@ func (rpt *releasePartitionsTask) SetTs(ts Timestamp) {
 }
 
 func (rpt *releasePartitionsTask) OnEnqueue() error {
-    rpt.Base = &commonpb.MsgBase{}
+    rpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1643,12 +1631,10 @@ func (rpt *releasePartitionsTask) Execute(ctx context.Context) (err error) {
         partitionIDs = append(partitionIDs, partitionID)
     }
     request := &querypb.ReleasePartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ReleasePartitions,
-            MsgID: rpt.Base.MsgID,
-            Timestamp: rpt.Base.Timestamp,
-            SourceID: rpt.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            rpt.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_ReleasePartitions),
+        ),
         DbID: 0,
         CollectionID: collID,
         PartitionIDs: partitionIDs,

@@ -1713,7 +1699,7 @@ func (c *CreateAliasTask) SetTs(ts Timestamp) {
 
 // OnEnqueue defines the behavior task enqueued
 func (c *CreateAliasTask) OnEnqueue() error {
-    c.Base = &commonpb.MsgBase{}
+    c.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1793,7 +1779,7 @@ func (d *DropAliasTask) SetTs(ts Timestamp) {
 }
 
 func (d *DropAliasTask) OnEnqueue() error {
-    d.Base = &commonpb.MsgBase{}
+    d.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -1859,7 +1845,7 @@ func (a *AlterAliasTask) SetTs(ts Timestamp) {
 }
 
 func (a *AlterAliasTask) OnEnqueue() error {
-    a.Base = &commonpb.MsgBase{}
+    a.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

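Several of the Execute hunks above switch to commonpbutil.NewMsgBaseCopy so the outgoing coordinator request keeps the task's MsgID, Timestamp and SourceID while only the MsgType is overridden. A minimal sketch of that pattern, not lifted from any specific hunk (the function name, taskBase argument and the Flush message type are illustrative):

    // deriveRequestBase forwards a task's MsgBase into a downstream request.
    // Note that NewMsgBaseCopy, as defined later in this diff, applies the
    // options to the MsgBase it receives rather than to a deep copy.
    func deriveRequestBase(taskBase *commonpb.MsgBase) *commonpb.MsgBase {
        return commonpbutil.NewMsgBaseCopy(
            taskBase,
            commonpbutil.WithMsgType(commonpb.MsgType_Flush),
        )
    }
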
@@ -31,6 +31,7 @@ import (
     "github.com/milvus-io/milvus/internal/log"
     "github.com/milvus-io/milvus/internal/proto/indexpb"
     "github.com/milvus-io/milvus/internal/types"
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/funcutil"
     "github.com/milvus-io/milvus/internal/util/indexparamcheck"
     "github.com/milvus-io/milvus/internal/util/indexparams"

@@ -96,7 +97,7 @@ func (cit *createIndexTask) SetTs(ts Timestamp) {
 }
 
 func (cit *createIndexTask) OnEnqueue() error {
-    cit.req.Base = &commonpb.MsgBase{}
+    cit.req.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -361,7 +362,7 @@ func (dit *describeIndexTask) SetTs(ts Timestamp) {
 }
 
 func (dit *describeIndexTask) OnEnqueue() error {
-    dit.Base = &commonpb.MsgBase{}
+    dit.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -471,7 +472,7 @@ func (dit *dropIndexTask) SetTs(ts Timestamp) {
 }
 
 func (dit *dropIndexTask) OnEnqueue() error {
-    dit.Base = &commonpb.MsgBase{}
+    dit.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -565,7 +566,7 @@ func (gibpt *getIndexBuildProgressTask) SetTs(ts Timestamp) {
 }
 
 func (gibpt *getIndexBuildProgressTask) OnEnqueue() error {
-    gibpt.Base = &commonpb.MsgBase{}
+    gibpt.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -658,7 +659,7 @@ func (gist *getIndexStateTask) SetTs(ts Timestamp) {
 }
 
 func (gist *getIndexStateTask) OnEnqueue() error {
-    gist.Base = &commonpb.MsgBase{}
+    gist.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -13,6 +13,7 @@ import (
     "github.com/milvus-io/milvus/internal/metrics"
     "github.com/milvus-io/milvus/internal/mq/msgstream"
     "github.com/milvus-io/milvus/internal/proto/internalpb"
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/retry"
     "github.com/milvus-io/milvus/internal/util/timerecord"
     "github.com/milvus-io/milvus/internal/util/trace"

@@ -308,12 +309,12 @@ func (it *insertTask) assignSegmentID(channelNames []string) (*msgstream.MsgPack
     // create empty insert message
     createInsertMsg := func(segmentID UniqueID, channelName string, msgID int64) *msgstream.InsertMsg {
         insertReq := internalpb.InsertRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_Insert,
-                MsgID: msgID,
-                Timestamp: it.BeginTimestamp, // entity's timestamp was set to equal it.BeginTimestamp in preExecute()
-                SourceID: it.Base.SourceID,
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_Insert),
+                commonpbutil.WithMsgID(msgID),
+                commonpbutil.WithTimeStamp(it.BeginTimestamp), // entity's timestamp was set to equal it.BeginTimestamp in preExecute()
+                commonpbutil.WithSourceID(it.Base.SourceID),
+            ),
             CollectionID: it.CollectionID,
             PartitionID: it.PartitionID,
             CollectionName: it.CollectionName,

@@ -18,6 +18,7 @@ import (
     "github.com/milvus-io/milvus/internal/metrics"
     "github.com/milvus-io/milvus/internal/types"
 
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/distance"
     "github.com/milvus-io/milvus/internal/util/funcutil"
     "github.com/milvus-io/milvus/internal/util/grpcclient"

@@ -560,10 +561,10 @@ func checkIfLoaded(ctx context.Context, qc types.QueryCoord, collectionName stri
 
     // If request to search partitions
     resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ShowPartitions,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         CollectionID: info.collID,
         PartitionIDs: searchPartitionIDs,
     })

@@ -841,7 +842,7 @@ func (t *searchTask) SetTs(ts Timestamp) {
 }
 
 func (t *searchTask) OnEnqueue() error {
-    t.Base = &commonpb.MsgBase{}
+    t.Base = commonpbutil.NewMsgBase()
     t.Base.MsgType = commonpb.MsgType_Search
     t.Base.SourceID = Params.ProxyCfg.GetNodeID()
     return nil

@@ -14,6 +14,7 @@ import (
     "github.com/milvus-io/milvus/internal/proto/internalpb"
     "github.com/milvus-io/milvus/internal/proto/querypb"
     "github.com/milvus-io/milvus/internal/types"
+    "github.com/milvus-io/milvus/internal/util/commonpbutil"
     "github.com/milvus-io/milvus/internal/util/funcutil"
     "github.com/milvus-io/milvus/internal/util/grpcclient"
     "github.com/milvus-io/milvus/internal/util/timerecord"

@@ -88,7 +89,7 @@ func (g *getStatisticsTask) SetTs(ts Timestamp) {
 
 func (g *getStatisticsTask) OnEnqueue() error {
     g.GetStatisticsRequest = &internalpb.GetStatisticsRequest{
-        Base: &commonpb.MsgBase{},
+        Base: commonpbutil.NewMsgBase(),
     }
     return nil
 }

@@ -231,12 +232,10 @@ func (g *getStatisticsTask) getStatisticsFromDataCoord(ctx context.Context) erro
     partIDs := g.unloadedPartitionIDs
 
     req := &datapb.GetPartitionStatisticsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_GetPartitionStatistics,
-            MsgID: g.Base.MsgID,
-            Timestamp: g.Base.Timestamp,
-            SourceID: g.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            g.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_GetPartitionStatistics),
+        ),
         CollectionID: collID,
         PartitionIDs: partIDs,
     }

@@ -326,10 +325,10 @@ func checkFullLoaded(ctx context.Context, qc types.QueryCoord, collectionName st
     // If request to search partitions
     if len(searchPartitionIDs) > 0 {
         resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
-            Base: &commonpb.MsgBase{
-                MsgType: commonpb.MsgType_ShowPartitions,
-                SourceID: Params.ProxyCfg.GetNodeID(),
-            },
+            Base: commonpbutil.NewMsgBase(
+                commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
+                commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+            ),
             CollectionID: info.collID,
             PartitionIDs: searchPartitionIDs,
         })

@@ -351,10 +350,10 @@ func checkFullLoaded(ctx context.Context, qc types.QueryCoord, collectionName st
 
     // If request to search collection
     resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_ShowPartitions,
-            SourceID: Params.ProxyCfg.GetNodeID(),
-        },
+        Base: commonpbutil.NewMsgBase(
+            commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
+            commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
+        ),
         CollectionID: info.collID,
     })
     if err != nil {

@@ -628,7 +627,7 @@ func (g *getCollectionStatisticsTask) SetTs(ts Timestamp) {
 }
 
 func (g *getCollectionStatisticsTask) OnEnqueue() error {
-    g.Base = &commonpb.MsgBase{}
+    g.Base = commonpbutil.NewMsgBase()
     return nil
 }
 

@@ -645,12 +644,10 @@ func (g *getCollectionStatisticsTask) Execute(ctx context.Context) error {
     }
     g.collectionID = collID
     req := &datapb.GetCollectionStatisticsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_GetCollectionStatistics,
-            MsgID: g.Base.MsgID,
-            Timestamp: g.Base.Timestamp,
-            SourceID: g.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            g.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_GetCollectionStatistics),
+        ),
         CollectionID: collID,
     }
 

@@ -718,7 +715,7 @@ func (g *getPartitionStatisticsTask) SetTs(ts Timestamp) {
 }
 
 func (g *getPartitionStatisticsTask) OnEnqueue() error {
-    g.Base = &commonpb.MsgBase{}
+    g.Base = commonpbutil.NewMsgBase()
     return nil
 }
 
@@ -739,12 +736,10 @@ func (g *getPartitionStatisticsTask) Execute(ctx context.Context) error {
         return err
     }
     req := &datapb.GetPartitionStatisticsRequest{
-        Base: &commonpb.MsgBase{
-            MsgType: commonpb.MsgType_GetPartitionStatistics,
-            MsgID: g.Base.MsgID,
-            Timestamp: g.Base.Timestamp,
-            SourceID: g.Base.SourceID,
-        },
+        Base: commonpbutil.NewMsgBaseCopy(
+            g.Base,
+            commonpbutil.WithMsgType(commonpb.MsgType_GetPartitionStatistics),
+        ),
         CollectionID: collID,
         PartitionIDs: []int64{partitionID},
     }

|
|||
"github.com/milvus-io/milvus-proto/go-api/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/metrics"
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/internal/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/internal/util/timerecord"
|
||||
)
|
||||
|
||||
|
@ -49,12 +50,12 @@ func (ta *timestampAllocator) alloc(count uint32) ([]Timestamp, error) {
|
|||
tr := timerecord.NewTimeRecorder("applyTimestamp")
|
||||
ctx, cancel := context.WithTimeout(ta.ctx, 5*time.Second)
|
||||
req := &rootcoordpb.AllocTimestampRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_RequestTSO,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: ta.peerID,
|
||||
},
|
||||
Base: commonpbutil.NewMsgBase(
|
||||
commonpbutil.WithMsgType(commonpb.MsgType_RequestTSO),
|
||||
commonpbutil.WithMsgID(0),
|
||||
commonpbutil.WithTimeStamp(0),
|
||||
commonpbutil.WithSourceID(ta.peerID),
|
||||
),
|
||||
Count: count,
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,92 @@
+// Licensed to the LF AI & Data foundation under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package commonpbutil
+
+import (
+    "time"
+
+    "github.com/milvus-io/milvus-proto/go-api/commonpb"
+    "github.com/milvus-io/milvus/internal/util/paramtable"
+)
+
+const MsgIDNeedFill int64 = -2
+
+var Params paramtable.ComponentParam
+
+type MsgBaseOptions func(*commonpb.MsgBase)
+
+func WithMsgType(msgType commonpb.MsgType) MsgBaseOptions {
+    return func(msgBase *commonpb.MsgBase) {
+        msgBase.MsgType = msgType
+    }
+}
+
+func WithMsgID(msgID int64) MsgBaseOptions {
+    return func(msgBase *commonpb.MsgBase) {
+        msgBase.MsgID = msgID
+    }
+}
+
+func WithTimeStamp(ts uint64) MsgBaseOptions {
+    return func(msgBase *commonpb.MsgBase) {
+        msgBase.Timestamp = ts
+    }
+}
+
+func WithSourceID(sourceID int64) MsgBaseOptions {
+    return func(msgBase *commonpb.MsgBase) {
+        msgBase.SourceID = sourceID
+    }
+}
+
+func GetNowTimestamp() uint64 {
+    return uint64(time.Now().Unix())
+}
+
+func SetTargetID(msgBase *commonpb.MsgBase, targetID int64) *commonpb.MsgBase {
+    if msgBase == nil {
+        return nil
+    }
+    msgBase.TargetID = targetID
+    return msgBase
+}
+
+func NewMsgBaseDefault() *commonpb.MsgBase {
+    return &commonpb.MsgBase{
+        MsgType: commonpb.MsgType_Undefined,
+        MsgID: MsgIDNeedFill,
+        Timestamp: GetNowTimestamp(),
+        SourceID: Params.DataCoordCfg.GetNodeID(),
+    }
+}
+
+func NewMsgBase(options ...MsgBaseOptions) *commonpb.MsgBase {
+    msgBase := NewMsgBaseDefault()
+    for _, op := range options {
+        op(msgBase)
+    }
+    return msgBase
+}
+
+func NewMsgBaseCopy(msgBase *commonpb.MsgBase, options ...MsgBaseOptions) *commonpb.MsgBase {
+    msgBaseRt := msgBase
+    for _, op := range options {
+        op(msgBaseRt)
+    }
+    return msgBaseRt
+
+}

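A short usage sketch of the package added above (not part of the diff; the function name and the sourceID/targetID arguments are placeholders, and the commonpb/commonpbutil imports shown elsewhere in this diff are assumed):

    // newFlushBase builds a MsgBase with the functional options and stamps the
    // receiving coordinator's server ID via SetTargetID.
    func newFlushBase(sourceID, targetID int64) *commonpb.MsgBase {
        base := commonpbutil.NewMsgBase(
            commonpbutil.WithMsgType(commonpb.MsgType_Flush),
            commonpbutil.WithSourceID(sourceID),
        )
        return commonpbutil.SetTargetID(base, targetID)
    }

Fields not set by an option keep the defaults from NewMsgBaseDefault: MsgType_Undefined, MsgID set to MsgIDNeedFill (-2), the current Unix time as Timestamp, and Params.DataCoordCfg.GetNodeID() as SourceID.
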
@@ -49,6 +49,8 @@ type GrpcClient[T any] interface {
     ReCall(ctx context.Context, caller func(client T) (any, error)) (any, error)
     Call(ctx context.Context, caller func(client T) (any, error)) (any, error)
     Close() error
+    SetNodeID(int64)
+    GetNodeID() int64
 }
 
 // ClientBase is a base of grpc client

@@ -73,6 +75,7 @@ type ClientBase[T any] struct {
     InitialBackoff float32
     MaxBackoff float32
     BackoffMultiplier float32
+    NodeID int64
 }
 
 // SetRole sets role of client

@@ -314,3 +317,13 @@ func (c *ClientBase[T]) Close() error {
     }
     return nil
 }
+
+// SetNodeID sets the node ID of the client
+func (c *ClientBase[T]) SetNodeID(nodeID int64) {
+    c.NodeID = nodeID
+}
+
+// GetNodeID returns ID of client
+func (c *ClientBase[T]) GetNodeID() int64 {
+    return c.NodeID
+}

@@ -40,6 +40,7 @@ type GRPCClientBase[T any] struct {
     grpcClientMtx sync.RWMutex
     GetGrpcClientErr error
     role string
+    nodeID int64
 }
 
 func (c *GRPCClientBase[T]) SetGetAddrFunc(f func() (string, error)) {

@@ -154,3 +155,11 @@ func (c *GRPCClientBase[T]) Close() error {
     }
     return nil
 }
+
+func (c *GRPCClientBase[T]) GetNodeID() int64 {
+    return c.nodeID
+}
+
+func (c *GRPCClientBase[T]) SetNodeID(nodeID int64) {
+    c.nodeID = nodeID
+}

@@ -22,9 +22,15 @@ import (
     "math"
     "reflect"
 
+    "github.com/golang/protobuf/proto"
     "github.com/milvus-io/milvus/internal/common"
 )
 
+// Generic Clone for proto message
+func Clone[T proto.Message](msg T) T {
+    return proto.Clone(msg).(T)
+}
+
 // Float32ToBytes converts a float to byte slice.
 func Float32ToBytes(float float32) []byte {
     bits := math.Float32bits(float)

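A minimal sketch of the generic Clone helper added above (the MsgBase message type and the function name are chosen only for illustration; the typeutil and commonpb imports are assumed):

    // cloneBase copies a proto message so the copy can be mutated without
    // touching the caller's instance; Clone works for any proto.Message.
    func cloneBase(in *commonpb.MsgBase) *commonpb.MsgBase {
        out := typeutil.Clone(in)
        out.MsgID = 0
        return out
    }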