mirror of https://github.com/milvus-io/milvus.git
Fix Data race in NodeID (#16603)
Signed-off-by: xiaofan-luan <xiaofan.luan@zilliz.com>
Branch: pull/16571/head
parent 0667b50bb4
commit 89b4a34892
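The change below replaces every direct read of the NodeID field with a GetNodeID() accessor and every direct assignment with SetNodeID(), so the server ID written during session initialization no longer races with concurrent readers in request handlers and metric collectors. The accessor implementation itself is not part of the hunks shown here; the following is only a minimal sketch of the pattern, assuming an atomic-based guard, with illustrative type and package names that are not taken from this commit:

// Hypothetical sketch (not from this diff): synchronize NodeID behind accessors.
package paramtable

import "sync/atomic"

// nodeConfig stands in for components such as DataCoordCfg or DataNodeCfg.
type nodeConfig struct {
	nodeID atomic.Value // stores an int64
}

// SetNodeID publishes the server ID assigned at session init.
func (c *nodeConfig) SetNodeID(id int64) {
	c.nodeID.Store(id)
}

// GetNodeID returns the last published ID (0 if never set); safe to call
// concurrently with SetNodeID.
func (c *nodeConfig) GetNodeID() int64 {
	if v := c.nodeID.Load(); v != nil {
		return v.(int64)
	}
	return 0
}

With this shape, the goroutine that initializes the session can call SetNodeID while other goroutines call GetNodeID, without tripping the Go race detector. The reconstructed hunks of the commit follow.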
@@ -53,7 +53,7 @@ func (alloc *rootCoordAllocator) allocTimestamp(ctx context.Context) (Timestamp,
 MsgType: commonpb.MsgType_RequestTSO,
 MsgID: 0,
 Timestamp: 0,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 Count: 1,
 })
@@ -70,7 +70,7 @@ func (alloc *rootCoordAllocator) allocID(ctx context.Context) (UniqueID, error)
 MsgType: commonpb.MsgType_RequestID,
 MsgID: 0,
 Timestamp: 0,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 Count: 1,
 })
@@ -125,7 +125,7 @@ func (c *Cluster) Flush(ctx context.Context, segments []*datapb.SegmentInfo, mar
 req := &datapb.FlushSegmentsRequest{
 Base: &commonpb.MsgBase{
 MsgType: commonpb.MsgType_Flush,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 CollectionID: collectionID,
 SegmentIDs: segments,
@@ -59,7 +59,7 @@ func (s *Server) getSystemInfoMetrics(
 coordTopology := metricsinfo.DataCoordTopology{
 Cluster: clusterTopology,
 Connections: metricsinfo.ConnTopology{
-Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.NodeID),
+Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
 // TODO(dragondriver): fill ConnectedComponents if necessary
 ConnectedComponents: []metricsinfo.ConnectionInfo{},
 },
@@ -70,7 +70,7 @@ func (s *Server) getSystemInfoMetrics(
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
 },
 Response: "",
-ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
 }
 var err error
 resp.Response, err = metricsinfo.MarshalTopology(coordTopology)
@@ -87,7 +87,7 @@ func (s *Server) getSystemInfoMetrics(
 func (s *Server) getDataCoordMetrics() metricsinfo.DataCoordInfos {
 ret := metricsinfo.DataCoordInfos{
 BaseComponentInfos: metricsinfo.BaseComponentInfos{
-Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.NodeID),
+Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
 HardwareInfos: metricsinfo.HardwareMetrics{
 IP: s.session.Address,
 CPUCoreCount: metricsinfo.GetCPUCoreCount(false),
@@ -241,8 +241,8 @@ func (s *Server) initSession() error {
 return errors.New("failed to initialize session")
 }
 s.session.Init(typeutil.DataCoordRole, Params.DataCoordCfg.Address, true, true)
-Params.DataCoordCfg.NodeID = s.session.ServerID
-Params.SetLogger(Params.DataCoordCfg.NodeID)
+Params.DataCoordCfg.SetNodeID(s.session.ServerID)
+Params.SetLogger(Params.DataCoordCfg.GetNodeID())
 return nil
 }

@@ -814,7 +814,7 @@ func (s *Server) loadCollectionFromRootCoord(ctx context.Context, collectionID i
 resp, err := s.rootCoordClient.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
 Base: &commonpb.MsgBase{
 MsgType: commonpb.MsgType_DescribeCollection,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 DbName: "",
 CollectionID: collectionID,
@@ -827,7 +827,7 @@ func (s *Server) loadCollectionFromRootCoord(ctx context.Context, collectionID i
 MsgType: commonpb.MsgType_ShowPartitions,
 MsgID: 0,
 Timestamp: 0,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 DbName: "",
 CollectionName: resp.Schema.Name,
@@ -1809,7 +1809,7 @@ func TestGetCompactionState(t *testing.T) {
 resp, err := svr.GetCompactionState(context.Background(), &milvuspb.GetCompactionStateRequest{})
 assert.Nil(t, err)
 assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
-assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID), resp.GetStatus().GetReason())
+assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.GetStatus().GetReason())
 })
 }

@@ -1853,7 +1853,7 @@ func TestCompleteCompaction(t *testing.T) {
 resp, err := svr.CompleteCompaction(context.Background(), &datapb.CompactionResult{})
 assert.Nil(t, err)
 assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetErrorCode())
-assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID), resp.GetReason())
+assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.GetReason())
 })
 }

@@ -1914,7 +1914,7 @@ func TestManualCompaction(t *testing.T) {
 })
 assert.Nil(t, err)
 assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
-assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID), resp.Status.Reason)
+assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.Reason)
 })
 }

@@ -1964,7 +1964,7 @@ func TestGetCompactionStateWithPlans(t *testing.T) {
 })
 assert.Nil(t, err)
 assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
-assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID), resp.Status.Reason)
+assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.Reason)
 })
 }

@@ -2443,7 +2443,7 @@ func TestImport(t *testing.T) {
 })
 assert.Nil(t, err)
 assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.GetErrorCode())
-assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID), resp.Status.GetReason())
+assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.GetReason())
 })

 t.Run("test update segment stat", func(t *testing.T) {
@@ -605,7 +605,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
 dresp, err := s.rootCoordClient.DescribeCollection(s.ctx, &milvuspb.DescribeCollectionRequest{
 Base: &commonpb.MsgBase{
 MsgType: commonpb.MsgType_DescribeCollection,
-SourceID: Params.DataCoordCfg.NodeID,
+SourceID: Params.DataCoordCfg.GetNodeID(),
 },
 CollectionID: collectionID,
 })
@@ -676,19 +676,19 @@ func (s *Server) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedS
 // it may include SystemMetrics, Topology metrics, etc.
 func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
 log.Debug("received get metrics request",
-zap.Int64("nodeID", Params.DataCoordCfg.NodeID),
+zap.Int64("nodeID", Params.DataCoordCfg.GetNodeID()),
 zap.String("request", req.Request))

 if s.isClosed() {
 log.Warn("DataCoord.GetMetrics failed",
-zap.Int64("node_id", Params.DataCoordCfg.NodeID),
+zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
 zap.String("req", req.Request),
-zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
+zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))

 return &milvuspb.GetMetricsResponse{
 Status: &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
-Reason: msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID),
+Reason: msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()),
 },
 Response: "",
 }, nil
@@ -697,7 +697,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
 metricType, err := metricsinfo.ParseMetricType(req.Request)
 if err != nil {
 log.Warn("DataCoord.GetMetrics failed to parse metric type",
-zap.Int64("node_id", Params.DataCoordCfg.NodeID),
+zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.Error(err))

@@ -724,7 +724,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
 metrics, err := s.getSystemInfoMetrics(ctx, req)

 log.Debug("DataCoord.GetMetrics",
-zap.Int64("node_id", Params.DataCoordCfg.NodeID),
+zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType),
 zap.Any("metrics", metrics), // TODO(dragondriver): necessary? may be very large
@@ -736,7 +736,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
 }

 log.RatedWarn(60.0, "DataCoord.GetMetrics failed, request metric type is not implemented yet",
-zap.Int64("node_id", Params.DataCoordCfg.NodeID),
+zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType))

@@ -759,9 +759,9 @@ func (s *Server) CompleteCompaction(ctx context.Context, req *datapb.CompactionR

 if s.isClosed() {
 log.Warn("failed to complete compaction", zap.Int64("planID", req.PlanID),
-zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
+zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))

-resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -793,8 +793,8 @@ func (s *Server) ManualCompaction(ctx context.Context, req *milvuspb.ManualCompa

 if s.isClosed() {
 log.Warn("failed to execute manual compaction", zap.Int64("collectionID", req.GetCollectionID()),
-zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -834,8 +834,8 @@ func (s *Server) GetCompactionState(ctx context.Context, req *milvuspb.GetCompac

 if s.isClosed() {
 log.Warn("failed to get compaction state", zap.Int64("compactionID", req.GetCompactionID()),
-zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -866,8 +866,8 @@ func (s *Server) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.
 }

 if s.isClosed() {
-log.Warn("failed to get compaction state with plans", zap.Int64("compactionID", req.GetCompactionID()), zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+log.Warn("failed to get compaction state with plans", zap.Int64("compactionID", req.GetCompactionID()), zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -937,8 +937,8 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq

 if s.isClosed() {
 log.Warn("failed to watch channels request", zap.Any("channels", req.GetChannelNames()),
-zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }
 for _, channelName := range req.GetChannelNames() {
@@ -966,7 +966,7 @@ func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateR
 if s.isClosed() {
 log.Warn("failed to get flush state because of closed server",
 zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Int("len", len(req.GetSegmentIDs())))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -1005,7 +1005,7 @@ func (s *Server) Import(ctx context.Context, itr *datapb.ImportTaskRequest) (*da

 if s.isClosed() {
 log.Warn("failed to import because of closed server", zap.Any("import task request", itr))
-resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }

@@ -1044,7 +1044,7 @@ func (s *Server) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
 }
 if s.isClosed() {
 log.Warn("failed to update segment stat for closed server")
-resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.NodeID)
+resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
 return resp, nil
 }
 s.updateSegmentStatistics(req.GetStats())
@@ -54,7 +54,7 @@ func (alloc *allocator) allocID() (UniqueID, error) {
 MsgType: commonpb.MsgType_RequestID,
 MsgID: 1, // GOOSE TODO
 Timestamp: 0, // GOOSE TODO
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 Count: 1,
 })
@@ -76,7 +76,7 @@ func (alloc *allocator) allocIDBatch(count uint32) (UniqueID, uint32, error) {
 resp, err := alloc.rootCoord.AllocID(ctx, &rootcoordpb.AllocIDRequest{
 Base: &commonpb.MsgBase{
 MsgType: commonpb.MsgType_RequestID,
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 Count: count,
 })
@@ -561,7 +561,7 @@ func (t *compactionTask) compact() error {
 )

 log.Info("overall elapse in ms", zap.Int64("planID", t.plan.GetPlanID()), zap.Any("elapse", nano2Milli(time.Since(compactStart))))
-metrics.DataNodeCompactionLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Observe(float64(t.tr.ElapseSpan().Milliseconds()))
+metrics.DataNodeCompactionLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Observe(float64(t.tr.ElapseSpan().Milliseconds()))

 return nil
 }
@@ -174,11 +174,6 @@ func (node *DataNode) SetDataCoord(ds types.DataCoord) error {
 }
 }

-// SetNodeID set node id for DataNode
-func (node *DataNode) SetNodeID(id UniqueID) {
-node.NodeID = id
-}
-
 // Register register datanode to etcd
 func (node *DataNode) Register() error {
 node.session.Register()
@@ -206,9 +201,8 @@ func (node *DataNode) initSession() error {
 return errors.New("failed to initialize session")
 }
 node.session.Init(typeutil.DataNodeRole, Params.DataNodeCfg.IP+":"+strconv.Itoa(Params.DataNodeCfg.Port), false, true)
-Params.DataNodeCfg.NodeID = node.session.ServerID
-node.NodeID = node.session.ServerID
-Params.SetLogger(Params.DataNodeCfg.NodeID)
+Params.DataNodeCfg.SetNodeID(node.session.ServerID)
+Params.SetLogger(Params.DataNodeCfg.GetNodeID())
 return nil
 }

@@ -233,7 +227,7 @@ func (node *DataNode) Init() error {
 func (node *DataNode) StartWatchChannels(ctx context.Context) {
 defer logutil.LogPanic()
 // REF MEP#7 watch path should be [prefix]/channel/{node_id}/{channel_name}
-watchPrefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", node.NodeID))
+watchPrefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()))
 evtChan := node.watchKv.WatchWithPrefix(watchPrefix)
 // after watch, first check all exists nodes first
 err := node.checkWatchedList()
@@ -273,7 +267,7 @@ func (node *DataNode) StartWatchChannels(ctx context.Context) {
 // serves the corner case for etcd connection lost and missing some events
 func (node *DataNode) checkWatchedList() error {
 // REF MEP#7 watch path should be [prefix]/channel/{node_id}/{channel_name}
-prefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", node.NodeID))
+prefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()))
 keys, values, err := node.watchKv.LoadWithPrefix(prefix)
 if err != nil {
 return err
@@ -387,7 +381,7 @@ func (node *DataNode) handlePutEvent(watchInfo *datapb.ChannelWatchInfo, version
 return fmt.Errorf("fail to marshal watchInfo with state, vChanName: %s, state: %s ,err: %w", vChanName, watchInfo.State.String(), err)
 }

-k := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", node.NodeID), vChanName)
+k := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()), vChanName)

 log.Debug("handle put event: try to save result state", zap.String("key", k), zap.String("state", watchInfo.State.String()))
 err = node.watchKv.CompareVersionAndSwap(k, version, string(v))
@@ -442,7 +436,7 @@ func (node *DataNode) Start() error {
 MsgType: commonpb.MsgType_RequestTSO,
 MsgID: 0,
 Timestamp: 0,
-SourceID: node.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 Count: 1,
 })
@@ -549,7 +543,7 @@ func (node *DataNode) ReadyToFlush() error {
 // One precondition: The segmentID in req is in ascending order.
 func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmentsRequest) (*commonpb.Status, error) {
 metrics.DataNodeFlushSegmentsReqCounter.WithLabelValues(
-fmt.Sprint(Params.DataNodeCfg.NodeID),
+fmt.Sprint(Params.DataNodeCfg.GetNodeID()),
 MetricRequestsTotal).Inc()

 status := &commonpb.Status{
@@ -612,7 +606,7 @@ func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmen

 status.ErrorCode = commonpb.ErrorCode_Success
 metrics.DataNodeFlushSegmentsReqCounter.WithLabelValues(
-fmt.Sprint(Params.DataNodeCfg.NodeID),
+fmt.Sprint(Params.DataNodeCfg.GetNodeID()),
 MetricRequestsSuccess).Inc()

 return status, nil
@@ -664,19 +658,19 @@ func (node *DataNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Strin
 // TODO(dragondriver): cache the Metrics and set a retention to the cache
 func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
 log.Debug("DataNode.GetMetrics",
-zap.Int64("node_id", Params.DataNodeCfg.NodeID),
+zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
 zap.String("req", req.Request))

 if !node.isHealthy() {
 log.Warn("DataNode.GetMetrics failed",
-zap.Int64("node_id", Params.DataNodeCfg.NodeID),
+zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
-zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.NodeID)))
+zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID())))

 return &milvuspb.GetMetricsResponse{
 Status: &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
-Reason: msgDataNodeIsUnhealthy(Params.DataNodeCfg.NodeID),
+Reason: msgDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID()),
 },
 Response: "",
 }, nil
@@ -685,7 +679,7 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
 metricType, err := metricsinfo.ParseMetricType(req.Request)
 if err != nil {
 log.Warn("DataNode.GetMetrics failed to parse metric type",
-zap.Int64("node_id", Params.DataNodeCfg.NodeID),
+zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.Error(err))

@@ -705,7 +699,7 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
 systemInfoMetrics, err := node.getSystemInfoMetrics(ctx, req)

 log.Debug("DataNode.GetMetrics",
-zap.Int64("node_id", Params.DataNodeCfg.NodeID),
+zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType),
 zap.Any("systemInfoMetrics", systemInfoMetrics), // TODO(dragondriver): necessary? may be very large
@@ -715,7 +709,7 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
 }

 log.Debug("DataNode.GetMetrics failed, request metric type is not implemented yet",
-zap.Int64("node_id", Params.DataNodeCfg.NodeID),
+zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType))

@@ -780,7 +774,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 ErrorCode: commonpb.ErrorCode_Success,
 },
 TaskId: req.GetImportTask().TaskId,
-DatanodeId: node.NodeID,
+DatanodeId: Params.DataNodeCfg.GetNodeID(),
 State: commonpb.ImportState_ImportStarted,
 Segments: make([]int64, 0),
 AutoIds: make([]int64, 0),
@@ -796,9 +790,9 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 zap.Int64("collection ID", req.GetImportTask().GetCollectionId()),
 zap.Int64("partition ID", req.GetImportTask().GetPartitionId()),
 zap.Int64("taskID", req.GetImportTask().GetTaskId()),
-zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.NodeID)))
+zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID())))

-msg := msgDataNodeIsUnhealthy(Params.DataNodeCfg.NodeID)
+msg := msgDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID())
 importResult.State = commonpb.ImportState_ImportFailed
 importResult.Infos = append(importResult.Infos, &commonpb.KeyValuePair{Key: "failed_reason", Value: msg})
 reportFunc(importResult)
@@ -846,7 +840,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 }

 // temp id allocator service
-idAllocator, err := allocator2.NewIDAllocator(node.ctx, node.rootCoord, Params.DataNodeCfg.NodeID)
+idAllocator, err := allocator2.NewIDAllocator(node.ctx, node.rootCoord, Params.DataNodeCfg.GetNodeID())
 _ = idAllocator.Start()
 defer idAllocator.Close()

@@ -1029,7 +1023,7 @@ func importFlushReqFunc(node *DataNode, req *datapb.ImportTaskRequest, res *root
 MsgType: 0, //TODO msg type
 MsgID: 0, //TODO msg id
 Timestamp: 0, //TODO time stamp
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 SegmentID: segmentID,
 CollectionID: req.ImportTask.GetCollectionId(),
@@ -87,8 +87,7 @@ func TestDataNode(t *testing.T) {
 defer node.Stop()

 node.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
-Params.DataNodeCfg.NodeID = 1
-
+Params.DataNodeCfg.SetNodeID(1)
 t.Run("Test WatchDmChannels ", func(t *testing.T) {
 emptyNode := &DataNode{}

@@ -461,15 +460,15 @@ func TestWatchChannel(t *testing.T) {
 // GOOSE TODO
 kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
 oldInvalidCh := "datanode-etcd-test-by-dev-rootcoord-dml-channel-invalid"
-path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID, oldInvalidCh)
+path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), oldInvalidCh)
 err = kv.Save(path, string([]byte{23}))
 assert.NoError(t, err)

 ch := fmt.Sprintf("datanode-etcd-test-by-dev-rootcoord-dml-channel_%d", rand.Int31())
-path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID, ch)
+path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), ch)
 c := make(chan struct{})
 go func() {
-ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID))
+ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
 c <- struct{}{}
 cnt := 0
 for {
@@ -508,7 +507,7 @@ func TestWatchChannel(t *testing.T) {
 exist := node.flowgraphManager.exist(ch)
 assert.True(t, exist)

-err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID))
+err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
 assert.Nil(t, err)
 //TODO there is not way to sync Release done, use sleep for now
 time.Sleep(100 * time.Millisecond)
@@ -520,15 +519,15 @@ func TestWatchChannel(t *testing.T) {
 t.Run("Test release channel", func(t *testing.T) {
 kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
 oldInvalidCh := "datanode-etcd-test-by-dev-rootcoord-dml-channel-invalid"
-path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID, oldInvalidCh)
+path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), oldInvalidCh)
 err = kv.Save(path, string([]byte{23}))
 assert.NoError(t, err)

 ch := fmt.Sprintf("datanode-etcd-test-by-dev-rootcoord-dml-channel_%d", rand.Int31())
-path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID, ch)
+path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), ch)
 c := make(chan struct{})
 go func() {
-ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID))
+ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
 c <- struct{}{}
 cnt := 0
 for {
@@ -567,7 +566,7 @@ func TestWatchChannel(t *testing.T) {
 exist := node.flowgraphManager.exist(ch)
 assert.False(t, exist)

-err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, node.NodeID))
+err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
 assert.Nil(t, err)
 //TODO there is not way to sync Release done, use sleep for now
 time.Sleep(100 * time.Millisecond)
@@ -131,8 +131,8 @@ func (dsService *dataSyncService) close() {
 log.Info("dataSyncService closing flowgraph", zap.Int64("collectionID", dsService.collectionID),
 zap.String("vChanName", dsService.vchannelName))
 dsService.fg.Close()
-metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Dec()
-metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Sub(2) // timeTickChannel + deltaChannel
+metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Dec()
+metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Sub(2) // timeTickChannel + deltaChannel
 }

 dsService.cancelFn()
@@ -243,7 +243,7 @@ func (ddn *ddNode) sendDeltaTimeTick(ts Timestamp) error {
 MsgType: commonpb.MsgType_TimeTick,
 MsgID: 0,
 Timestamp: ts,
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 }
 timeTickMsg := &msgstream.TimeTickMsg{
@@ -295,7 +295,7 @@ func newDDNode(ctx context.Context, collID UniqueID, vchanInfo *datapb.VchannelI

 deltaStream.SetRepackFunc(msgstream.DefaultRepackFunc)
 deltaStream.AsProducer([]string{deltaChannelName})
-metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Inc()
+metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
 log.Debug("datanode AsProducer", zap.String("DeltaChannelName", deltaChannelName))
 var deltaMsgStream msgstream.MsgStream = deltaStream
 deltaMsgStream.Start()
@@ -155,7 +155,7 @@ func (dn *deleteNode) bufferDeleteMsg(msg *msgstream.DeleteMsg, tr TimeRange) er

 // store
 delDataBuf.updateSize(int64(rows))
-metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(metrics.DeleteLabel, fmt.Sprint(Params.DataNodeCfg.NodeID)).Add(float64(rows))
+metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(metrics.DeleteLabel, fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Add(float64(rows))
 delDataBuf.updateTimeRange(tr)
 dn.delBuf.Store(segID, delDataBuf)
 }
@@ -35,7 +35,7 @@ import (
 func newDmInputNode(ctx context.Context, seekPos *internalpb.MsgPosition, dmNodeConfig *nodeConfig) (*flowgraph.InputNode, error) {
 // subName should be unique, since pchannelName is shared among several collections
 // consumeSubName := Params.MsgChannelSubName + "-" + strconv.FormatInt(collID, 10)
-consumeSubName := fmt.Sprintf("%s-%d-%d", Params.CommonCfg.DataNodeSubName, Params.DataNodeCfg.NodeID, dmNodeConfig.collectionID)
+consumeSubName := fmt.Sprintf("%s-%d-%d", Params.CommonCfg.DataNodeSubName, Params.DataNodeCfg.GetNodeID(), dmNodeConfig.collectionID)
 insertStream, err := dmNodeConfig.msFactory.NewTtMsgStream(ctx)
 if err != nil {
 return nil, err
@@ -45,7 +45,7 @@ func newDmInputNode(ctx context.Context, seekPos *internalpb.MsgPosition, dmNode
 // is virtual channel name, so we need to convert vchannel name into pchannel neme here.
 pchannelName := funcutil.ToPhysicalChannel(dmNodeConfig.vChannelName)
 insertStream.AsConsumer([]string{pchannelName}, consumeSubName)
-metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Inc()
+metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
 log.Info("datanode AsConsumer", zap.String("physical channel", pchannelName), zap.String("subName", consumeSubName), zap.Int64("collection ID", dmNodeConfig.collectionID))

 if seekPos != nil {
@@ -298,7 +298,7 @@ func (ibNode *insertBufferNode) Operate(in []Msg) []Msg {
 dropped: false,
 })

-metrics.DataNodeAutoFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Inc()
+metrics.DataNodeAutoFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
 }
 }

@@ -343,13 +343,13 @@ func (ibNode *insertBufferNode) Operate(in []Msg) []Msg {
 err := ibNode.flushManager.flushBufferData(task.buffer, task.segmentID, task.flushed, task.dropped, endPositions[0])
 if err != nil {
 log.Warn("failed to invoke flushBufferData", zap.Error(err))
-metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.FailLabel).Inc()
+metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.FailLabel).Inc()
 } else {
 segmentsToFlush = append(segmentsToFlush, task.segmentID)
 ibNode.insertBuffer.Delete(task.segmentID)
-metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.SuccessLabel).Inc()
+metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.SuccessLabel).Inc()
 }
-metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.TotalLabel).Inc()
+metrics.DataNodeFlushSegmentCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.TotalLabel).Inc()
 }

 if err := ibNode.writeHardTimeTick(fgMsg.timeRange.timestampMax, seg2Upload); err != nil {
@@ -473,7 +473,7 @@ func (ibNode *insertBufferNode) bufferInsertMsg(msg *msgstream.InsertMsg, endPos

 // update buffer size
 buffer.updateSize(int64(msg.NRows()))
-metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.InsertLabel).Add(float64(len(msg.RowData)))
+metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Add(float64(len(msg.RowData)))

 // store in buffer
 ibNode.insertBuffer.Store(currentSegID, buffer)
@@ -507,7 +507,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
 return nil, err
 }
 wTt.AsProducer([]string{Params.CommonCfg.DataCoordTimeTick})
-metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Inc()
+metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
 log.Debug("datanode AsProducer", zap.String("TimeTickChannelName", Params.CommonCfg.DataCoordTimeTick))
 var wTtMsgStream msgstream.MsgStream = wTt
 wTtMsgStream.Start()
@@ -521,7 +521,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
 continue
 }
 stats = append(stats, stat)
-metrics.DataNodeSegmentRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Add(float64(stat.NumRows))
+metrics.DataNodeSegmentRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Add(float64(stat.NumRows))
 }
 msgPack := msgstream.MsgPack{}
 timeTickMsg := msgstream.DataNodeTtMsg{
@@ -544,7 +544,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
 msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
 pt, _ := tsoutil.ParseHybridTs(ts)
 pChan := funcutil.ToPhysicalChannel(config.vChannelName)
-metrics.DataNodeTimeSync.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), pChan).Set(float64(pt))
+metrics.DataNodeTimeSync.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), pChan).Set(float64(pt))
 return wTtMsgStream.Produce(&msgPack)
 })
@@ -67,7 +67,7 @@ func (fm *flowgraphManager) addAndStart(dn *DataNode, vchan *datapb.VchannelInfo

 fm.flowgraphs.Store(vchan.GetChannelName(), dataSyncService)

-metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Inc()
+metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
 return nil
 }

@@ -76,7 +76,7 @@ func (fm *flowgraphManager) release(vchanName string) {

 if fg, loaded := fm.flowgraphs.LoadAndDelete(vchanName); loaded {
 fg.(*dataSyncService).close()
-metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Dec()
+metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Dec()
 }
 log.Info("release flowgraph resources end", zap.String("Vchannel", vchanName))
 }
@@ -412,7 +412,7 @@ func (m *rendezvousFlushManager) flushBufferData(data *BufferData, segmentID Uni
 data: kvs,
 }, field2Insert, field2Stats, flushed, dropped, pos)

-metrics.DataNodeFlushSegmentLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID)).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.DataNodeFlushSegmentLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))
 return nil
 }

@@ -554,11 +554,11 @@ type flushBufferInsertTask struct {
 func (t *flushBufferInsertTask) flushInsertData() error {
 if t.ChunkManager != nil && len(t.data) > 0 {
 for _, d := range t.data {
-metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.InsertLabel).Add(float64(len(d)))
+metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Add(float64(len(d)))
 }
 tr := timerecord.NewTimeRecorder("insertData")
 err := t.MultiWrite(t.data)
-metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.InsertLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
 return err
 }
 return nil
@@ -573,11 +573,11 @@ type flushBufferDeleteTask struct {
 func (t *flushBufferDeleteTask) flushDeleteData() error {
 if len(t.data) > 0 && t.ChunkManager != nil {
 for _, d := range t.data {
-metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.DeleteLabel).Add(float64(len(d)))
+metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Add(float64(len(d)))
 }
 tr := timerecord.NewTimeRecorder("deleteData")
 err := t.MultiWrite(t.data)
-metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.NodeID), metrics.DeleteLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
 return err
 }
 return nil
@@ -615,7 +615,7 @@ func dropVirtualChannelFunc(dsService *dataSyncService, opts ...retry.Option) fl
 MsgType: 0, //TODO msg type
 MsgID: 0, //TODO msg id
 Timestamp: 0, //TODO time stamp
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 ChannelName: dsService.vchannelName,
 }
@@ -751,7 +751,7 @@ func flushNotifyFunc(dsService *dataSyncService, opts ...retry.Option) notifyMet
 MsgType: 0, //TODO msg type
 MsgID: 0, //TODO msg id
 Timestamp: 0, //TODO time stamp
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 SegmentID: pack.segmentID,
 CollectionID: dsService.collectionID,
@@ -53,7 +53,7 @@ func (mService *metaService) getCollectionSchema(ctx context.Context, collID Uni
 MsgType: commonpb.MsgType_DescribeCollection,
 MsgID: 0, //GOOSE TODO
 Timestamp: 0, // GOOSE TODO
-SourceID: Params.DataNodeCfg.NodeID,
+SourceID: Params.DataNodeCfg.GetNodeID(),
 },
 DbName: "default", // GOOSE TODO
 CollectionID: collID,
@@ -29,7 +29,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
 // TODO(dragondriver): add more metrics
 nodeInfos := metricsinfo.DataNodeInfos{
 BaseComponentInfos: metricsinfo.BaseComponentInfos{
-Name: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.NodeID),
+Name: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
 HardwareInfos: metricsinfo.HardwareMetrics{
 IP: node.session.Address,
 CPUCoreCount: metricsinfo.GetCPUCoreCount(false),
@@ -60,7 +60,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
 Reason: err.Error(),
 },
 Response: "",
-ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
 }, nil
 }

@@ -70,6 +70,6 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
 Reason: "",
 },
 Response: resp,
-ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
 }, nil
 }
@@ -297,7 +297,6 @@ func (s *Server) init() error {
 }
 }

-s.datanode.SetNodeID(dn.Params.DataNodeCfg.NodeID)
 s.datanode.UpdateStateCode(internalpb.StateCode_Initializing)

 if err := s.datanode.Init(); err != nil {
@@ -130,7 +130,7 @@ func (s *Server) init() error {
 indexnode.Params.IndexNodeCfg.IP = Params.IP
 indexnode.Params.IndexNodeCfg.Address = Params.GetAddress()

-closer := trace.InitTracing(fmt.Sprintf("IndexNode-%d", indexnode.Params.IndexNodeCfg.NodeID))
+closer := trace.InitTracing(fmt.Sprintf("IndexNode-%d", indexnode.Params.IndexNodeCfg.GetNodeID()))
 s.closer = closer

 defer func() {
@@ -166,8 +166,8 @@ func (i *IndexNode) initSession() error {
 return errors.New("failed to initialize session")
 }
 i.session.Init(typeutil.IndexNodeRole, Params.IndexNodeCfg.IP+":"+strconv.Itoa(Params.IndexNodeCfg.Port), false, true)
-Params.IndexNodeCfg.NodeID = i.session.ServerID
-Params.SetLogger(Params.IndexNodeCfg.NodeID)
+Params.IndexNodeCfg.SetNodeID(i.session.ServerID)
+Params.SetLogger(i.session.ServerID)
 return nil
 }

@@ -289,7 +289,7 @@ func (i *IndexNode) CreateIndex(ctx context.Context, request *indexpb.CreateInde
 sp, ctx2 := trace.StartSpanFromContextWithOperationName(i.loopCtx, "IndexNode-CreateIndex")
 defer sp.Finish()
 sp.SetTag("IndexBuildID", strconv.FormatInt(request.IndexBuildID, 10))
-metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10), metrics.TotalLabel).Inc()
+metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.TotalLabel).Inc()

 t := &IndexBuildTask{
 BaseTask: BaseTask{
@@ -299,7 +299,7 @@ func (i *IndexNode) CreateIndex(ctx context.Context, request *indexpb.CreateInde
 req: request,
 cm: i.chunkManager,
 etcdKV: i.etcdKV,
-nodeID: Params.IndexNodeCfg.NodeID,
+nodeID: Params.IndexNodeCfg.GetNodeID(),
 serializedSize: 0,
 }

@@ -312,12 +312,12 @@ func (i *IndexNode) CreateIndex(ctx context.Context, request *indexpb.CreateInde
 log.Warn("IndexNode failed to schedule", zap.Int64("indexBuildID", request.IndexBuildID), zap.Error(err))
 ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
 ret.Reason = err.Error()
-metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10), metrics.FailLabel).Inc()
+metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
 return ret, nil
 }
 log.Info("IndexNode successfully scheduled", zap.Int64("indexBuildID", request.IndexBuildID))

-metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10), metrics.SuccessLabel).Inc()
+metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.SuccessLabel).Inc()
 return ret, nil
 }

@@ -376,14 +376,14 @@ func (i *IndexNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringR
 func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
 if !i.isHealthy() {
 log.Warn("IndexNode.GetMetrics failed",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
-zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.NodeID)))
+zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))

 return &milvuspb.GetMetricsResponse{
 Status: &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
-Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.NodeID),
+Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
 },
 Response: "",
 }, nil
@@ -392,7 +392,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
 metricType, err := metricsinfo.ParseMetricType(req.Request)
 if err != nil {
 log.Warn("IndexNode.GetMetrics failed to parse metric type",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.Error(err))

@@ -409,7 +409,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
 metrics, err := getSystemInfoMetrics(ctx, req, i)

 log.Debug("IndexNode.GetMetrics",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType),
 zap.Error(err))
@@ -418,7 +418,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
 }

 log.Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType))
@@ -307,7 +307,7 @@ func (inm *Mock) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
 metrics, err := getMockSystemInfoMetrics(ctx, req, inm)

 log.Debug("IndexNode.GetMetrics",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType),
 zap.Any("metrics", metrics), // TODO(dragondriver): necessary? may be very large
@@ -317,7 +317,7 @@ func (inm *Mock) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
 }

 log.Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
-zap.Int64("node_id", Params.IndexNodeCfg.NodeID),
+zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
 zap.String("req", req.Request),
 zap.String("metric_type", metricType))

@@ -338,7 +338,7 @@ func getMockSystemInfoMetrics(
 // TODO(dragondriver): add more metrics
 nodeInfos := metricsinfo.IndexNodeInfos{
 BaseComponentInfos: metricsinfo.BaseComponentInfos{
-Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.NodeID),
+Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
 HardwareInfos: metricsinfo.HardwareMetrics{
 CPUCoreCount: metricsinfo.GetCPUCoreCount(false),
 CPUCoreUsage: metricsinfo.GetCPUUsage(),
@@ -368,6 +368,6 @@ func getMockSystemInfoMetrics(
 Reason: "",
 },
 Response: resp,
-ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
 }, nil
 }
@@ -34,7 +34,7 @@ func getSystemInfoMetrics(
 // TODO(dragondriver): add more metrics
 nodeInfos := metricsinfo.IndexNodeInfos{
 BaseComponentInfos: metricsinfo.BaseComponentInfos{
-Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.NodeID),
+Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
 HardwareInfos: metricsinfo.HardwareMetrics{
 IP: node.session.Address,
 CPUCoreCount: metricsinfo.GetCPUCoreCount(false),
@@ -66,7 +66,7 @@ func getSystemInfoMetrics(
 Reason: err.Error(),
 },
 Response: "",
-ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
 }, nil
 }

@@ -76,6 +76,6 @@ func getSystemInfoMetrics(
 Reason: "",
 },
 Response: resp,
-ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.NodeID),
+ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
 }, nil
 }
@@ -386,8 +386,8 @@ func (it *IndexBuildTask) loadFieldData(ctx context.Context) (storage.FieldID, s
 }

 // TODO: @xiaocai2333 metrics.IndexNodeLoadBinlogLatency should be added above, put here to get segmentID.
-metrics.IndexNodeLoadBinlogLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10)).Observe(float64(loadVectorDuration))
-metrics.IndexNodeDecodeBinlogLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10)).Observe(float64(it.tr.RecordSpan()))
+metrics.IndexNodeLoadBinlogLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(loadVectorDuration))
+metrics.IndexNodeDecodeBinlogLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(it.tr.RecordSpan()))

 if len(insertData.Data) != 1 {
 return storage.InvalidUniqueID, nil, errors.New("we expect only one field in deserialized insert data")
@@ -443,7 +443,7 @@ func (it *IndexBuildTask) buildIndex(ctx context.Context) ([]*storage.Blob, erro
 }
 }

-metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10)).Observe(float64(it.tr.RecordSpan()))
+metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(it.tr.RecordSpan()))

 it.tr.Record("build index done")
 }
@@ -480,7 +480,7 @@ func (it *IndexBuildTask) buildIndex(ctx context.Context) ([]*storage.Blob, erro
 return nil, err
 }
 encodeIndexFileDur := it.tr.Record("index codec serialize done")
-metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
+metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
 return serializedIndexBlobs, nil
 }

@@ -579,7 +579,7 @@ func (it *IndexBuildTask) Execute(ctx context.Context) error {
 return err
 }
 saveIndexFileDur := it.tr.Record("index file save done")
-metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.NodeID, 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
+metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
 it.tr.Elapse("index building all done")
 log.Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
 zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))
@@ -400,7 +400,7 @@ func (mgr *singleTypeChannelsMgr) createMsgStream(collectionID UniqueID) error {

 mgr.updateCollection(collectionID, id)
 for _, pc := range pchans {
-metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), pc).Inc()
+metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), pc).Inc()
 }
 return nil
 }
@@ -435,7 +435,7 @@ func (mgr *singleTypeChannelsMgr) removeStream(collectionID UniqueID) error {
 mgr.deleteVChansByVIDs(ids)
 mgr.deleteStreamByVIDs(ids)
 for _, pc := range channels {
-metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), pc).Dec()
+metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), pc).Dec()
 }
 return nil
 }
@@ -486,7 +486,7 @@ func (mgr *channelsMgrImpl) getVChannels(collectionID UniqueID) ([]vChan, error)
 }

 func (mgr *channelsMgrImpl) createDQLStream(collectionID UniqueID) error {
-metrics.ProxyMsgStreamObjectsForSearch.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "query").Inc()
+metrics.ProxyMsgStreamObjectsForSearch.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "query").Inc()
 return mgr.dqlChannelsMgr.createMsgStream(collectionID)
 }

@@ -495,7 +495,7 @@ func (mgr *channelsMgrImpl) getDQLStream(collectionID UniqueID) (msgstream.MsgSt
 }

 func (mgr *channelsMgrImpl) removeDQLStream(collectionID UniqueID) error {
-metrics.ProxyMsgStreamObjectsForSearch.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "query").Dec()
+metrics.ProxyMsgStreamObjectsForSearch.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "query").Dec()
 return mgr.dqlChannelsMgr.removeStream(collectionID)
 }
(One file's diff is suppressed because it is too large.)
@@ -122,7 +122,7 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string)
 collInfo, ok := m.collInfo[collectionName]

 if !ok {
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GeCollectionID", metrics.CacheMissLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GeCollectionID", metrics.CacheMissLabel).Inc()
 tr := timerecord.NewTimeRecorder("UpdateCache")
 m.mu.RUnlock()
 coll, err := m.describeCollection(ctx, collectionName)
@@ -132,12 +132,12 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string)
 m.mu.Lock()
 defer m.mu.Unlock()
 m.updateCollection(coll, collectionName)
-metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
 collInfo = m.collInfo[collectionName]
 return collInfo.collID, nil
 }
 defer m.mu.RUnlock()
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionID", metrics.CacheHitLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionID", metrics.CacheHitLabel).Inc()

 return collInfo.collID, nil
 }
@@ -152,7 +152,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string

 if !ok {
 tr := timerecord.NewTimeRecorder("UpdateCache")
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionInfo", metrics.CacheMissLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheMissLabel).Inc()
 coll, err := m.describeCollection(ctx, collectionName)
 if err != nil {
 return nil, err
@@ -161,10 +161,10 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
 defer m.mu.Unlock()
 m.updateCollection(coll, collectionName)
 collInfo = m.collInfo[collectionName]
-metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
 }

-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionInfo", metrics.CacheHitLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheHitLabel).Inc()
 return &collectionInfo{
 collID: collInfo.collID,
 schema: collInfo.schema,
@@ -180,7 +180,7 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri
 collInfo, ok := m.collInfo[collectionName]

 if !ok {
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionSchema", metrics.CacheMissLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheMissLabel).Inc()
 tr := timerecord.NewTimeRecorder("UpdateCache")
 m.mu.RUnlock()
 coll, err := m.describeCollection(ctx, collectionName)
@@ -194,14 +194,14 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri
 defer m.mu.Unlock()
 m.updateCollection(coll, collectionName)
 collInfo = m.collInfo[collectionName]
-metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
+metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
 log.Debug("Reload collection from root coordinator ",
 zap.String("collection name ", collectionName),
 zap.Any("time (milliseconds) take ", tr.ElapseSpan().Milliseconds()))
 return collInfo.schema, nil
 }
 defer m.mu.RUnlock()
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetCollectionSchema", metrics.CacheHitLabel).Inc()
+metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheHitLabel).Inc()

 return collInfo.schema, nil
 }
@@ -241,7 +241,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m

 if collInfo.partInfo == nil || len(collInfo.partInfo) == 0 {
 tr := timerecord.NewTimeRecorder("UpdateCache")
-metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetPartitions", metrics.CacheMissLabel).Inc()
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitions", metrics.CacheMissLabel).Inc()
|
||||
m.mu.RUnlock()
|
||||
|
||||
partitions, err := m.showPartitions(ctx, collectionName)
|
||||
|
@ -256,7 +256,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
log.Debug("proxy", zap.Any("GetPartitions:partitions after update", partitions), zap.Any("collectionName", collectionName))
|
||||
ret := make(map[string]typeutil.UniqueID)
|
||||
partInfo := m.collInfo[collectionName].partInfo
|
||||
|
@ -267,7 +267,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m
|
|||
|
||||
}
|
||||
defer m.mu.RUnlock()
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetPartitions", metrics.CacheHitLabel).Inc()
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitions", metrics.CacheHitLabel).Inc()
|
||||
|
||||
ret := make(map[string]typeutil.UniqueID)
|
||||
partInfo := m.collInfo[collectionName].partInfo
|
||||
|
@ -298,7 +298,7 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string,
|
|||
|
||||
if !ok {
|
||||
tr := timerecord.NewTimeRecorder("UpdateCache")
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetPartitionInfo", metrics.CacheMissLabel).Inc()
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheMissLabel).Inc()
|
||||
partitions, err := m.showPartitions(ctx, collectionName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -310,14 +310,14 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string,
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
log.Debug("proxy", zap.Any("GetPartitionID:partitions after update", partitions), zap.Any("collectionName", collectionName))
|
||||
partInfo, ok = m.collInfo[collectionName].partInfo[partitionName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("partitionID of partitionName:%s can not be find", partitionName)
|
||||
}
|
||||
}
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "GetPartitionInfo", metrics.CacheHitLabel).Inc()
|
||||
metrics.ProxyCacheHitCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheHitLabel).Inc()
|
||||
return &partitionInfo{
|
||||
partitionID: partInfo.partitionID,
|
||||
createdTimestamp: partInfo.createdTimestamp,
|
||||
|
@ -546,7 +546,7 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, collectionNam
|
|||
req := &querypb.GetShardLeadersRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_GetShardLeaders,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: info.collID,
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ func getSystemInfoMetrics(
|
|||
|
||||
identifierMap := make(map[string]int)
|
||||
|
||||
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.ProxyID)
|
||||
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID())
|
||||
identifierMap[proxyRoleName] = int(node.session.ServerID)
|
||||
|
||||
proxyTopologyNode := metricsinfo.SystemTopologyNode{
|
||||
|
@ -424,7 +424,7 @@ func getSystemInfoMetrics(
|
|||
Reason: err.Error(),
|
||||
},
|
||||
Response: "",
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.ProxyID),
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -434,6 +434,6 @@ func getSystemInfoMetrics(
|
|||
Reason: "",
|
||||
},
|
||||
Response: resp,
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.ProxyID),
|
||||
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID()),
|
||||
}, nil
|
||||
}

@@ -143,8 +143,8 @@ func (node *Proxy) initSession() error {
 		return errors.New("new session failed, maybe etcd cannot be connected")
 	}
 	node.session.Init(typeutil.ProxyRole, Params.ProxyCfg.NetworkAddress, false, true)
-	Params.ProxyCfg.ProxyID = node.session.ServerID
-	Params.SetLogger(Params.ProxyCfg.ProxyID)
+	Params.ProxyCfg.SetNodeID(node.session.ServerID)
+	Params.SetLogger(node.session.ServerID)
 	return nil
 }

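The initSession hunk above is the single writer: `SetNodeID` is called once with the session's ServerID, and `SetLogger` now takes the ServerID directly instead of reading the freshly written field back. A hedged sketch of the kind of `-race` regression test that covers this pattern, reusing the hypothetical `proxyConfig` accessors from the earlier sketch (not a test taken from the repository):

```go
package paramtable

import (
	"sync"
	"testing"
)

// TestNodeIDAccessorsNoRace runs a concurrent write and read of the node ID;
// `go test -race` flags the old direct-field version but not the accessors.
func TestNodeIDAccessorsNoRace(t *testing.T) {
	var cfg proxyConfig
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		cfg.SetNodeID(42) // writer: session init path
	}()
	go func() {
		defer wg.Done()
		_ = cfg.GetNodeID() // reader: metrics and request-building paths
	}()
	wg.Wait()
}
```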
@ -184,39 +184,39 @@ func (node *Proxy) Init() error {
|
|||
node.factory.Init(&Params)
|
||||
log.Debug("init parameters for factory", zap.String("role", typeutil.ProxyRole), zap.Any("parameters", Params.ServiceParam))
|
||||
|
||||
log.Debug("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
idAllocator, err := allocator.NewIDAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.ProxyID)
|
||||
log.Debug("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
idAllocator, err := allocator.NewIDAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
|
||||
if err != nil {
|
||||
log.Warn("failed to create id allocator",
|
||||
zap.Error(err),
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
return err
|
||||
}
|
||||
node.idAllocator = idAllocator
|
||||
log.Debug("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
log.Debug("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
|
||||
log.Debug("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
tsoAllocator, err := newTimestampAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.ProxyID)
|
||||
log.Debug("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
tsoAllocator, err := newTimestampAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
|
||||
if err != nil {
|
||||
log.Warn("failed to create timestamp allocator",
|
||||
zap.Error(err),
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
return err
|
||||
}
|
||||
node.tsoAllocator = tsoAllocator
|
||||
log.Debug("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
log.Debug("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
|
||||
log.Debug("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
log.Debug("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
segAssigner, err := newSegIDAssigner(node.ctx, node.dataCoord, node.lastTick)
|
||||
if err != nil {
|
||||
log.Warn("failed to create segment id assigner",
|
||||
zap.Error(err),
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
return err
|
||||
}
|
||||
node.segAssigner = segAssigner
|
||||
node.segAssigner.PeerID = Params.ProxyCfg.ProxyID
|
||||
log.Debug("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.ProxyID))
|
||||
node.segAssigner.PeerID = Params.ProxyCfg.GetNodeID()
|
||||
log.Debug("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
|
||||
|
||||
log.Debug("create channels manager", zap.String("role", typeutil.ProxyRole))
|
||||
dmlChannelsFunc := getDmlChannelsFunc(node.ctx, node.rootCoord)
|
||||
|
@ -283,7 +283,7 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
|
|||
|
||||
maxTs := ts
|
||||
for channel, ts := range stats {
|
||||
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), channel).Set(float64(ts))
|
||||
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), channel).Set(float64(ts))
|
||||
channels = append(channels, channel)
|
||||
tss = append(tss, ts)
|
||||
if ts > maxTs {
|
||||
|
@ -303,7 +303,7 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
|
|||
DefaultTimestamp: maxTs,
|
||||
}
|
||||
|
||||
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), "DefaultTimestamp").Set(float64(maxTs))
|
||||
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "DefaultTimestamp").Set(float64(maxTs))
|
||||
|
||||
status, err := node.rootCoord.UpdateChannelTimeTick(node.ctx, req)
|
||||
if err != nil {
|
||||
|
|
|
@ -564,7 +564,7 @@ func TestProxy(t *testing.T) {
|
|||
states, err := proxy.GetComponentStates(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, commonpb.ErrorCode_Success, states.Status.ErrorCode)
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, states.State.NodeID)
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), states.State.NodeID)
|
||||
assert.Equal(t, typeutil.ProxyRole, states.State.Role)
|
||||
assert.Equal(t, proxy.stateCode.Load().(internalpb.StateCode), states.State.StateCode)
|
||||
})
|
||||
|
|
|
@ -306,7 +306,7 @@ func (it *insertTask) PreExecute(ctx context.Context) error {
|
|||
var rowIDEnd UniqueID
|
||||
tr := timerecord.NewTimeRecorder("applyPK")
|
||||
rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(rowNums)
|
||||
metrics.ProxyApplyPrimaryKeyLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan()))
|
||||
metrics.ProxyApplyPrimaryKeyLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan()))
|
||||
|
||||
it.RowIDs = make([]UniqueID, rowNums)
|
||||
for i := rowIDBegin; i < rowIDEnd; i++ {
|
||||
|
@ -548,7 +548,7 @@ func (it *insertTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
sendMsgDur := tr.Record("send insert request to message stream")
|
||||
metrics.ProxySendInsertReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(sendMsgDur.Milliseconds()))
|
||||
metrics.ProxySendInsertReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(sendMsgDur.Milliseconds()))
|
||||
|
||||
log.Debug("Proxy Insert Execute done", zap.Int64("msgID", it.Base.MsgID), zap.String("collection name", collectionName))
|
||||
|
||||
|
@ -603,13 +603,13 @@ func (cct *createCollectionTask) SetTs(ts Timestamp) {
|
|||
func (cct *createCollectionTask) OnEnqueue() error {
|
||||
cct.Base = &commonpb.MsgBase{}
|
||||
cct.Base.MsgType = commonpb.MsgType_CreateCollection
|
||||
cct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
cct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cct *createCollectionTask) PreExecute(ctx context.Context) error {
|
||||
cct.Base.MsgType = commonpb.MsgType_CreateCollection
|
||||
cct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
cct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
cct.schema = &schemapb.CollectionSchema{}
|
||||
err := proto.Unmarshal(cct.Schema, cct.schema)
|
||||
|
@ -756,7 +756,7 @@ func (dct *dropCollectionTask) OnEnqueue() error {
|
|||
|
||||
func (dct *dropCollectionTask) PreExecute(ctx context.Context) error {
|
||||
dct.Base.MsgType = commonpb.MsgType_DropCollection
|
||||
dct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(dct.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -885,7 +885,7 @@ func (hct *hasCollectionTask) OnEnqueue() error {
|
|||
|
||||
func (hct *hasCollectionTask) PreExecute(ctx context.Context) error {
|
||||
hct.Base.MsgType = commonpb.MsgType_HasCollection
|
||||
hct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
hct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(hct.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -959,7 +959,7 @@ func (dct *describeCollectionTask) OnEnqueue() error {
|
|||
|
||||
func (dct *describeCollectionTask) PreExecute(ctx context.Context) error {
|
||||
dct.Base.MsgType = commonpb.MsgType_DescribeCollection
|
||||
dct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if dct.CollectionID != 0 && len(dct.CollectionName) == 0 {
|
||||
return nil
|
||||
|
@ -1075,7 +1075,7 @@ func (g *getCollectionStatisticsTask) OnEnqueue() error {
|
|||
|
||||
func (g *getCollectionStatisticsTask) PreExecute(ctx context.Context) error {
|
||||
g.Base.MsgType = commonpb.MsgType_GetCollectionStatistics
|
||||
g.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
g.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1165,7 +1165,7 @@ func (g *getPartitionStatisticsTask) OnEnqueue() error {
|
|||
|
||||
func (g *getPartitionStatisticsTask) PreExecute(ctx context.Context) error {
|
||||
g.Base.MsgType = commonpb.MsgType_GetPartitionStatistics
|
||||
g.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
g.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1259,7 +1259,7 @@ func (sct *showCollectionsTask) OnEnqueue() error {
|
|||
|
||||
func (sct *showCollectionsTask) PreExecute(ctx context.Context) error {
|
||||
sct.Base.MsgType = commonpb.MsgType_ShowCollections
|
||||
sct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
sct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
if sct.GetType() == milvuspb.ShowType_InMemory {
|
||||
for _, collectionName := range sct.CollectionNames {
|
||||
if err := validateCollectionName(collectionName); err != nil {
|
||||
|
@ -1413,7 +1413,7 @@ func (cpt *createPartitionTask) OnEnqueue() error {
|
|||
|
||||
func (cpt *createPartitionTask) PreExecute(ctx context.Context) error {
|
||||
cpt.Base.MsgType = commonpb.MsgType_CreatePartition
|
||||
cpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
cpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName, partitionTag := cpt.CollectionName, cpt.PartitionName
|
||||
|
||||
|
@ -1490,7 +1490,7 @@ func (dpt *dropPartitionTask) OnEnqueue() error {
|
|||
|
||||
func (dpt *dropPartitionTask) PreExecute(ctx context.Context) error {
|
||||
dpt.Base.MsgType = commonpb.MsgType_DropPartition
|
||||
dpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName, partitionTag := dpt.CollectionName, dpt.PartitionName
|
||||
|
||||
|
@ -1567,7 +1567,7 @@ func (hpt *hasPartitionTask) OnEnqueue() error {
|
|||
|
||||
func (hpt *hasPartitionTask) PreExecute(ctx context.Context) error {
|
||||
hpt.Base.MsgType = commonpb.MsgType_HasPartition
|
||||
hpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
hpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName, partitionTag := hpt.CollectionName, hpt.PartitionName
|
||||
|
||||
|
@ -1644,7 +1644,7 @@ func (spt *showPartitionsTask) OnEnqueue() error {
|
|||
|
||||
func (spt *showPartitionsTask) PreExecute(ctx context.Context) error {
|
||||
spt.Base.MsgType = commonpb.MsgType_ShowPartitions
|
||||
spt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
spt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(spt.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -1810,7 +1810,7 @@ func (cit *createIndexTask) OnEnqueue() error {
|
|||
|
||||
func (cit *createIndexTask) PreExecute(ctx context.Context) error {
|
||||
cit.Base.MsgType = commonpb.MsgType_CreateIndex
|
||||
cit.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
cit.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName, fieldName := cit.CollectionName, cit.FieldName
|
||||
|
||||
|
@ -1944,7 +1944,7 @@ func (dit *describeIndexTask) OnEnqueue() error {
|
|||
|
||||
func (dit *describeIndexTask) PreExecute(ctx context.Context) error {
|
||||
dit.Base.MsgType = commonpb.MsgType_DescribeIndex
|
||||
dit.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dit.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(dit.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -2025,7 +2025,7 @@ func (dit *dropIndexTask) OnEnqueue() error {
|
|||
|
||||
func (dit *dropIndexTask) PreExecute(ctx context.Context) error {
|
||||
dit.Base.MsgType = commonpb.MsgType_DropIndex
|
||||
dit.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dit.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName, fieldName := dit.CollectionName, dit.FieldName
|
||||
|
||||
|
@ -2114,7 +2114,7 @@ func (gibpt *getIndexBuildProgressTask) OnEnqueue() error {
|
|||
|
||||
func (gibpt *getIndexBuildProgressTask) PreExecute(ctx context.Context) error {
|
||||
gibpt.Base.MsgType = commonpb.MsgType_GetIndexBuildProgress
|
||||
gibpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
gibpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(gibpt.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -2136,7 +2136,7 @@ func (gibpt *getIndexBuildProgressTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_ShowPartitions,
|
||||
MsgID: gibpt.Base.MsgID,
|
||||
Timestamp: gibpt.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: gibpt.DbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -2156,7 +2156,7 @@ func (gibpt *getIndexBuildProgressTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_DescribeIndex,
|
||||
MsgID: gibpt.Base.MsgID,
|
||||
Timestamp: gibpt.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: gibpt.DbName,
|
||||
CollectionName: gibpt.CollectionName,
|
||||
|
@ -2188,7 +2188,7 @@ func (gibpt *getIndexBuildProgressTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_ShowSegments,
|
||||
MsgID: gibpt.Base.MsgID,
|
||||
Timestamp: gibpt.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
PartitionID: partitionID,
|
||||
|
@ -2214,7 +2214,7 @@ func (gibpt *getIndexBuildProgressTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_DescribeSegment,
|
||||
MsgID: gibpt.Base.MsgID,
|
||||
Timestamp: gibpt.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
SegmentID: segmentID,
|
||||
|
@ -2254,7 +2254,7 @@ func (gibpt *getIndexBuildProgressTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_SegmentInfo,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
SegmentIDs: allSegmentIDs,
|
||||
})
|
||||
|
@ -2338,7 +2338,7 @@ func (gist *getIndexStateTask) OnEnqueue() error {
|
|||
|
||||
func (gist *getIndexStateTask) PreExecute(ctx context.Context) error {
|
||||
gist.Base.MsgType = commonpb.MsgType_GetIndexState
|
||||
gist.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
gist.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
if err := validateCollectionName(gist.CollectionName); err != nil {
|
||||
return err
|
||||
|
@ -2361,7 +2361,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_ShowPartitions,
|
||||
MsgID: gist.Base.MsgID,
|
||||
Timestamp: gist.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: gist.DbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -2382,7 +2382,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_DescribeIndex,
|
||||
MsgID: gist.Base.MsgID,
|
||||
Timestamp: gist.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: gist.DbName,
|
||||
CollectionName: gist.CollectionName,
|
||||
|
@ -2416,7 +2416,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_ShowSegments,
|
||||
MsgID: gist.Base.MsgID,
|
||||
Timestamp: gist.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
PartitionID: partitionID,
|
||||
|
@ -2442,7 +2442,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
|
|||
MsgType: commonpb.MsgType_DescribeSegment,
|
||||
MsgID: gist.Base.MsgID,
|
||||
Timestamp: gist.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
SegmentID: segmentID,
|
||||
|
@ -2550,7 +2550,7 @@ func (ft *flushTask) OnEnqueue() error {
|
|||
|
||||
func (ft *flushTask) PreExecute(ctx context.Context) error {
|
||||
ft.Base.MsgType = commonpb.MsgType_Flush
|
||||
ft.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
ft.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2645,7 +2645,7 @@ func (lct *loadCollectionTask) OnEnqueue() error {
|
|||
func (lct *loadCollectionTask) PreExecute(ctx context.Context) error {
|
||||
log.Debug("loadCollectionTask PreExecute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
|
||||
lct.Base.MsgType = commonpb.MsgType_LoadCollection
|
||||
lct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
lct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName := lct.CollectionName
|
||||
|
||||
|
@ -2746,7 +2746,7 @@ func (rct *releaseCollectionTask) OnEnqueue() error {
|
|||
|
||||
func (rct *releaseCollectionTask) PreExecute(ctx context.Context) error {
|
||||
rct.Base.MsgType = commonpb.MsgType_ReleaseCollection
|
||||
rct.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
rct.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName := rct.CollectionName
|
||||
|
||||
|
@ -2834,7 +2834,7 @@ func (lpt *loadPartitionsTask) OnEnqueue() error {
|
|||
|
||||
func (lpt *loadPartitionsTask) PreExecute(ctx context.Context) error {
|
||||
lpt.Base.MsgType = commonpb.MsgType_LoadPartitions
|
||||
lpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
lpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName := lpt.CollectionName
|
||||
|
||||
|
@ -2933,7 +2933,7 @@ func (rpt *releasePartitionsTask) OnEnqueue() error {
|
|||
|
||||
func (rpt *releasePartitionsTask) PreExecute(ctx context.Context) error {
|
||||
rpt.Base.MsgType = commonpb.MsgType_ReleasePartitions
|
||||
rpt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
rpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collName := rpt.CollectionName
|
||||
|
||||
|
@ -3118,7 +3118,7 @@ func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, expr string) (res
|
|||
|
||||
func (dt *deleteTask) PreExecute(ctx context.Context) error {
|
||||
dt.Base.MsgType = commonpb.MsgType_Delete
|
||||
dt.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
dt.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
dt.result = &milvuspb.MutationResult{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -3342,7 +3342,7 @@ func (c *CreateAliasTask) OnEnqueue() error {
|
|||
// PreExecute defines the action before task execution
|
||||
func (c *CreateAliasTask) PreExecute(ctx context.Context) error {
|
||||
c.Base.MsgType = commonpb.MsgType_CreateAlias
|
||||
c.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
c.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collAlias := c.Alias
|
||||
// collection alias uses the same format as collection name
|
||||
|
@ -3421,7 +3421,7 @@ func (d *DropAliasTask) OnEnqueue() error {
|
|||
|
||||
func (d *DropAliasTask) PreExecute(ctx context.Context) error {
|
||||
d.Base.MsgType = commonpb.MsgType_DropAlias
|
||||
d.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
d.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
collAlias := d.Alias
|
||||
if err := ValidateCollectionAlias(collAlias); err != nil {
|
||||
return err
|
||||
|
@ -3487,7 +3487,7 @@ func (a *AlterAliasTask) OnEnqueue() error {
|
|||
|
||||
func (a *AlterAliasTask) PreExecute(ctx context.Context) error {
|
||||
a.Base.MsgType = commonpb.MsgType_AlterAlias
|
||||
a.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
a.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collAlias := a.Alias
|
||||
// collection alias uses the same format as collection name
|
||||
|
|
|
@ -61,7 +61,7 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
|
|||
}
|
||||
|
||||
t.Base.MsgType = commonpb.MsgType_Retrieve
|
||||
t.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collectionName := t.request.CollectionName
|
||||
t.collectionName = collectionName
|
||||
|
@ -385,7 +385,7 @@ func (t *queryTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []Un
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: searchPartitionIDs,
|
||||
|
@ -418,7 +418,7 @@ func (t *queryTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []Un
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -449,7 +449,7 @@ func (t *queryTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []Un
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
})
|
||||
|
|
|
@ -91,7 +91,7 @@ func TestQueryTask_all(t *testing.T) {
|
|||
status, err := qc.LoadCollection(ctx, &querypb.LoadCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_LoadCollection,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
})
|
||||
|
@ -104,7 +104,7 @@ func TestQueryTask_all(t *testing.T) {
|
|||
RetrieveRequest: &internalpb.RetrieveRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Retrieve,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
OutputFieldsId: make([]int64, len(fieldName2Types)),
|
||||
|
@ -118,7 +118,7 @@ func TestQueryTask_all(t *testing.T) {
|
|||
request: &milvuspb.QueryRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Retrieve,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionName: collectionName,
|
||||
Expr: expr,
|
||||
|
|
|
@ -65,7 +65,7 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
|
|||
|
||||
defer sp.Finish()
|
||||
t.Base.MsgType = commonpb.MsgType_Search
|
||||
t.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
|
||||
collectionName := t.request.CollectionName
|
||||
if err := validateCollectionName(collectionName); err != nil {
|
||||
|
@ -338,7 +338,7 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.ProxyDecodeSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
metrics.ProxyDecodeSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
log.Debug("proxy search post execute stage 2", zap.Any("len(validSearchResults)", len(validSearchResults)))
|
||||
if len(validSearchResults) <= 0 {
|
||||
log.Warn("search result is empty", zap.Any("requestID", t.Base.MsgID), zap.String("requestType", "search"))
|
||||
|
@ -365,7 +365,7 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.ProxyReduceSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10), metrics.SuccessLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
metrics.ProxyReduceSearchResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.SuccessLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
t.result.CollectionName = t.collectionName
|
||||
|
||||
schema, err := globalMetaCache.GetCollectionSchema(ctx, t.request.CollectionName)
|
||||
|
@ -428,7 +428,7 @@ func (t *searchTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []U
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: searchPartitionIDs,
|
||||
|
@ -461,7 +461,7 @@ func (t *searchTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []U
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -492,7 +492,7 @@ func (t *searchTask) checkIfLoaded(collectionID UniqueID, searchPartitionIDs []U
|
|||
MsgType: commonpb.MsgType_ShowCollections,
|
||||
MsgID: t.Base.MsgID,
|
||||
Timestamp: t.Base.Timestamp,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
})
|
||||
|
@ -737,7 +737,7 @@ func (t *searchTask) SetTs(ts Timestamp) {
|
|||
func (t *searchTask) OnEnqueue() error {
|
||||
t.Base = &commonpb.MsgBase{}
|
||||
t.Base.MsgType = commonpb.MsgType_Search
|
||||
t.Base.SourceID = Params.ProxyCfg.ProxyID
|
||||
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ func TestSearchTask_PostExecute(t *testing.T) {
|
|||
SearchRequest: &internalpb.SearchRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Search,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
},
|
||||
request: nil,
|
||||
|
@ -576,7 +576,7 @@ func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_LoadCollection,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// DbID: 0,
|
||||
// CollectionID: collectionID,
|
||||
|
@ -597,9 +597,9 @@ func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_Search,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
|
||||
// DbID: 0,
|
||||
// CollectionID: 0,
|
||||
// PartitionIDs: nil,
|
||||
|
@ -820,7 +820,7 @@ func TestSearchTaskV2_all(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_LoadCollection,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// DbID: 0,
|
||||
// CollectionID: collectionID,
|
||||
|
@ -841,9 +841,9 @@ func TestSearchTaskV2_all(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_Search,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
|
||||
// DbID: 0,
|
||||
// CollectionID: 0,
|
||||
// PartitionIDs: nil,
|
||||
|
@ -1058,7 +1058,7 @@ func TestSearchTaskV2_7803_reduce(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_LoadCollection,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// DbID: 0,
|
||||
// CollectionID: collectionID,
|
||||
|
@ -1079,9 +1079,9 @@ func TestSearchTaskV2_7803_reduce(t *testing.T) {
|
|||
// MsgType: commonpb.MsgType_Search,
|
||||
// MsgID: 0,
|
||||
// Timestamp: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.ProxyID, 10),
|
||||
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
|
||||
// DbID: 0,
|
||||
// CollectionID: 0,
|
||||
// PartitionIDs: nil,
|
||||
|
|
|
@ -393,7 +393,7 @@ func TestInsertTask_checkLengthOfFieldsData(t *testing.T) {
|
|||
// Base: &commonpb.MsgBase{
|
||||
// MsgType: commonpb.MsgType_Insert,
|
||||
// MsgID: 0,
|
||||
// SourceID: Params.ProxyCfg.ProxyID,
|
||||
// SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
@ -1127,7 +1127,7 @@ func TestDropCollectionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
// missing collectionID in globalMetaCache
|
||||
err = task.Execute(ctx)
|
||||
assert.NotNil(t, err)
|
||||
|
@ -1204,7 +1204,7 @@ func TestHasCollectionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
// missing collectionID in globalMetaCache
|
||||
err = task.Execute(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
@ -1265,7 +1265,7 @@ func TestDescribeCollectionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
// missing collectionID in globalMetaCache
|
||||
err := task.Execute(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
@ -1447,7 +1447,7 @@ func TestCreatePartitionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
err := task.Execute(ctx)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
|
@ -1494,7 +1494,7 @@ func TestDropPartitionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
err := task.Execute(ctx)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
|
@ -1541,7 +1541,7 @@ func TestHasPartitionTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
err := task.Execute(ctx)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
|
@ -1589,7 +1589,7 @@ func TestShowPartitionsTask(t *testing.T) {
|
|||
assert.Equal(t, UniqueID(100), task.ID())
|
||||
assert.Equal(t, Timestamp(100), task.BeginTs())
|
||||
assert.Equal(t, Timestamp(100), task.EndTs())
|
||||
assert.Equal(t, Params.ProxyCfg.ProxyID, task.GetBase().GetSourceID())
|
||||
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
|
||||
err := task.Execute(ctx)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
|
@ -1673,7 +1673,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
MsgType: commonpb.MsgType_CreatePartition,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: dbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -1703,7 +1703,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
_ = ticker.start()
|
||||
defer ticker.close()
|
||||
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.ProxyID)
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.GetNodeID())
|
||||
assert.NoError(t, err)
|
||||
_ = idAllocator.Start()
|
||||
defer idAllocator.Close()
|
||||
|
@ -1725,7 +1725,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Insert,
|
||||
MsgID: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: dbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -1782,7 +1782,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
|
|||
MsgType: commonpb.MsgType_Delete,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionName: collectionName,
|
||||
PartitionName: partitionName,
|
||||
|
@ -1893,7 +1893,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
MsgType: commonpb.MsgType_CreatePartition,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: dbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -1923,7 +1923,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
_ = ticker.start()
|
||||
defer ticker.close()
|
||||
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.ProxyID)
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.GetNodeID())
|
||||
assert.NoError(t, err)
|
||||
_ = idAllocator.Start()
|
||||
defer idAllocator.Close()
|
||||
|
@ -1945,7 +1945,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_Insert,
|
||||
MsgID: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
DbName: dbName,
|
||||
CollectionName: collectionName,
|
||||
|
@ -2002,7 +2002,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
|
|||
MsgType: commonpb.MsgType_Delete,
|
||||
MsgID: 0,
|
||||
Timestamp: 0,
|
||||
SourceID: Params.ProxyCfg.ProxyID,
|
||||
SourceID: Params.ProxyCfg.GetNodeID(),
|
||||
},
|
||||
CollectionName: collectionName,
|
||||
PartitionName: partitionName,
|
||||
|
|
|
@@ -61,7 +61,7 @@ func (ta *timestampAllocator) alloc(count uint32) ([]Timestamp, error) {
 	resp, err := ta.tso.AllocTimestamp(ctx, req)
 	defer func() {
 		cancel()
-		metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.ProxyID, 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
+		metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
 	}()

 	if err != nil {

@@ -948,7 +948,7 @@ func (qc *QueryCoord) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UnexpectedError,
 		},
-		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.QueryCoordID),
+		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.GetNodeID()),
 	}

 	if qc.stateCode.Load() != internalpb.StateCode_Healthy {

@@ -107,9 +107,11 @@ type Meta interface {

 // MetaReplica records the current load information on all querynodes
 type MetaReplica struct {
-	ctx    context.Context
-	cancel context.CancelFunc
-	client kv.MetaKv // client of a reliable kv service, i.e. etcd client
+	ctx    context.Context
+	cancel context.CancelFunc
+	client kv.MetaKv // client of a reliable kv service, i.e. etcd client
+	//DDL lock
+	clientMutex sync.Mutex
 	factory     dependency.Factory
 	idAllocator func() (UniqueID, error)

@ -141,7 +143,6 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAl
|
|||
m := &MetaReplica{
|
||||
ctx: childCtx,
|
||||
cancel: cancel,
|
||||
client: kv,
|
||||
factory: factory,
|
||||
idAllocator: idAllocator,
|
||||
|
||||
|
@ -154,6 +155,7 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAl
|
|||
segmentsInfo: newSegmentsInfo(kv),
|
||||
replicas: NewReplicaInfos(),
|
||||
}
|
||||
m.setKvClient(kv)
|
||||
|
||||
err := m.reloadFromKV()
|
||||
if err != nil {
|
||||
|
@ -167,7 +169,7 @@ func (m *MetaReplica) reloadFromKV() error {
|
|||
log.Debug("start reload from kv")
|
||||
|
||||
log.Info("recovery collections...")
|
||||
collectionKeys, collectionValues, err := m.client.LoadWithPrefix(collectionMetaPrefix)
|
||||
collectionKeys, collectionValues, err := m.getKvClient().LoadWithPrefix(collectionMetaPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -192,7 +194,7 @@ func (m *MetaReplica) reloadFromKV() error {
|
|||
return err
|
||||
}
|
||||
|
||||
deltaChannelKeys, deltaChannelValues, err := m.client.LoadWithPrefix(deltaChannelMetaPrefix)
|
||||
deltaChannelKeys, deltaChannelValues, err := m.getKvClient().LoadWithPrefix(deltaChannelMetaPrefix)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -210,7 +212,7 @@ func (m *MetaReplica) reloadFromKV() error {
|
|||
m.deltaChannelInfos[collectionID] = append(m.deltaChannelInfos[collectionID], deltaChannelInfo)
|
||||
}
|
||||
|
||||
dmChannelKeys, dmChannelValues, err := m.client.LoadWithPrefix(dmChannelMetaPrefix)
|
||||
dmChannelKeys, dmChannelValues, err := m.getKvClient().LoadWithPrefix(dmChannelMetaPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -272,7 +274,7 @@ func (m *MetaReplica) reloadFromKV() error {
|
|||
}
|
||||
}
|
||||
|
||||
replicaKeys, replicaValues, err := m.client.LoadWithPrefix(ReplicaMetaPrefix)
|
||||
replicaKeys, replicaValues, err := m.getKvClient().LoadWithPrefix(ReplicaMetaPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -339,9 +341,17 @@ func reloadShardLeaderAddress(meta Meta, cluster Cluster) error {
 }

+func (m *MetaReplica) setKvClient(kv kv.MetaKv) {
+	m.clientMutex.Lock()
+	defer m.clientMutex.Unlock()
+	m.client = kv
+}
+
+func (m *MetaReplica) getKvClient() kv.MetaKv {
+	m.clientMutex.Lock()
+	defer m.clientMutex.Unlock()
+	return m.client
+}
+
 func (m *MetaReplica) showCollections() []*querypb.CollectionInfo {
 	m.collectionMu.RLock()
 	defer m.collectionMu.RUnlock()

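The two helpers above serialize every access to MetaReplica's kv client behind `clientMutex`; the remaining hunks in this file simply route each former `m.client` read through `getKvClient()`. As a hedged aside, the same guarded-holder shape can be written for any field that is swapped at runtime; the snippet below is illustrative only, with a placeholder KV interface standing in for kv.MetaKv, and is not how the repository structures it.

```go
package main

import "sync"

// KV stands in for the kv.MetaKv interface used by MetaReplica; only the
// guarding pattern matters for this sketch.
type KV interface {
	MultiSave(kvs map[string]string) error
}

// guardedKV mirrors the setKvClient/getKvClient pair above: every read and
// write of the client goes through the same mutex.
type guardedKV struct {
	mu     sync.Mutex
	client KV
}

func (g *guardedKV) set(c KV) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.client = c
}

func (g *guardedKV) get() KV {
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.client
}
```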
@ -424,7 +434,7 @@ func (m *MetaReplica) addCollection(collectionID UniqueID, loadType querypb.Load
|
|||
ReplicaIds: make([]int64, 0),
|
||||
ReplicaNumber: 0,
|
||||
}
|
||||
err := saveGlobalCollectionInfo(collectionID, newCollection, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, newCollection, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
|
||||
return err
|
||||
|
@ -475,7 +485,7 @@ func (m *MetaReplica) addPartitions(collectionID UniqueID, partitionIDs []Unique
|
|||
collectionInfo.ReleasedPartitionIDs = newReleasedPartitionIDs
|
||||
|
||||
log.Debug("add a partition to MetaReplica", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", collectionInfo.PartitionIDs))
|
||||
err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save collectionInfo error", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", collectionInfo.PartitionIDs), zap.Any("error", err.Error()))
|
||||
return err
|
||||
|
@ -494,7 +504,7 @@ func (m *MetaReplica) releaseCollection(collectionID UniqueID) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
err = removeCollectionMeta(collectionID, collection.ReplicaIds, m.client)
|
||||
err = removeCollectionMeta(collectionID, collection.ReplicaIds, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Warn("remove collectionInfo from etcd failed", zap.Int64("collectionID", collectionID), zap.Any("error", err.Error()))
|
||||
return err
|
||||
|
@ -555,7 +565,7 @@ func (m *MetaReplica) releasePartitions(collectionID UniqueID, releasedPartition
|
|||
collectionInfo.PartitionStates = newPartitionStates
|
||||
collectionInfo.ReleasedPartitionIDs = newReleasedPartitionIDs
|
||||
|
||||
err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, collectionInfo, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("releasePartition: remove partition infos error", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", releasedPartitionIDs), zap.Any("error", err.Error()))
|
||||
return err
|
||||
|
@ -679,7 +689,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
|
|||
saveKvs[changeInfoKey] = string(changeInfoBytes)
|
||||
}
|
||||
|
||||
err := m.client.MultiSave(saveKvs)
|
||||
err := m.getKvClient().MultiSave(saveKvs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -760,7 +770,7 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
|
|||
changeInfoKey := fmt.Sprintf("%s/%d", util.ChangeInfoMetaPrefix, segmentChangeInfos.Base.MsgID)
|
||||
saveKvs[changeInfoKey] = string(changeInfoBytes)
|
||||
|
||||
err = m.client.MultiSave(saveKvs)
|
||||
err = m.getKvClient().MultiSave(saveKvs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -914,7 +924,7 @@ func (m *MetaReplica) setDmChannelInfos(dmChannelWatchInfos []*querypb.DmChannel
|
|||
m.dmChannelMu.Lock()
|
||||
defer m.dmChannelMu.Unlock()
|
||||
|
||||
err := saveDmChannelWatchInfos(dmChannelWatchInfos, m.client)
|
||||
err := saveDmChannelWatchInfos(dmChannelWatchInfos, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save dmChannelWatchInfo error", zap.Any("error", err.Error()))
|
||||
return err
|
||||
|
@ -972,7 +982,7 @@ func (m *MetaReplica) setDeltaChannel(collectionID UniqueID, infos []*datapb.Vch
|
|||
return nil
|
||||
}
|
||||
|
||||
err := saveDeltaChannelInfo(collectionID, infos, m.client)
|
||||
err := saveDeltaChannelInfo(collectionID, infos, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save delta channel info error", zap.Int64("collectionID", collectionID), zap.Error(err))
|
||||
return err
|
||||
|
@ -1028,7 +1038,7 @@ func (m *MetaReplica) setLoadType(collectionID UniqueID, loadType querypb.LoadTy
|
|||
if _, ok := m.collectionInfos[collectionID]; ok {
|
||||
info := proto.Clone(m.collectionInfos[collectionID]).(*querypb.CollectionInfo)
|
||||
info.LoadType = loadType
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
|
||||
return err
|
||||
|
@ -1060,7 +1070,7 @@ func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID Uniqu
|
|||
}
|
||||
partitionState.InMemoryPercentage = percentage
|
||||
}
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
|
||||
return err
|
||||
|
@ -1085,7 +1095,7 @@ func (m *MetaReplica) setLoadPercentage(collectionID UniqueID, partitionID Uniqu
|
|||
}
|
||||
|
||||
info.InMemoryPercentage /= int64(len(info.PartitionIDs))
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.client)
|
||||
err := saveGlobalCollectionInfo(collectionID, info, m.getKvClient())
|
||||
if err != nil {
|
||||
log.Error("save collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
|
||||
return err
|
||||
|
@ -1189,7 +1199,7 @@ func (m *MetaReplica) addReplica(replica *milvuspb.ReplicaInfo) error {
|
|||
collectionInfo.ReplicaIds = append(collectionInfo.ReplicaIds, replica.ReplicaID)
|
||||
collectionInfo.ReplicaNumber++
|
||||
|
||||
err = saveGlobalCollectionInfo(collectionInfo.CollectionID, collectionInfo, m.client)
|
||||
err = saveGlobalCollectionInfo(collectionInfo.CollectionID, collectionInfo, m.getKvClient())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1198,7 +1208,7 @@ func (m *MetaReplica) addReplica(replica *milvuspb.ReplicaInfo) error {
|
|||
m.collectionInfos[collectionInfo.CollectionID] = collectionInfo
|
||||
m.collectionMu.Unlock()
|
||||
|
||||
err = saveReplicaInfo(replica, m.client)
|
||||
err = saveReplicaInfo(replica, m.getKvClient())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1208,7 +1218,7 @@ func (m *MetaReplica) addReplica(replica *milvuspb.ReplicaInfo) error {
|
|||
}
|
||||
|
||||
func (m *MetaReplica) setReplicaInfo(info *milvuspb.ReplicaInfo) error {
|
||||
err := saveReplicaInfo(info, m.client)
|
||||
err := saveReplicaInfo(info, m.getKvClient())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@@ -117,13 +117,12 @@ func TestMetaFunc(t *testing.T) {
		NodeID: nodeID,
	}
	meta := &MetaReplica{
		client: kv,
		collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
		queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
		dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
		segmentsInfo: segmentsInfo,
	}

	meta.setKvClient(kv)
	dmChannels := []string{"testDm1", "testDm2"}

	t.Run("Test ShowPartitionFail", func(t *testing.T) {

@@ -307,7 +306,6 @@ func TestReloadMetaFromKV(t *testing.T) {
		return newID, nil
	}
	meta := &MetaReplica{
		client: kv,
		idAllocator: idAllocator,
		collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
		queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},

@@ -316,6 +314,7 @@ func TestReloadMetaFromKV(t *testing.T) {
		segmentsInfo: newSegmentsInfo(kv),
		replicas: NewReplicaInfos(),
	}
	meta.setKvClient(kv)

	kvs := make(map[string]string)
	collectionInfo := &querypb.CollectionInfo{

@@ -407,12 +406,12 @@ func TestCreateQueryChannel(t *testing.T) {
	}

	m := &MetaReplica{
		client: kv,
		collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
		queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
		dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
		segmentsInfo: segmentsInfo,
	}
	m.setKvClient(kv)
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			info := m.createQueryChannel(test.inID)
@@ -41,7 +41,7 @@ func getSystemInfoMetrics(
	clusterTopology := metricsinfo.QueryClusterTopology{
		Self: metricsinfo.QueryCoordInfos{
			BaseComponentInfos: metricsinfo.BaseComponentInfos{
				Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.QueryCoordID),
				Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.GetNodeID()),
				HardwareInfos: metricsinfo.HardwareMetrics{
					IP: qc.session.Address,
					CPUCoreCount: metricsinfo.GetCPUCoreCount(false),

@@ -119,7 +119,7 @@ func getSystemInfoMetrics(
	coordTopology := metricsinfo.QueryCoordTopology{
		Cluster: clusterTopology,
		Connections: metricsinfo.ConnTopology{
			Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.QueryCoordID),
			Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.GetNodeID()),
			// TODO(dragondriver): fill ConnectedComponents if necessary
			ConnectedComponents: []metricsinfo.ConnectionInfo{},
		},
@@ -129,7 +129,7 @@ func (qc *QueryCoord) initSession() error {
		return fmt.Errorf("session is nil, the etcd client connection may have failed")
	}
	qc.session.Init(typeutil.QueryCoordRole, Params.QueryCoordCfg.Address, true, true)
	Params.QueryCoordCfg.NodeID = uint64(qc.session.ServerID)
	Params.QueryCoordCfg.SetNodeID(qc.session.ServerID)
	Params.SetLogger(qc.session.ServerID)
	return nil
}
@@ -119,7 +119,7 @@ OUTER:
		c.vChannels = append(c.vChannels, dstChan)
	}

	metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Add(float64(len(c.vChannels)))
	metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Add(float64(len(c.vChannels)))
}

// getVChannels get virtual channels of collection

@@ -147,7 +147,7 @@ func (c *Collection) removeVChannel(channel Channel) {
		zap.String("channel", channel),
	)

	metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Sub(float64(len(c.vChannels)))
	metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(len(c.vChannels)))
}

// addPChannels add physical channels to physical channels of collection

@@ -244,7 +244,7 @@ OUTER:
		c.vDeltaChannels = append(c.vDeltaChannels, dstChan)
	}

	metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Add(float64(len(c.vDeltaChannels)))
	metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Add(float64(len(c.vDeltaChannels)))
}

func (c *Collection) removeVDeltaChannel(channel Channel) {

@@ -262,7 +262,7 @@ func (c *Collection) removeVDeltaChannel(channel Channel) {
		zap.String("channel", channel),
	)

	metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Sub(float64(len(c.vDeltaChannels)))
	metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(len(c.vDeltaChannels)))
}

// setReleaseTime records when collection is released
@ -212,7 +212,7 @@ func (colReplica *collectionReplica) addCollection(collectionID UniqueID, schema
|
|||
colReplica.collections[collectionID] = newCollection
|
||||
log.Debug("Successfully add collection ", zap.Int64("collectionID", collectionID))
|
||||
|
||||
metrics.QueryNodeNumCollections.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Set(float64(len(colReplica.collections)))
|
||||
metrics.QueryNodeNumCollections.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Set(float64(len(colReplica.collections)))
|
||||
return newCollection
|
||||
}
|
||||
|
||||
|
@ -241,8 +241,8 @@ func (colReplica *collectionReplica) removeCollectionPrivate(collectionID Unique
|
|||
deleteCollection(collection)
|
||||
delete(colReplica.collections, collectionID)
|
||||
|
||||
metrics.QueryNodeNumCollections.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Set(float64(len(colReplica.collections)))
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Sub(float64(len(collection.partitionIDs)))
|
||||
metrics.QueryNodeNumCollections.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Set(float64(len(colReplica.collections)))
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(len(collection.partitionIDs)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -417,7 +417,7 @@ func (colReplica *collectionReplica) addPartitionPrivate(collectionID UniqueID,
|
|||
colReplica.partitions[partitionID] = newPartition
|
||||
}
|
||||
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Set(float64(len(colReplica.partitions)))
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Set(float64(len(colReplica.partitions)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -451,7 +451,7 @@ func (colReplica *collectionReplica) removePartitionPrivate(partitionID UniqueID
|
|||
collection.removePartitionID(partitionID)
|
||||
delete(colReplica.partitions, partitionID)
|
||||
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Set(float64(len(colReplica.partitions)))
|
||||
metrics.QueryNodeNumPartitions.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Set(float64(len(colReplica.partitions)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -559,7 +559,7 @@ func (colReplica *collectionReplica) addSegmentPrivate(segmentID UniqueID, parti
|
|||
partition.addSegmentID(segmentID)
|
||||
colReplica.segments[segmentID] = segment
|
||||
|
||||
metrics.QueryNodeNumSegments.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumSegments.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -597,7 +597,7 @@ func (colReplica *collectionReplica) removeSegmentPrivate(segmentID UniqueID) er
|
|||
delete(colReplica.segments, segmentID)
|
||||
deleteSegment(segment)
|
||||
|
||||
metrics.QueryNodeNumSegments.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Dec()
|
||||
metrics.QueryNodeNumSegments.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -750,7 +750,7 @@ func (colReplica *collectionReplica) getSegmentInfo(segment *Segment) *querypb.S
|
|||
SegmentID: segment.ID(),
|
||||
CollectionID: segment.collectionID,
|
||||
PartitionID: segment.partitionID,
|
||||
NodeID: Params.QueryNodeCfg.QueryNodeID,
|
||||
NodeID: Params.QueryNodeCfg.GetNodeID(),
|
||||
MemSize: segment.getMemSize(),
|
||||
NumRows: segment.getRowCount(),
|
||||
IndexName: indexName,
|
||||
|
|
|
@ -76,7 +76,7 @@ func (dsService *dataSyncService) addFlowGraphsForDMLChannels(collectionID Uniqu
|
|||
log.Debug("add DML flow graph",
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.Any("channel", channel))
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
}
|
||||
|
||||
return results, nil
|
||||
|
@ -116,7 +116,7 @@ func (dsService *dataSyncService) addFlowGraphsForDeltaChannels(collectionID Uni
|
|||
log.Debug("add delta flow graph",
|
||||
zap.Any("collectionID", collectionID),
|
||||
zap.Any("channel", channel))
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
}
|
||||
|
||||
return results, nil
|
||||
|
@ -189,7 +189,7 @@ func (dsService *dataSyncService) removeFlowGraphsByDMLChannels(channels []Chann
|
|||
if _, ok := dsService.dmlChannel2FlowGraph[channel]; ok {
|
||||
// close flow graph
|
||||
dsService.dmlChannel2FlowGraph[channel].close()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Dec()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
|
||||
}
|
||||
delete(dsService.dmlChannel2FlowGraph, channel)
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ func (dsService *dataSyncService) removeFlowGraphsByDeltaChannels(channels []Cha
|
|||
if _, ok := dsService.deltaChannel2FlowGraph[channel]; ok {
|
||||
// close flow graph
|
||||
dsService.deltaChannel2FlowGraph[channel].close()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Dec()
|
||||
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
|
||||
}
|
||||
delete(dsService.deltaChannel2FlowGraph, channel)
|
||||
}
|
||||
|
|
|
@ -210,7 +210,7 @@ func (q *queryNodeFlowGraph) consumeFlowGraph(channel Channel, subName ConsumeSu
|
|||
zap.Any("subName", subName),
|
||||
)
|
||||
q.consumerCnt++
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -226,7 +226,7 @@ func (q *queryNodeFlowGraph) consumeFlowGraphFromLatest(channel Channel, subName
|
|||
zap.Any("subName", subName),
|
||||
)
|
||||
q.consumerCnt++
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -239,7 +239,7 @@ func (q *queryNodeFlowGraph) seekQueryNodeFlowGraph(position *internalpb.MsgPosi
|
|||
zap.Any("channel", position.ChannelName),
|
||||
)
|
||||
q.consumerCnt++
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -248,7 +248,7 @@ func (q *queryNodeFlowGraph) close() {
|
|||
q.cancel()
|
||||
q.flowGraph.Close()
|
||||
if q.dmlStream != nil && q.consumerCnt > 0 {
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Sub(float64(q.consumerCnt))
|
||||
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(q.consumerCnt))
|
||||
}
|
||||
log.Debug("stop query node flow graph",
|
||||
zap.Any("collectionID", q.collectionID),
|
||||
|
|
|
@ -214,7 +214,7 @@ func (h *historical) searchSegments(segIDs []UniqueID, searchReqs []*searchReque
|
|||
// update metrics
|
||||
metrics.QueryNodeSQSegmentLatency.WithLabelValues(metrics.SearchLabel,
|
||||
metrics.SealedSegmentLabel,
|
||||
fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
|
||||
// write back result into list
|
||||
lock.Lock()
|
||||
|
|
|
@ -91,7 +91,7 @@ func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Stri
|
|||
func (node *QueryNode) AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -201,7 +201,7 @@ func (node *QueryNode) RemoveQueryChannel(ctx context.Context, in *queryPb.Remov
|
|||
func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -226,7 +226,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmC
|
|||
log.Error(err.Error())
|
||||
return status, nil
|
||||
}
|
||||
log.Debug("watchDmChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID), zap.Int64("replicaID", in.GetReplicaID()))
|
||||
log.Debug("watchDmChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()), zap.Int64("replicaID", in.GetReplicaID()))
|
||||
|
||||
waitFunc := func() (*commonpb.Status, error) {
|
||||
err = dct.WaitToFinish()
|
||||
|
@ -238,7 +238,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmC
|
|||
log.Error(err.Error())
|
||||
return status, nil
|
||||
}
|
||||
log.Debug("watchDmChannelsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
|
||||
log.Debug("watchDmChannelsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
}, nil
|
||||
|
@ -251,7 +251,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmC
|
|||
func (node *QueryNode) WatchDeltaChannels(ctx context.Context, in *queryPb.WatchDeltaChannelsRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -276,7 +276,7 @@ func (node *QueryNode) WatchDeltaChannels(ctx context.Context, in *queryPb.Watch
|
|||
log.Error(err.Error())
|
||||
return status, nil
|
||||
}
|
||||
log.Debug("watchDeltaChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
|
||||
log.Debug("watchDeltaChannelsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
|
||||
|
||||
waitFunc := func() (*commonpb.Status, error) {
|
||||
err = dct.WaitToFinish()
|
||||
|
@ -288,7 +288,7 @@ func (node *QueryNode) WatchDeltaChannels(ctx context.Context, in *queryPb.Watch
|
|||
log.Error(err.Error())
|
||||
return status, nil
|
||||
}
|
||||
log.Debug("watchDeltaChannelsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
|
||||
log.Debug("watchDeltaChannelsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
}, nil
|
||||
|
@ -301,7 +301,7 @@ func (node *QueryNode) WatchDeltaChannels(ctx context.Context, in *queryPb.Watch
|
|||
func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegmentsRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -330,7 +330,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
|
|||
for _, info := range in.Infos {
|
||||
segmentIDs = append(segmentIDs, info.SegmentID)
|
||||
}
|
||||
log.Debug("loadSegmentsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
|
||||
log.Debug("loadSegmentsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
|
||||
|
||||
waitFunc := func() (*commonpb.Status, error) {
|
||||
err = dct.WaitToFinish()
|
||||
|
@ -342,7 +342,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
|
|||
log.Error(err.Error())
|
||||
return status, nil
|
||||
}
|
||||
log.Debug("loadSegmentsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID))
|
||||
log.Debug("loadSegmentsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_Success,
|
||||
}, nil
|
||||
|
@ -355,7 +355,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
|
|||
func (node *QueryNode) ReleaseCollection(ctx context.Context, in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -401,7 +401,7 @@ func (node *QueryNode) ReleaseCollection(ctx context.Context, in *queryPb.Releas
|
|||
func (node *QueryNode) ReleasePartitions(ctx context.Context, in *queryPb.ReleasePartitionsRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -447,7 +447,7 @@ func (node *QueryNode) ReleasePartitions(ctx context.Context, in *queryPb.Releas
|
|||
func (node *QueryNode) ReleaseSegments(ctx context.Context, in *queryPb.ReleaseSegmentsRequest) (*commonpb.Status, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: err.Error(),
|
||||
|
@ -480,7 +480,7 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *queryPb.ReleaseS
|
|||
func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *queryPb.GetSegmentInfoRequest) (*queryPb.GetSegmentInfoResponse, error) {
|
||||
code := node.stateCode.Load().(internalpb.StateCode)
|
||||
if code != internalpb.StateCode_Healthy {
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.QueryNodeID)
|
||||
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
|
||||
res := &queryPb.GetSegmentInfoResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
|
@ -560,7 +560,7 @@ func (node *QueryNode) Search(ctx context.Context, req *queryPb.SearchRequest) (
|
|||
return &internalpb.SearchResults{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID),
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
@ -619,7 +619,7 @@ func (node *QueryNode) Query(ctx context.Context, req *queryPb.QueryRequest) (*i
|
|||
return &internalpb.RetrieveResults{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID),
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
@ -676,14 +676,14 @@ func (node *QueryNode) Query(ctx context.Context, req *queryPb.QueryRequest) (*i
|
|||
func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
||||
if !node.isHealthy() {
|
||||
log.Warn("QueryNode.GetMetrics failed",
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.QueryNodeID),
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID)))
|
||||
zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
|
||||
|
||||
return &milvuspb.GetMetricsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UnexpectedError,
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.QueryNodeID),
|
||||
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
|
||||
},
|
||||
Response: "",
|
||||
}, nil
|
||||
|
@ -692,7 +692,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
|
|||
metricType, err := metricsinfo.ParseMetricType(req.Request)
|
||||
if err != nil {
|
||||
log.Warn("QueryNode.GetMetrics failed to parse metric type",
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.QueryNodeID),
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
zap.Error(err))
|
||||
|
||||
|
@ -709,7 +709,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
|
|||
metrics, err := getSystemInfoMetrics(ctx, req, node)
|
||||
if err != nil {
|
||||
log.Warn("QueryNode.GetMetrics failed",
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.QueryNodeID),
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
zap.String("metric_type", metricType),
|
||||
zap.Error(err))
|
||||
|
@ -719,7 +719,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
|
|||
}
|
||||
|
||||
log.Debug("QueryNode.GetMetrics failed, request metric type is not implemented yet",
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.QueryNodeID),
|
||||
zap.Int64("node_id", Params.QueryNodeCfg.GetNodeID()),
|
||||
zap.String("req", req.Request),
|
||||
zap.String("metric_type", metricType))
|
||||
|
||||
|
|
|
@@ -31,7 +31,7 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
	totalMem := metricsinfo.GetMemoryCount()
	nodeInfos := metricsinfo.QueryNodeInfos{
		BaseComponentInfos: metricsinfo.BaseComponentInfos{
			Name: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.QueryNodeID),
			Name: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.GetNodeID()),
			HardwareInfos: metricsinfo.HardwareMetrics{
				IP: node.session.Address,
				CPUCoreCount: metricsinfo.GetCPUCoreCount(false),

@@ -68,7 +68,7 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
			Reason: err.Error(),
		},
		Response: "",
		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.QueryNodeID),
		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.GetNodeID()),
	}, nil
}

@@ -78,6 +78,6 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
			Reason: "",
		},
		Response: resp,
		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.QueryNodeID),
		ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, Params.QueryNodeCfg.GetNodeID()),
	}, nil
}
@@ -1686,11 +1686,11 @@ func initConsumer(ctx context.Context, queryResultChannel Channel) (msgstream.Ms

func genSimpleChangeInfo() *querypb.SealedSegmentsChangeInfo {
	changeInfo := &querypb.SegmentChangeInfo{
		OnlineNodeID: Params.QueryNodeCfg.QueryNodeID,
		OnlineNodeID: Params.QueryNodeCfg.GetNodeID(),
		OnlineSegments: []*querypb.SegmentInfo{
			genSimpleSegmentInfo(),
		},
		OfflineNodeID: Params.QueryNodeCfg.QueryNodeID + 1,
		OfflineNodeID: Params.QueryNodeCfg.GetNodeID() + 1,
		OfflineSegments: []*querypb.SegmentInfo{
			genSimpleSegmentInfo(),
		},
@@ -29,7 +29,7 @@ func (qc *queryChannel) AsConsumer(channelName string, subName string, position
	var err error
	qc.asConsumeOnce.Do(func() {
		qc.queryMsgStream.AsConsumer([]string{channelName}, subName)
		metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
		metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
		if position == nil || len(position.MsgID) == 0 {
			log.Debug("QueryNode AsConsumer", zap.String("channel", channelName), zap.String("sub name", subName))
		} else {
@ -334,7 +334,7 @@ func (q *queryCollection) setServiceableTime(t Timestamp) {
|
|||
}
|
||||
q.serviceableTime = t
|
||||
ps, _ := tsoutil.ParseHybridTs(t)
|
||||
metrics.QueryNodeServiceTime.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Set(float64(ps))
|
||||
metrics.QueryNodeServiceTime.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Set(float64(ps))
|
||||
}
|
||||
|
||||
func (q *queryCollection) checkTimeout(msg queryMsg) bool {
|
||||
|
@ -668,11 +668,11 @@ func (q *queryCollection) doUnsolvedQueryMsg() {
|
|||
)
|
||||
switch msgType {
|
||||
case commonpb.MsgType_Retrieve:
|
||||
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
|
||||
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
|
||||
metrics.QueryLabel).Observe(float64(m.RecordSpan().Milliseconds()))
|
||||
err = q.retrieve(m)
|
||||
case commonpb.MsgType_Search:
|
||||
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
|
||||
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
|
||||
metrics.SearchLabel).Observe(float64(m.RecordSpan().Milliseconds()))
|
||||
err = q.search(m)
|
||||
default:
|
||||
|
@ -980,7 +980,7 @@ func translateHits(schema *typeutil.SchemaHelper, fieldIDs []int64, rawHits [][]
|
|||
}
|
||||
}
|
||||
|
||||
metrics.QueryNodeTranslateHitsLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
metrics.QueryNodeTranslateHitsLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))
|
||||
return finalResult, nil
|
||||
}
|
||||
|
||||
|
@ -1121,8 +1121,8 @@ func (q *queryCollection) search(msg queryMsg) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel).Observe(float64(msg.ElapseSpan().Milliseconds()))
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel, metrics.SuccessLabel).Inc()
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel).Observe(float64(msg.ElapseSpan().Milliseconds()))
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel).Inc()
|
||||
|
||||
tr.Record(fmt.Sprintf("publish empty search result done, msgID = %d", searchMsg.ID()))
|
||||
tr.Elapse(fmt.Sprintf("all done, msgID = %d", searchMsg.ID()))
|
||||
|
@ -1153,7 +1153,7 @@ func (q *queryCollection) search(msg queryMsg) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
|
||||
|
||||
for i := 0; i < len(reqSlices); i++ {
|
||||
blob, err := getSearchResultDataBlob(blobs, i)
|
||||
|
@ -1206,9 +1206,9 @@ func (q *queryCollection) search(msg queryMsg) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
|
||||
metrics.SearchLabel).Observe(float64(msg.ElapseSpan().Milliseconds()))
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
|
||||
metrics.SearchLabel,
|
||||
metrics.SuccessLabel).Inc()
|
||||
tr.Record(fmt.Sprintf("publish search result, msgID = %d", searchMsg.ID()))
|
||||
|
@ -1298,7 +1298,7 @@ func (q *queryCollection) retrieve(msg queryMsg) error {
|
|||
}
|
||||
log.Debug("retrieve result", zap.String("ids", result.Ids.String()))
|
||||
reduceDuration := tr.Record(fmt.Sprintf("merge result done, msgID = %d", retrieveMsg.ID()))
|
||||
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.QueryLabel).Observe(float64(reduceDuration.Milliseconds()))
|
||||
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel).Observe(float64(reduceDuration.Milliseconds()))
|
||||
|
||||
resultChannelInt := 0
|
||||
retrieveResultMsg := &msgstream.RetrieveResultMsg{
|
||||
|
@ -1323,8 +1323,8 @@ func (q *queryCollection) retrieve(msg queryMsg) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.QueryLabel, metrics.SuccessLabel).Inc()
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.QueryLabel).Observe(float64(msg.ElapseSpan().Milliseconds()))
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.SuccessLabel).Inc()
|
||||
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel).Observe(float64(msg.ElapseSpan().Milliseconds()))
|
||||
|
||||
log.Debug("QueryNode publish RetrieveResultMsg",
|
||||
zap.Int64("msgID", retrieveMsg.ID()),
|
||||
|
@ -1395,7 +1395,7 @@ func (q *queryCollection) publishSearchResultWithCtx(ctx context.Context, result
|
|||
}
|
||||
|
||||
func (q *queryCollection) publishSearchResult(result *internalpb.SearchResults, nodeID UniqueID) error {
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel, metrics.TotalLabel).Inc()
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.TotalLabel).Inc()
|
||||
return q.publishSearchResultWithCtx(q.releaseCtx, result, nodeID)
|
||||
}
|
||||
|
||||
|
@ -1404,7 +1404,7 @@ func (q *queryCollection) publishRetrieveResultWithCtx(ctx context.Context, resu
|
|||
}
|
||||
|
||||
func (q *queryCollection) publishRetrieveResult(result *internalpb.RetrieveResults, nodeID UniqueID) error {
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.QueryLabel, metrics.TotalLabel).Inc()
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.TotalLabel).Inc()
|
||||
return q.publishRetrieveResultWithCtx(q.releaseCtx, result, nodeID)
|
||||
}
|
||||
|
||||
|
@ -1424,7 +1424,7 @@ func (q *queryCollection) publishFailedQueryResultWithCtx(ctx context.Context, m
|
|||
case commonpb.MsgType_Retrieve:
|
||||
retrieveMsg := msg.(*msgstream.RetrieveMsg)
|
||||
baseResult.MsgType = commonpb.MsgType_RetrieveResult
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.QueryLabel, metrics.FailLabel).Inc()
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.FailLabel).Inc()
|
||||
return q.publishRetrieveResult(&internalpb.RetrieveResults{
|
||||
Base: baseResult,
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: errMsg},
|
||||
|
@ -1435,7 +1435,7 @@ func (q *queryCollection) publishFailedQueryResultWithCtx(ctx context.Context, m
|
|||
case commonpb.MsgType_Search:
|
||||
searchMsg := msg.(*msgstream.SearchMsg)
|
||||
baseResult.MsgType = commonpb.MsgType_SearchResult
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel, metrics.FailLabel).Inc()
|
||||
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.FailLabel).Inc()
|
||||
return q.publishSearchResultWithCtx(ctx, &internalpb.SearchResults{
|
||||
Base: baseResult,
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: errMsg},
|
||||
|
|
|
@@ -91,9 +91,9 @@ func genSimpleSegmentInfo() *querypb.SegmentInfo {

func genSimpleSealedSegmentsChangeInfo() *querypb.SealedSegmentsChangeInfo {
	changeInfo := &querypb.SegmentChangeInfo{
		OnlineNodeID: Params.QueryNodeCfg.QueryNodeID,
		OnlineNodeID: Params.QueryNodeCfg.GetNodeID(),
		OnlineSegments: []*querypb.SegmentInfo{},
		OfflineNodeID: Params.QueryNodeCfg.QueryNodeID,
		OfflineNodeID: Params.QueryNodeCfg.GetNodeID(),
		OfflineSegments: []*querypb.SegmentInfo{},
	}
	return &querypb.SealedSegmentsChangeInfo{
@@ -147,9 +147,9 @@ func (node *QueryNode) initSession() error {
		return fmt.Errorf("session is nil, the etcd client connection may have failed")
	}
	node.session.Init(typeutil.QueryNodeRole, Params.QueryNodeCfg.QueryNodeIP+":"+strconv.FormatInt(Params.QueryNodeCfg.QueryNodePort, 10), false, true)
	Params.QueryNodeCfg.QueryNodeID = node.session.ServerID
	Params.SetLogger(Params.QueryNodeCfg.QueryNodeID)
	log.Debug("QueryNode", zap.Int64("nodeID", Params.QueryNodeCfg.QueryNodeID), zap.String("node address", node.session.Address))
	Params.QueryNodeCfg.SetNodeID(node.session.ServerID)
	Params.SetLogger(Params.QueryNodeCfg.GetNodeID())
	log.Debug("QueryNode", zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()), zap.String("node address", node.session.Address))
	return nil
}

@@ -342,7 +342,7 @@ func (node *QueryNode) Init() error {
	//	qsOptWithSessionManager(node.sessionManager))

	log.Debug("query node init successfully",
		zap.Any("queryNodeID", Params.QueryNodeCfg.QueryNodeID),
		zap.Any("queryNodeID", Params.QueryNodeCfg.GetNodeID()),
		zap.Any("IP", Params.QueryNodeCfg.QueryNodeIP),
		zap.Any("Port", Params.QueryNodeCfg.QueryNodePort),
	)

@@ -378,7 +378,7 @@ func (node *QueryNode) Start() error {

	node.UpdateStateCode(internalpb.StateCode_Healthy)
	log.Debug("query node start successfully",
		zap.Any("queryNodeID", Params.QueryNodeCfg.QueryNodeID),
		zap.Any("queryNodeID", Params.QueryNodeCfg.GetNodeID()),
		zap.Any("IP", Params.QueryNodeCfg.QueryNodeIP),
		zap.Any("Port", Params.QueryNodeCfg.QueryNodePort),
	)

@@ -544,7 +544,7 @@ func (node *QueryNode) removeSegments(segmentChangeInfos *querypb.SealedSegments
	// For offline segments:
	for _, segmentInfo := range info.OfflineSegments {
		// load balance or compaction, remove old sealed segments.
		if info.OfflineNodeID == Params.QueryNodeCfg.QueryNodeID {
		if info.OfflineNodeID == Params.QueryNodeCfg.GetNodeID() {
			err := node.historical.replica.removeSegment(segmentInfo.SegmentID)
			if err != nil {
				return err
@@ -366,7 +366,7 @@ func TestQueryNode_adjustByChangeInfo(t *testing.T) {

	segmentChangeInfos := genSimpleChangeInfo()
	segmentChangeInfos.Infos[0].OnlineSegments = nil
	segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.QueryNodeID
	segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.GetNodeID()

	/*
		qc, err := node.queryService.getQueryCollection(defaultCollectionID)

@@ -439,7 +439,7 @@ func TestQueryNode_watchChangeInfo(t *testing.T) {

	segmentChangeInfos := genSimpleChangeInfo()
	segmentChangeInfos.Infos[0].OnlineSegments = nil
	segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.QueryNodeID
	segmentChangeInfos.Infos[0].OfflineNodeID = Params.QueryNodeCfg.GetNodeID()

	/*
		qc, err := node.queryService.getQueryCollection(defaultCollectionID)
@@ -315,7 +315,7 @@ func (s *Segment) search(plan *SearchPlan,
	log.Debug("do search on segment", zap.Int64("segmentID", s.segmentID), zap.Int32("segmentType", int32(s.segmentType)))
	tr := timerecord.NewTimeRecorder("cgoSearch")
	status := C.Search(s.segmentPtr, plan.cSearchPlan, cPlaceHolderGroup, ts, &searchResult.cSearchResult, C.int64_t(s.segmentID))
	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
	if err := HandleCStatus(&status, "Search failed"); err != nil {
		return nil, err
	}

@@ -345,7 +345,7 @@ func (s *Segment) retrieve(plan *RetrievePlan) (*segcorepb.RetrieveResults, erro
	ts := C.uint64_t(plan.Timestamp)
	tr := timerecord.NewTimeRecorder("cgoRetrieve")
	status := C.Retrieve(s.segmentPtr, plan.cRetrievePlan, ts, &retrieveResult.cRetrieveResult)
	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
		metrics.QueryLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
	if err := HandleCStatus(&status, "Retrieve failed"); err != nil {
		return nil, err
@@ -151,7 +151,7 @@ func (loader *segmentLoader) loadSegment(req *querypb.LoadSegmentsRequest, segme
		return err
	}

	metrics.QueryNodeLoadSegmentLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.QueryNodeLoadSegmentLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))

	return nil
}

@@ -519,14 +519,14 @@ func (loader *segmentLoader) FromDmlCPLoadDelete(ctx context.Context, collection
	}

	defer func() {
		metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Dec()
		metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
		stream.Close()
	}()

	pChannelName := funcutil.ToPhysicalChannel(position.ChannelName)
	position.ChannelName = pChannelName

	stream.AsConsumer([]string{pChannelName}, fmt.Sprintf("querynode-%d-%d", Params.QueryNodeCfg.QueryNodeID, collectionID))
	stream.AsConsumer([]string{pChannelName}, fmt.Sprintf("querynode-%d-%d", Params.QueryNodeCfg.GetNodeID(), collectionID))
	lastMsgID, err := stream.GetLatestMsgID(pChannelName)
	if err != nil {
		return err

@@ -537,7 +537,7 @@ func (loader *segmentLoader) FromDmlCPLoadDelete(ctx context.Context, collection
		return nil
	}

	metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID)).Inc()
	metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
	err = stream.Seek([]*internalpb.MsgPosition{position})
	if err != nil {
		return err
@@ -85,7 +85,7 @@ func (sService *statsService) publicStatistic(fieldStats []*internalpb.FieldStat
	queryNodeStats := internalpb.QueryNodeStats{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_QueryNodeStats,
			SourceID: Params.QueryNodeCfg.QueryNodeID,
			SourceID: Params.QueryNodeCfg.GetNodeID(),
		},
		SegStats: segStats,
		FieldStats: fieldStats,
@@ -220,7 +220,7 @@ func (s *streaming) search(searchReqs []*searchRequest, collID UniqueID, partIDs
			err2 = err
			return
		}
		metrics.QueryNodeSQSegmentLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.QueryNodeID),
		metrics.QueryNodeSQSegmentLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
			metrics.SearchLabel,
			metrics.GrowingSegmentLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
		segmentLock.Lock()
@@ -139,7 +139,7 @@ func (r *addQueryChannelTask) Execute(ctx context.Context) error {
	qc := r.node.queryShardService.getQueryChannel(collectionID)
	log.Debug("add query channel for collection", zap.Int64("collectionID", collectionID))

	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.QueryNodeID)
	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.GetNodeID())

	err := qc.AsConsumer(r.req.QueryChannel, consumeSubName, r.req.SeekPosition)
	if err != nil {

@@ -299,7 +299,7 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
		}
	}()

	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.QueryNodeID)
	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.GetNodeID())

	// group channels by to seeking or consuming
	channel2SeekPosition := make(map[string]*internalpb.MsgPosition)

@@ -528,7 +528,7 @@ func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
		log.Warn("watchDeltaChannel, add flowGraph for deltaChannel failed", zap.Int64("collectionID", collectionID), zap.Strings("vDeltaChannels", vDeltaChannels), zap.Error(err))
		return err
	}
	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.QueryNodeID)
	consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.GetNodeID())
	// channels as consumer
	for channel, fg := range channel2FlowGraph {
		// use pChannel to consume
@@ -30,7 +30,6 @@ import (
	"github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
	"github.com/milvus-io/milvus/internal/util/sessionutil"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// TimeTickProvider is the interface all services implement

@@ -111,9 +110,6 @@ type DataNodeComponent interface {
	// Return nil in status:
	// The dataCoord is not nil.
	SetDataCoord(dataCoord DataCoord) error

	// SetNodeID set node id for DataNode
	SetNodeID(typeutil.UniqueID)
}

// DataCoord is the interface `datacoord` package implements
@@ -18,6 +18,7 @@ import (
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"

@@ -403,7 +404,7 @@ type proxyConfig struct {

	Alias string

	ProxyID UniqueID
	NodeID atomic.Value
	TimeTickInterval time.Duration
	MsgStreamTimeTickBufSize int64
	MaxNameLength int64

@@ -429,7 +430,7 @@ type proxyConfig struct {

func (p *proxyConfig) init(base *BaseTable) {
	p.Base = base

	p.NodeID.Store(UniqueID(0))
	p.initTimeTickInterval()

	p.initMsgStreamTimeTickBufSize()

@@ -543,16 +544,26 @@ func (p *proxyConfig) initGinLogging() {
	p.GinLogging = p.Base.ParseBool("proxy.ginLogging", true)
}

func (p *proxyConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *proxyConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}

///////////////////////////////////////////////////////////////////////////////
// --- querycoord ---
type queryCoordConfig struct {
	Base *BaseTable

	NodeID uint64

	Address string
	Port int
	QueryCoordID UniqueID
	Address string
	Port int
	NodeID atomic.Value

	CreatedTime time.Time
	UpdatedTime time.Time

@@ -569,7 +580,7 @@ type queryCoordConfig struct {

func (p *queryCoordConfig) init(base *BaseTable) {
	p.Base = base

	p.NodeID.Store(UniqueID(0))
	//---- Handoff ---
	p.initAutoHandoff()

@@ -627,6 +638,18 @@ func (p *queryCoordConfig) initMemoryUsageMaxDifferencePercentage() {
	p.MemoryUsageMaxDifferencePercentage = float64(diffPercentage) / 100
}

func (p *queryCoordConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *queryCoordConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}

///////////////////////////////////////////////////////////////////////////////
// --- querynode ---
type queryNodeConfig struct {

@@ -635,7 +658,7 @@ type queryNodeConfig struct {
	Alias string
	QueryNodeIP string
	QueryNodePort int64
	QueryNodeID UniqueID
	NodeID atomic.Value
	// TODO: remove cacheSize
	CacheSize int64 // deprecated

@@ -678,7 +701,7 @@ type queryNodeConfig struct {

func (p *queryNodeConfig) init(base *BaseTable) {
	p.Base = base

	p.NodeID.Store(UniqueID(0))
	p.initCacheSize()
	p.initGracefulTime()

@@ -779,6 +802,7 @@ func (p *queryNodeConfig) initCacheMemoryLimit() {
	}
	p.CacheMemoryLimit = cacheMemoryLimit
}

func (p *queryNodeConfig) initCacheEnabled() {
	var err error
	cacheEnabled := p.Base.LoadWithDefault("queryNode.cache.enabled", "true")

@@ -788,12 +812,24 @@ func (p *queryNodeConfig) initCacheEnabled() {
	}
}

func (p *queryNodeConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *queryNodeConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}

///////////////////////////////////////////////////////////////////////////////
// --- datacoord ---
type dataCoordConfig struct {
	Base *BaseTable

	NodeID int64
	NodeID atomic.Value

	IP string
	Port int

@@ -825,7 +861,6 @@ type dataCoordConfig struct {

func (p *dataCoordConfig) init(base *BaseTable) {
	p.Base = base

	p.initChannelWatchPrefix()

	p.initSegmentMaxSize()

@@ -895,14 +930,26 @@ func (p *dataCoordConfig) initCompactionEntityExpiration() {
	}(p.CompactionEntityExpiration, p.RetentionDuration)
}

func (p *dataCoordConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *dataCoordConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}

///////////////////////////////////////////////////////////////////////////////
// --- datanode ---
type dataNodeConfig struct {
	Base *BaseTable

	// ID of the current DataNode
	NodeID UniqueID

	// ID of the current node
	//NodeID atomic.Value
	NodeID atomic.Value
	// IP of the current DataNode
	IP string

@@ -925,7 +972,7 @@ type dataNodeConfig struct {

func (p *dataNodeConfig) init(base *BaseTable) {
	p.Base = base

	p.NodeID.Store(UniqueID(0))
	p.initFlowGraphMaxQueueLength()
	p.initFlowGraphMaxParallelism()
	p.initFlushInsertBufferSize()

@@ -982,6 +1029,18 @@ func (p *dataNodeConfig) initChannelWatchPath() {
	p.ChannelWatchSubPath = "channelwatch"
}

func (p *dataNodeConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *dataNodeConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}

///////////////////////////////////////////////////////////////////////////////
// --- indexcoord ---
type indexCoordConfig struct {

@@ -1020,8 +1079,9 @@ type indexNodeConfig struct {
	Address string
	Port int

	NodeID int64
	Alias string
	NodeID atomic.Value

	Alias string

	IndexStorageRootPath string

@@ -1031,7 +1091,7 @@ type indexNodeConfig struct {

func (p *indexNodeConfig) init(base *BaseTable) {
	p.Base = base

	p.NodeID.Store(UniqueID(0))
	p.initIndexStorageRootPath()
}

@@ -1047,3 +1107,15 @@ func (p *indexNodeConfig) initIndexStorageRootPath() {
	}
	p.IndexStorageRootPath = path.Join(rootPath, "index_files")
}

func (p *indexNodeConfig) SetNodeID(id UniqueID) {
	p.NodeID.Store(id)
}

func (p *indexNodeConfig) GetNodeID() UniqueID {
	val := p.NodeID.Load()
	if val != nil {
		return val.(UniqueID)
	}
	return 0
}
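The paramtable hunks above are the core of the race fix: each component config replaces a plain NodeID field with a sync/atomic `atomic.Value` hidden behind SetNodeID/GetNodeID, so the goroutine that registers the session can publish the server ID while metrics and logging goroutines read it. A minimal, self-contained sketch of the same pattern; the `nodeConfig` type and the main function below are illustrative stand-ins, not Milvus code:

// Sketch of the atomic.Value-backed NodeID accessor pattern used above.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type UniqueID = int64

type nodeConfig struct {
	nodeID atomic.Value // always stores a UniqueID
}

func (c *nodeConfig) SetNodeID(id UniqueID) { c.nodeID.Store(id) }

func (c *nodeConfig) GetNodeID() UniqueID {
	if v := c.nodeID.Load(); v != nil {
		return v.(UniqueID)
	}
	return 0 // not initialized yet
}

func main() {
	cfg := &nodeConfig{}
	var wg sync.WaitGroup
	// A writer (session init) and a reader (metrics/logging) may run
	// concurrently; with atomic.Value this is race-free under `go run -race`.
	wg.Add(2)
	go func() { defer wg.Done(); cfg.SetNodeID(7) }()
	go func() { defer wg.Done(); _ = cfg.GetNodeID() }()
	wg.Wait()
	fmt.Println(cfg.GetNodeID())
}

Two details of atomic.Value carry over directly from the diff: Load returns nil before the first Store, which is why GetNodeID checks for nil and falls back to 0, and Store panics if a later call stores a different concrete type, which is why each config's init() seeds the value with UniqueID(0).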
@@ -237,9 +237,9 @@ func TestComponentParam(t *testing.T) {
	t.Run("test dataNodeConfig", func(t *testing.T) {
		Params := CParams.DataNodeCfg

		Params.NodeID = 2
		Params.SetNodeID(2)

		id := Params.NodeID
		id := Params.GetNodeID()
		t.Logf("NodeID: %d", id)

		alias := Params.Alias

@@ -293,7 +293,7 @@ func TestComponentParam(t *testing.T) {

	t.Logf("Port: %v", Params.Port)

	t.Logf("NodeID: %v", Params.NodeID)
	t.Logf("NodeID: %v", Params.GetNodeID())

	t.Logf("Alias: %v", Params.Alias)
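The updated test above exercises the accessors only sequentially; the data race itself is what Go's race detector would catch. A hedged sketch of a concurrency-focused regression test (test name, package, and the stand-in config type are assumptions, not part of this change):

// Sketch only: hammer SetNodeID/GetNodeID from many goroutines and rely on
// the race detector, i.e. run this with `go test -race`.
package paramtablesketch

import (
	"sync"
	"sync/atomic"
	"testing"
)

type cfg struct{ nodeID atomic.Value }

func (c *cfg) SetNodeID(id int64) { c.nodeID.Store(id) }
func (c *cfg) GetNodeID() int64 {
	if v := c.nodeID.Load(); v != nil {
		return v.(int64)
	}
	return 0
}

func TestNodeIDConcurrentAccess(t *testing.T) {
	c := &cfg{}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(2)
		go func(id int64) { defer wg.Done(); c.SetNodeID(id) }(int64(i))
		go func() { defer wg.Done(); _ = c.GetNodeID() }()
	}
	wg.Wait()
}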