mirror of https://github.com/milvus-io/milvus.git
Use GetXXX instead of directly using dots to get value (#27045)
Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
pull/27113/head
parent 18cbe26876
commit 22d7fa4e1c
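This commit mechanically replaces direct field access on protobuf-generated messages (response.Status.Reason, req.CollectionID, ...) with the generated GetXXX accessors. The point of the change is nil-safety: protoc-gen-go emits getters that check for a nil receiver and return the field's zero value, so a chained read such as response.GetStatus().GetReason() cannot panic when an RPC hands back a response whose Status was never set, while response.Status.Reason would dereference a nil pointer. The sketch below illustrates the convention with simplified hand-written stand-ins, not the actual generated milvus-proto code.

package main

import "fmt"

// Status is a hand-written stand-in that mimics a protobuf-generated message.
type Status struct {
	ErrorCode int32
	Reason    string
}

// Generated-style getters guard against a nil receiver and return zero values.
func (s *Status) GetErrorCode() int32 {
	if s == nil {
		return 0
	}
	return s.ErrorCode
}

func (s *Status) GetReason() string {
	if s == nil {
		return ""
	}
	return s.Reason
}

// Response is a stand-in for a message with an optional nested Status.
type Response struct {
	Status *Status
}

func (r *Response) GetStatus() *Status {
	if r == nil {
		return nil
	}
	return r.Status
}

func main() {
	resp := &Response{} // Status was never populated

	// Chained getters are safe: GetStatus() yields nil, and GetReason() on a
	// nil *Status returns "".
	fmt.Printf("reason=%q\n", resp.GetStatus().GetReason())

	// The direct form would panic with a nil pointer dereference:
	// fmt.Println(resp.Status.Reason)
}

Run as-is this prints reason=""; uncommenting the last line crashes, which is exactly the failure mode the getter form removes from the error-handling paths in the hunks below.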
@@ -356,24 +356,24 @@ func (ib *indexBuilder) getTaskState(buildID, nodeID UniqueID) indexTaskState {
 }
 if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 log.Ctx(ib.ctx).Warn("IndexCoord get jobs info from IndexNode fail", zap.Int64("nodeID", nodeID),
-zap.Int64("buildID", buildID), zap.String("fail reason", response.Status.Reason))
+zap.Int64("buildID", buildID), zap.String("fail reason", response.GetStatus().GetReason()))
 return indexTaskInProgress
 }

 // indexInfos length is always one.
-for _, info := range response.IndexInfos {
-if info.BuildID == buildID {
-if info.State == commonpb.IndexState_Failed || info.State == commonpb.IndexState_Finished {
-log.Ctx(ib.ctx).Info("this task has been finished", zap.Int64("buildID", info.BuildID),
-zap.String("index state", info.State.String()))
+for _, info := range response.GetIndexInfos() {
+if info.GetBuildID() == buildID {
+if info.GetState() == commonpb.IndexState_Failed || info.GetState() == commonpb.IndexState_Finished {
+log.Ctx(ib.ctx).Info("this task has been finished", zap.Int64("buildID", info.GetBuildID()),
+zap.String("index state", info.GetState().String()))
 if err := ib.meta.FinishTask(info); err != nil {
-log.Ctx(ib.ctx).Warn("IndexCoord update index state fail", zap.Int64("buildID", info.BuildID),
-zap.String("index state", info.State.String()), zap.Error(err))
+log.Ctx(ib.ctx).Warn("IndexCoord update index state fail", zap.Int64("buildID", info.GetBuildID()),
+zap.String("index state", info.GetState().String()), zap.Error(err))
 return indexTaskInProgress
 }
 return indexTaskDone
-} else if info.State == commonpb.IndexState_Retry || info.State == commonpb.IndexState_IndexStateNone {
-log.Ctx(ib.ctx).Info("this task should be retry", zap.Int64("buildID", buildID), zap.String("fail reason", info.FailReason))
+} else if info.GetState() == commonpb.IndexState_Retry || info.GetState() == commonpb.IndexState_IndexStateNone {
+log.Ctx(ib.ctx).Info("this task should be retry", zap.Int64("buildID", buildID), zap.String("fail reason", info.GetFailReason()))
 return indexTaskRetry
 }
 return indexTaskInProgress
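The loop above also changes from response.IndexInfos to response.GetIndexInfos(). For a repeated field the generated getter returns the slice as-is, or nil when the message itself is nil, and ranging over a nil slice runs zero iterations, so the state check degrades to "no task info" instead of panicking. A minimal sketch of that behaviour, again with simplified stand-in types rather than the generated milvus-proto structs:

package main

import "fmt"

// IndexTaskInfo and Response are stand-ins mimicking generated messages.
type IndexTaskInfo struct {
	BuildID int64
}

func (t *IndexTaskInfo) GetBuildID() int64 {
	if t == nil {
		return 0
	}
	return t.BuildID
}

type Response struct {
	IndexInfos []*IndexTaskInfo
}

// Getter for a repeated field: a nil receiver yields a nil slice.
func (r *Response) GetIndexInfos() []*IndexTaskInfo {
	if r == nil {
		return nil
	}
	return r.IndexInfos
}

func main() {
	var resp *Response // e.g. the RPC returned no usable response

	// resp.IndexInfos would panic; the getter form is safe and the loop body
	// is simply skipped.
	for _, info := range resp.GetIndexInfos() {
		fmt.Println(info.GetBuildID())
	}
	fmt.Println("no panic, zero iterations")
}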
@@ -403,9 +403,9 @@ func (ib *indexBuilder) dropIndexTask(buildID, nodeID UniqueID) bool {
 zap.Int64("nodeID", nodeID), zap.Error(err))
 return false
 }
-if status.ErrorCode != commonpb.ErrorCode_Success {
+if status.GetErrorCode() != commonpb.ErrorCode_Success {
 log.Ctx(ib.ctx).Warn("IndexCoord notify IndexNode drop the index task fail", zap.Int64("buildID", buildID),
-zap.Int64("nodeID", nodeID), zap.String("fail reason", status.Reason))
+zap.Int64("nodeID", nodeID), zap.String("fail reason", status.GetReason()))
 return false
 }
 log.Ctx(ib.ctx).Info("IndexCoord notify IndexNode drop the index task success",
@@ -428,9 +428,9 @@ func (ib *indexBuilder) assignTask(builderClient types.IndexNode, req *indexpb.C
 return err
 }

-if resp.ErrorCode != commonpb.ErrorCode_Success {
-log.Error("IndexCoord assignmentTasksLoop builderClient.CreateIndex failed", zap.String("Reason", resp.Reason))
-return errors.New(resp.Reason)
+if resp.GetErrorCode() != commonpb.ErrorCode_Success {
+log.Error("IndexCoord assignmentTasksLoop builderClient.CreateIndex failed", zap.String("Reason", resp.GetReason()))
+return errors.New(resp.GetReason())
 }
 return nil
 }
@@ -521,16 +521,16 @@ func (m *meta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
 m.Lock()
 defer m.Unlock()

-segIdx, ok := m.buildID2SegmentIndex[taskInfo.BuildID]
+segIdx, ok := m.buildID2SegmentIndex[taskInfo.GetBuildID()]
 if !ok {
-log.Warn("there is no index with buildID", zap.Int64("buildID", taskInfo.BuildID))
+log.Warn("there is no index with buildID", zap.Int64("buildID", taskInfo.GetBuildID()))
 return nil
 }
 updateFunc := func(segIdx *model.SegmentIndex) error {
-segIdx.IndexState = taskInfo.State
-segIdx.IndexFileKeys = common.CloneStringList(taskInfo.IndexFileKeys)
-segIdx.FailReason = taskInfo.FailReason
-segIdx.IndexSize = taskInfo.SerializedSize
+segIdx.IndexState = taskInfo.GetState()
+segIdx.IndexFileKeys = common.CloneStringList(taskInfo.GetIndexFileKeys())
+segIdx.FailReason = taskInfo.GetFailReason()
+segIdx.IndexSize = taskInfo.GetSerializedSize()
 return m.alterSegmentIndexes([]*model.SegmentIndex{segIdx})
 }

@@ -538,10 +538,10 @@ func (m *meta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
 return err
 }

-log.Info("finish index task success", zap.Int64("buildID", taskInfo.BuildID),
+log.Info("finish index task success", zap.Int64("buildID", taskInfo.GetBuildID()),
 zap.String("state", taskInfo.GetState().String()), zap.String("fail reason", taskInfo.GetFailReason()))
 m.updateIndexTasksMetrics()
-metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(taskInfo.IndexFileKeys)))
+metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(taskInfo.GetIndexFileKeys())))
 return nil
 }
@@ -138,7 +138,7 @@ func (s *Server) createIndexForSegmentLoop(ctx context.Context) {
 // indexBuilder will find this task and assign it to IndexNode for execution.
 func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRequest) (*commonpb.Status, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive CreateIndex request",
 zap.String("IndexName", req.GetIndexName()), zap.Int64("fieldID", req.GetFieldID()),
@@ -221,10 +221,10 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
 // Deprecated
 func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRequest) (*indexpb.GetIndexStateResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive GetIndexState request",
-zap.String("indexName", req.IndexName))
+zap.String("indexName", req.GetIndexName()))

 errResp := &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
@@ -244,7 +244,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe
 errResp.ErrorCode = commonpb.ErrorCode_IndexNotExist
 errResp.Reason = fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName)
 log.Error("GetIndexState fail",
-zap.String("indexName", req.IndexName), zap.String("fail reason", errResp.Reason))
+zap.String("indexName", req.GetIndexName()), zap.String("fail reason", errResp.Reason))
 return &indexpb.GetIndexStateResponse{
 Status: errResp,
 }, nil
@@ -281,7 +281,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe

 func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive GetSegmentIndexState",
 zap.String("IndexName", req.GetIndexName()), zap.Int64s("fieldID", req.GetSegmentIDs()))
@@ -313,7 +313,7 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
 },
 }, nil
 }
-for _, segID := range req.SegmentIDs {
+for _, segID := range req.GetSegmentIDs() {
 state := s.meta.GetSegmentIndexState(req.GetCollectionID(), segID)
 ret.States = append(ret.States, &indexpb.SegmentIndexState{
 SegmentID: segID,
@@ -456,7 +456,7 @@ func (s *Server) completeIndexInfo(indexInfo *indexpb.IndexInfo, index *model.In
 // Deprecated
 func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetIndexBuildProgressRequest) (*indexpb.GetIndexBuildProgressResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive GetIndexBuildProgress request", zap.String("indexName", req.GetIndexName()))
 errResp := &commonpb.Status{
@@ -493,7 +493,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
 }, nil
 }
 indexInfo := &indexpb.IndexInfo{
-CollectionID: req.CollectionID,
+CollectionID: req.GetCollectionID(),
 IndexID: indexes[0].IndexID,
 IndexedRows: 0,
 TotalRows: 0,
@@ -516,7 +516,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
 // DescribeIndex describe the index info of the collection.
 func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRequest) (*indexpb.DescribeIndexResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive DescribeIndex request", zap.String("indexName", req.GetIndexName()),
 zap.Uint64("timestamp", req.GetTimestamp()))
@@ -536,11 +536,11 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
 indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
 if len(indexes) == 0 {
 errMsg := fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName)
-log.Warn("DescribeIndex fail", zap.String("indexName", req.IndexName), zap.String("fail reason", errMsg))
+log.Warn("DescribeIndex fail", zap.String("indexName", req.GetIndexName()), zap.String("fail reason", errMsg))
 return &indexpb.DescribeIndexResponse{
 Status: &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_IndexNotExist,
-Reason: fmt.Sprint("index doesn't exist, collectionID ", req.CollectionID),
+Reason: fmt.Sprint("index doesn't exist, collectionID ", req.GetCollectionID()),
 },
 }, nil
 }
@@ -582,7 +582,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
 // GetIndexStatistics get the statistics of the index. DescribeIndex doesn't contain statistics.
 func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexStatisticsRequest) (*indexpb.GetIndexStatisticsResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive GetIndexStatistics request", zap.String("indexName", req.GetIndexName()))
 if s.isClosed() {
@@ -596,12 +596,12 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
 if len(indexes) == 0 {
 errMsg := fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName)
 log.Warn("GetIndexStatistics fail",
-zap.String("indexName", req.IndexName),
+zap.String("indexName", req.GetIndexName()),
 zap.String("fail reason", errMsg))
 return &indexpb.GetIndexStatisticsResponse{
 Status: &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_IndexNotExist,
-Reason: fmt.Sprint("index doesn't exist, collectionID ", req.CollectionID),
+Reason: fmt.Sprint("index doesn't exist, collectionID ", req.GetCollectionID()),
 },
 }, nil
 }
@@ -642,7 +642,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
 // index tasks.
 func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 log.Info("receive DropIndex request",
 zap.Int64s("partitionIDs", req.GetPartitionIDs()), zap.String("indexName", req.GetIndexName()),
@@ -678,7 +678,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
 }
 if len(req.GetPartitionIDs()) == 0 {
 // drop collection index
-err := s.meta.MarkIndexAsDeleted(req.CollectionID, indexIDs)
+err := s.meta.MarkIndexAsDeleted(req.GetCollectionID(), indexIDs)
 if err != nil {
 log.Warn("DropIndex fail", zap.String("indexName", req.IndexName), zap.Error(err))
 ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
@@ -687,15 +687,15 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
 }
 }

-log.Debug("DropIndex success", zap.Int64s("partitionIDs", req.PartitionIDs), zap.String("indexName", req.IndexName),
-zap.Int64s("indexIDs", indexIDs))
+log.Debug("DropIndex success", zap.Int64s("partitionIDs", req.GetPartitionIDs()),
+zap.String("indexName", req.GetIndexName()), zap.Int64s("indexIDs", indexIDs))
 return ret, nil
 }

 // GetIndexInfos gets the index file paths for segment from DataCoord.
 func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoRequest) (*indexpb.GetIndexInfoResponse, error) {
 log := log.Ctx(ctx).With(
-zap.Int64("collectionID", req.CollectionID),
+zap.Int64("collectionID", req.GetCollectionID()),
 )
 errResp := &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
@@ -714,10 +714,10 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq
 SegmentInfo: map[int64]*indexpb.SegmentInfo{},
 }

-for _, segID := range req.SegmentIDs {
+for _, segID := range req.GetSegmentIDs() {
 segIdxes := s.meta.GetSegmentIndexes(segID)
 ret.SegmentInfo[segID] = &indexpb.SegmentInfo{
-CollectionID: req.CollectionID,
+CollectionID: req.GetCollectionID(),
 SegmentID: segID,
 EnableIndex: false,
 IndexInfos: make([]*indexpb.IndexFilePathInfo, 0),
@@ -125,10 +125,10 @@ func (nm *IndexNodeManager) PeekClient(meta *model.SegmentIndex) (UniqueID, type
 }
 if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 log.Warn("get IndexNode slots failed", zap.Int64("nodeID", nodeID),
-zap.String("reason", resp.Status.Reason))
+zap.String("reason", resp.GetStatus().GetReason()))
 return
 }
-if resp.TaskSlots > 0 {
+if resp.GetTaskSlots() > 0 {
 nodeMutex.Lock()
 defer nodeMutex.Unlock()
 log.Info("peek client success", zap.Int64("nodeID", nodeID))
@@ -181,11 +181,11 @@ func (nm *IndexNodeManager) ClientSupportDisk() bool {
 }
 if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 log.Warn("get IndexNode slots failed", zap.Int64("nodeID", nodeID),
-zap.String("reason", resp.Status.Reason))
+zap.String("reason", resp.GetStatus().GetReason()))
 return
 }
-log.Debug("get job stats success", zap.Int64("nodeID", nodeID), zap.Bool("enable disk", resp.EnableDisk))
-if resp.EnableDisk {
+log.Debug("get job stats success", zap.Int64("nodeID", nodeID), zap.Bool("enable disk", resp.GetEnableDisk()))
+if resp.GetEnableDisk() {
 nodeMutex.Lock()
 defer nodeMutex.Unlock()
 cancel()
@@ -44,7 +44,8 @@ import (
 func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest) (*commonpb.Status, error) {
 if !i.lifetime.Add(commonpbutil.IsHealthy) {
 stateCode := i.lifetime.GetState()
-log.Ctx(ctx).Warn("index node not ready", zap.String("state", stateCode.String()), zap.String("ClusterID", req.ClusterID), zap.Int64("IndexBuildID", req.BuildID))
+log.Ctx(ctx).Warn("index node not ready", zap.String("state", stateCode.String()),
+zap.String("ClusterID", req.GetClusterID()), zap.Int64("IndexBuildID", req.GetBuildID()))
 return &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
 Reason: "state code is not healthy",
@@ -52,39 +53,39 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
 }
 defer i.lifetime.Done()
 log.Ctx(ctx).Info("IndexNode building index ...",
-zap.String("ClusterID", req.ClusterID),
-zap.Int64("IndexBuildID", req.BuildID),
-zap.Int64("IndexID", req.IndexID),
-zap.String("IndexName", req.IndexName),
-zap.String("IndexFilePrefix", req.IndexFilePrefix),
-zap.Int64("IndexVersion", req.IndexVersion),
-zap.Strings("DataPaths", req.DataPaths),
-zap.Any("TypeParams", req.TypeParams),
-zap.Any("IndexParams", req.IndexParams),
+zap.String("ClusterID", req.GetClusterID()),
+zap.Int64("IndexBuildID", req.GetBuildID()),
+zap.Int64("IndexID", req.GetIndexID()),
+zap.String("IndexName", req.GetIndexName()),
+zap.String("IndexFilePrefix", req.GetIndexFilePrefix()),
+zap.Int64("IndexVersion", req.GetIndexVersion()),
+zap.Strings("DataPaths", req.GetDataPaths()),
+zap.Any("TypeParams", req.GetTypeParams()),
+zap.Any("IndexParams", req.GetIndexParams()),
 zap.Int64("num_rows", req.GetNumRows()))
 ctx, sp := otel.Tracer(typeutil.IndexNodeRole).Start(ctx, "IndexNode-CreateIndex", trace.WithAttributes(
-attribute.Int64("IndexBuildID", req.BuildID),
-attribute.String("ClusterID", req.ClusterID),
+attribute.Int64("IndexBuildID", req.GetBuildID()),
+attribute.String("ClusterID", req.GetClusterID()),
 ))
 defer sp.End()
 metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.TotalLabel).Inc()

 taskCtx, taskCancel := context.WithCancel(i.loopCtx)
-if oldInfo := i.loadOrStoreTask(req.ClusterID, req.BuildID, &taskInfo{
+if oldInfo := i.loadOrStoreTask(req.GetClusterID(), req.GetBuildID(), &taskInfo{
 cancel: taskCancel,
 state: commonpb.IndexState_InProgress}); oldInfo != nil {
-log.Ctx(ctx).Warn("duplicated index build task", zap.String("ClusterID", req.ClusterID), zap.Int64("BuildID", req.BuildID))
+log.Ctx(ctx).Warn("duplicated index build task", zap.String("ClusterID", req.GetClusterID()), zap.Int64("BuildID", req.GetBuildID()))
 metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
 return &commonpb.Status{
 ErrorCode: commonpb.ErrorCode_BuildIndexError,
 Reason: "duplicated index build task",
 }, nil
 }
-cm, err := i.storageFactory.NewChunkManager(i.loopCtx, req.StorageConfig)
+cm, err := i.storageFactory.NewChunkManager(i.loopCtx, req.GetStorageConfig())
 if err != nil {
-log.Ctx(ctx).Error("create chunk manager failed", zap.String("Bucket", req.StorageConfig.BucketName),
-zap.String("AccessKey", req.StorageConfig.AccessKeyID),
-zap.String("ClusterID", req.ClusterID), zap.Int64("IndexBuildID", req.BuildID),
+log.Ctx(ctx).Error("create chunk manager failed", zap.String("Bucket", req.GetStorageConfig().GetBucketName()),
+zap.String("AccessKey", req.GetStorageConfig().GetAccessKeyID()),
+zap.String("ClusterID", req.GetClusterID()), zap.Int64("IndexBuildID", req.GetBuildID()),
 zap.Error(err),
 )
 i.deleteTaskInfos(ctx, []taskKey{{ClusterID: req.GetClusterID(), BuildID: req.GetBuildID()}})
@@ -98,8 +99,8 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
 ident: fmt.Sprintf("%s/%d", req.ClusterID, req.BuildID),
 ctx: taskCtx,
 cancel: taskCancel,
-BuildID: req.BuildID,
-ClusterID: req.ClusterID,
+BuildID: req.GetBuildID(),
+ClusterID: req.GetClusterID(),
 node: i,
 req: req,
 cm: cm,
@@ -109,14 +110,16 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
 }
 ret := merr.Status(nil)
 if err := i.sched.IndexBuildQueue.Enqueue(task); err != nil {
-log.Ctx(ctx).Warn("IndexNode failed to schedule", zap.Int64("IndexBuildID", req.BuildID), zap.String("ClusterID", req.ClusterID), zap.Error(err))
+log.Ctx(ctx).Warn("IndexNode failed to schedule", zap.Int64("IndexBuildID", req.GetBuildID()),
+zap.String("ClusterID", req.GetClusterID()), zap.Error(err))
 ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
 ret.Reason = err.Error()
 metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.FailLabel).Inc()
 return ret, nil
 }
 metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SuccessLabel).Inc()
-log.Ctx(ctx).Info("IndexNode successfully scheduled", zap.Int64("IndexBuildID", req.BuildID), zap.String("ClusterID", req.ClusterID), zap.String("indexName", req.IndexName))
+log.Ctx(ctx).Info("IndexNode successfully scheduled", zap.Int64("IndexBuildID", req.GetBuildID()),
+zap.String("ClusterID", req.GetClusterID()), zap.String("indexName", req.GetIndexName()))
 return ret, nil
 }
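Note that the assignments in the hunk above (ret.ErrorCode = ..., ret.Reason = err.Error()) keep direct field access. The generated accessors only read; writes still go through the exported fields, which is fine here because ret was just built locally and cannot be nil. A short sketch of that read/write split, with a simplified stand-in type rather than the generated commonpb.Status:

package main

import "fmt"

// Status is a stand-in mimicking a generated message.
type Status struct {
	ErrorCode int32
	Reason    string
}

func (s *Status) GetReason() string {
	if s == nil {
		return ""
	}
	return s.Reason
}

func main() {
	ret := &Status{} // freshly constructed, guaranteed non-nil

	// Writes use the fields directly, as the diff does.
	ret.ErrorCode = 1
	ret.Reason = "enqueue failed"

	// Reads use the getter, which would also tolerate a nil ret.
	fmt.Println(ret.GetReason())
}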
@@ -137,7 +140,7 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
 defer i.lifetime.Done()
 infos := make(map[UniqueID]*taskInfo)
 i.foreachTaskInfo(func(ClusterID string, buildID UniqueID, info *taskInfo) {
-if ClusterID == req.ClusterID {
+if ClusterID == req.GetClusterID() {
 infos[buildID] = &taskInfo{
 state: info.state,
 fileKeys: common.CloneStringList(info.fileKeys),
@@ -148,10 +151,10 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
 })
 ret := &indexpb.QueryJobsResponse{
 Status: merr.Status(nil),
-ClusterID: req.ClusterID,
-IndexInfos: make([]*indexpb.IndexTaskInfo, 0, len(req.BuildIDs)),
+ClusterID: req.GetClusterID(),
+IndexInfos: make([]*indexpb.IndexTaskInfo, 0, len(req.GetBuildIDs())),
 }
-for i, buildID := range req.BuildIDs {
+for i, buildID := range req.GetBuildIDs() {
 ret.IndexInfos = append(ret.IndexInfos, &indexpb.IndexTaskInfo{
 BuildID: buildID,
 State: commonpb.IndexState_IndexStateNone,
@@ -182,9 +185,9 @@ func (i *IndexNode) DropJobs(ctx context.Context, req *indexpb.DropJobsRequest)
 }, nil
 }
 defer i.lifetime.Done()
-keys := make([]taskKey, 0, len(req.BuildIDs))
-for _, buildID := range req.BuildIDs {
-keys = append(keys, taskKey{ClusterID: req.ClusterID, BuildID: buildID})
+keys := make([]taskKey, 0, len(req.GetBuildIDs()))
+for _, buildID := range req.GetBuildIDs() {
+keys = append(keys, taskKey{ClusterID: req.GetClusterID(), BuildID: buildID})
 }
 infos := i.deleteTaskInfos(ctx, keys)
 for _, info := range infos {
@@ -192,8 +195,8 @@ func (i *IndexNode) DropJobs(ctx context.Context, req *indexpb.DropJobsRequest)
 info.cancel()
 }
 }
-log.Ctx(ctx).Info("drop index build jobs success", zap.String("ClusterID", req.ClusterID),
-zap.Int64s("IndexBuildIDs", req.BuildIDs))
+log.Ctx(ctx).Info("drop index build jobs success", zap.String("ClusterID", req.GetClusterID()),
+zap.Int64s("IndexBuildIDs", req.GetBuildIDs()))
 return merr.Status(nil), nil
 }
@@ -238,7 +241,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
 if !i.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
 log.Ctx(ctx).Warn("IndexNode.GetMetrics failed",
 zap.Int64("nodeID", paramtable.GetNodeID()),
-zap.String("req", req.Request),
+zap.String("req", req.GetRequest()),
 zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))

 return &milvuspb.GetMetricsResponse{
@@ -251,11 +254,11 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
 }
 defer i.lifetime.Done()

-metricType, err := metricsinfo.ParseMetricType(req.Request)
+metricType, err := metricsinfo.ParseMetricType(req.GetRequest())
 if err != nil {
 log.Ctx(ctx).Warn("IndexNode.GetMetrics failed to parse metric type",
 zap.Int64("nodeID", paramtable.GetNodeID()),
-zap.String("req", req.Request),
+zap.String("req", req.GetRequest()),
 zap.Error(err))

 return &milvuspb.GetMetricsResponse{
@@ -272,7 +275,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ

 log.Ctx(ctx).RatedDebug(60, "IndexNode.GetMetrics",
 zap.Int64("nodeID", paramtable.GetNodeID()),
-zap.String("req", req.Request),
+zap.String("req", req.GetRequest()),
 zap.String("metric_type", metricType),
 zap.Error(err))

@@ -281,7 +284,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ

 log.Ctx(ctx).RatedWarn(60, "IndexNode.GetMetrics failed, request metric type is not implemented yet",
 zap.Int64("nodeID", paramtable.GetNodeID()),
-zap.String("req", req.Request),
+zap.String("req", req.GetRequest()),
 zap.String("metric_type", metricType))

 return &milvuspb.GetMetricsResponse{