mirror of https://github.com/milvus-io/milvus.git

enhance: refine the datacoord meta related interfaces (#37957)

issue: #35917
This PR refines the meta-related APIs in datacoord so that the caller's ctx can be passed down to the catalog operation interfaces.

Signed-off-by: tinswzy <zhenyuan.wei@zilliz.com>

branch: pull/38007/head
parent 2208b7c2ef
commit 1dbb6cd7cb
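The change is mechanical but wide: meta methods that reach the catalog now accept a ctx argument and forward it, instead of using a context stored on the struct. Below is a minimal sketch of that pattern; exampleMeta and analyzeTaskCatalog are illustrative stand-ins for this note, not the actual Milvus types.

package datacoord

import "context"

// analyzeTaskCatalog is a hypothetical stand-in for the catalog interface the
// real code calls into; only the one method needed for the sketch is shown.
type analyzeTaskCatalog interface {
    DropAnalyzeTask(ctx context.Context, taskID int64) error
}

// exampleMeta is a hypothetical meta wrapper used only to show the pattern.
type exampleMeta struct {
    catalog analyzeTaskCatalog
}

// Before this change the method had no ctx parameter and passed a context kept
// on the struct to the catalog. After it, the caller's ctx is threaded through,
// so cancellation, deadlines, and tracing reach the metastore operation.
func (m *exampleMeta) DropAnalyzeTask(ctx context.Context, taskID int64) error {
    return m.catalog.DropAnalyzeTask(ctx, taskID)
}

Call sites that do not yet carry a request context are updated with context.TODO() as a placeholder, which is the pattern visible throughout the diff below.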
@@ -96,12 +96,12 @@ func (m *analyzeMeta) AddAnalyzeTask(task *indexpb.AnalyzeTask) error {
 return m.saveTask(task)
 }

-func (m *analyzeMeta) DropAnalyzeTask(taskID int64) error {
+func (m *analyzeMeta) DropAnalyzeTask(ctx context.Context, taskID int64) error {
 m.Lock()
 defer m.Unlock()

 log.Info("drop analyze task", zap.Int64("taskID", taskID))
-if err := m.catalog.DropAnalyzeTask(m.ctx, taskID); err != nil {
+if err := m.catalog.DropAnalyzeTask(ctx, taskID); err != nil {
 log.Warn("drop analyze task by catalog failed", zap.Int64("taskID", taskID),
 zap.Error(err))
 return err
@@ -136,7 +136,7 @@ func (s *AnalyzeMetaSuite) Test_AnalyzeMeta() {
 })

 s.Run("DropAnalyzeTask", func() {
-err := am.DropAnalyzeTask(7)
+err := am.DropAnalyzeTask(ctx, 7)
 s.NoError(err)
 s.Equal(6, len(am.GetAllTasks()))
 })
@@ -212,7 +212,7 @@ func (s *AnalyzeMetaSuite) Test_failCase() {
 })

 s.Run("DropAnalyzeTask", func() {
-err := am.DropAnalyzeTask(1)
+err := am.DropAnalyzeTask(ctx, 1)
 s.Error(err)
 s.NotNil(am.GetTask(1))
 })
@@ -58,7 +58,7 @@ type compactionPlanContext interface {
 isFull() bool
 // get compaction tasks by signal id
 getCompactionTasksNumBySignalID(signalID int64) int
-getCompactionInfo(signalID int64) *compactionInfo
+getCompactionInfo(ctx context.Context, signalID int64) *compactionInfo
 removeTasksByChannel(channel string)
 }

@@ -96,8 +96,8 @@ type compactionPlanHandler struct {
 stopWg sync.WaitGroup
 }

-func (c *compactionPlanHandler) getCompactionInfo(triggerID int64) *compactionInfo {
-tasks := c.meta.GetCompactionTasksByTriggerID(triggerID)
+func (c *compactionPlanHandler) getCompactionInfo(ctx context.Context, triggerID int64) *compactionInfo {
+tasks := c.meta.GetCompactionTasksByTriggerID(ctx, triggerID)
 return summaryCompactionState(tasks)
 }
@@ -323,7 +323,7 @@ func (c *compactionPlanHandler) start() {

 func (c *compactionPlanHandler) loadMeta() {
 // TODO: make it compatible to all types of compaction with persist meta
-triggers := c.meta.GetCompactionTasks()
+triggers := c.meta.GetCompactionTasks(context.TODO())
 for _, tasks := range triggers {
 for _, task := range tasks {
 state := task.GetState()
@@ -346,7 +346,7 @@ func (c *compactionPlanHandler) loadMeta() {
 zap.Error(err),
 )
 // ignore the drop error
-c.meta.DropCompactionTask(task)
+c.meta.DropCompactionTask(context.TODO(), task)
 continue
 }
 if t.NeedReAssignNodeID() {
@@ -434,14 +434,14 @@ func (c *compactionPlanHandler) Clean() {

 func (c *compactionPlanHandler) cleanCompactionTaskMeta() {
 // gc clustering compaction tasks
-triggers := c.meta.GetCompactionTasks()
+triggers := c.meta.GetCompactionTasks(context.TODO())
 for _, tasks := range triggers {
 for _, task := range tasks {
 if task.State == datapb.CompactionTaskState_completed || task.State == datapb.CompactionTaskState_cleaned {
 duration := time.Since(time.Unix(task.StartTime, 0)).Seconds()
 if duration > float64(Params.DataCoordCfg.CompactionDropToleranceInSeconds.GetAsDuration(time.Second).Seconds()) {
 // try best to delete meta
-err := c.meta.DropCompactionTask(task)
+err := c.meta.DropCompactionTask(context.TODO(), task)
 log.Debug("drop compaction task meta", zap.Int64("planID", task.PlanID))
 if err != nil {
 log.Warn("fail to drop task", zap.Int64("planID", task.PlanID), zap.Error(err))
@@ -478,7 +478,7 @@ func (c *compactionPlanHandler) cleanPartitionStats() error {
 for _, info := range unusedPartStats {
 log.Debug("collection has been dropped, remove partition stats",
 zap.Int64("collID", info.GetCollectionID()))
-if err := c.meta.CleanPartitionStatsInfo(info); err != nil {
+if err := c.meta.CleanPartitionStatsInfo(context.TODO(), info); err != nil {
 log.Warn("gcPartitionStatsInfo fail", zap.Error(err))
 return err
 }
@@ -492,7 +492,7 @@ func (c *compactionPlanHandler) cleanPartitionStats() error {
 if len(infos) > 2 {
 for i := 2; i < len(infos); i++ {
 info := infos[i]
-if err := c.meta.CleanPartitionStatsInfo(info); err != nil {
+if err := c.meta.CleanPartitionStatsInfo(context.TODO(), info); err != nil {
 log.Warn("gcPartitionStatsInfo fail", zap.Error(err))
 return err
 }
@@ -592,7 +592,7 @@ func (c *compactionPlanHandler) enqueueCompaction(task *datapb.CompactionTask) e
 t.SetTask(t.ShadowClone(setStartTime(time.Now().Unix())))
 err = t.SaveTaskMeta()
 if err != nil {
-c.meta.SetSegmentsCompacting(t.GetTaskProto().GetInputSegments(), false)
+c.meta.SetSegmentsCompacting(context.TODO(), t.GetTaskProto().GetInputSegments(), false)
 log.Warn("Failed to enqueue compaction task, unable to save task meta", zap.Error(err))
 return err
 }
@@ -614,7 +614,7 @@ func (c *compactionPlanHandler) createCompactTask(t *datapb.CompactionTask) (Com
 default:
 return nil, merr.WrapErrIllegalCompactionPlan("illegal compaction type")
 }
-exist, succeed := c.meta.CheckAndSetSegmentsCompacting(t.GetInputSegments())
+exist, succeed := c.meta.CheckAndSetSegmentsCompacting(context.TODO(), t.GetInputSegments())
 if !exist {
 return nil, merr.WrapErrIllegalCompactionPlan("segment not exist")
 }
@@ -78,7 +78,7 @@ func (policy *clusteringCompactionPolicy) checkAllL2SegmentsContains(ctx context
 segment.GetLevel() == datapb.SegmentLevel_L2 &&
 segment.isCompacting
 }
-segments := policy.meta.SelectSegments(SegmentFilterFunc(getCompactingL2Segment))
+segments := policy.meta.SelectSegments(ctx, SegmentFilterFunc(getCompactingL2Segment))
 if len(segments) > 0 {
 log.Ctx(ctx).Info("there are some segments are compacting",
 zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID),
@@ -210,13 +210,14 @@ func (s *ClusteringCompactionPolicySuite) TestTriggerOneCollectionAbnormal() {
 }

 func (s *ClusteringCompactionPolicySuite) TestTriggerOneCollectionNoClusteringKeySchema() {
+ctx := context.Background()
 coll := &collectionInfo{
 ID: 100,
 Schema: newTestSchema(),
 }
 s.handler.EXPECT().GetCollection(mock.Anything, mock.Anything).Return(coll, nil)

-s.meta.compactionTaskMeta.SaveCompactionTask(&datapb.CompactionTask{
+s.meta.compactionTaskMeta.SaveCompactionTask(ctx, &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: 100,
@@ -230,13 +231,14 @@ func (s *ClusteringCompactionPolicySuite) TestTriggerOneCollectionNoClusteringKe
 }

 func (s *ClusteringCompactionPolicySuite) TestTriggerOneCollectionCompacting() {
+ctx := context.Background()
 coll := &collectionInfo{
 ID: 100,
 Schema: newTestScalarClusteringKeySchema(),
 }
 s.handler.EXPECT().GetCollection(mock.Anything, mock.Anything).Return(coll, nil)

-s.meta.compactionTaskMeta.SaveCompactionTask(&datapb.CompactionTask{
+s.meta.compactionTaskMeta.SaveCompactionTask(ctx, &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: 100,
@@ -250,6 +252,7 @@ func (s *ClusteringCompactionPolicySuite) TestTriggerOneCollectionCompacting() {
 }

 func (s *ClusteringCompactionPolicySuite) TestCollectionIsClusteringCompacting() {
+ctx := context.Background()
 s.Run("no collection is compacting", func() {
 compacting, triggerID := s.clusteringCompactionPolicy.collectionIsClusteringCompacting(collID)
 s.False(compacting)
@@ -280,7 +283,7 @@ func (s *ClusteringCompactionPolicySuite) TestCollectionIsClusteringCompacting()
 s.clusteringCompactionPolicy.meta = &meta{
 compactionTaskMeta: compactionTaskMeta,
 }
-compactionTaskMeta.SaveCompactionTask(&datapb.CompactionTask{
+compactionTaskMeta.SaveCompactionTask(ctx, &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: collID,
@@ -111,6 +111,7 @@ func (s *SingleCompactionPolicySuite) TestIsDeleteRowsTooManySegment() {
 }

 func (s *SingleCompactionPolicySuite) TestL2SingleCompaction() {
+ctx := context.Background()
 paramtable.Get().Save(paramtable.Get().DataCoordCfg.IndexBasedCompaction.Key, "false")
 defer paramtable.Get().Reset(paramtable.Get().DataCoordCfg.IndexBasedCompaction.Key)

@@ -134,7 +135,7 @@ func (s *SingleCompactionPolicySuite) TestL2SingleCompaction() {
 compactionTaskMeta: compactionTaskMeta,
 segments: segmentsInfo,
 }
-compactionTaskMeta.SaveCompactionTask(&datapb.CompactionTask{
+compactionTaskMeta.SaveCompactionTask(ctx, &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: collID,
@@ -198,7 +198,7 @@ func (t *clusteringCompactionTask) BuildCompactionRequest() (*datapb.CompactionP
 log := log.With(zap.Int64("taskID", taskProto.GetTriggerID()), zap.Int64("planID", plan.GetPlanID()))

 for _, segID := range taskProto.GetInputSegments() {
-segInfo := t.meta.GetHealthySegment(segID)
+segInfo := t.meta.GetHealthySegment(context.TODO(), segID)
 if segInfo == nil {
 return nil, merr.WrapErrSegmentNotFound(segID)
 }
@@ -268,7 +268,7 @@ func (t *clusteringCompactionTask) processExecuting() error {
 return segment.GetSegmentID()
 })

-_, metricMutation, err := t.meta.CompleteCompactionMutation(t.GetTaskProto(), t.result)
+_, metricMutation, err := t.meta.CompleteCompactionMutation(context.TODO(), t.GetTaskProto(), t.result)
 if err != nil {
 return err
 }
@@ -435,7 +435,7 @@ func (t *clusteringCompactionTask) markResultSegmentsVisible() error {
 operators = append(operators, UpdateSegmentPartitionStatsVersionOperator(segID, t.GetTaskProto().GetPlanID()))
 }

-err := t.meta.UpdateSegmentsInfo(operators...)
+err := t.meta.UpdateSegmentsInfo(context.TODO(), operators...)
 if err != nil {
 log.Warn("markResultSegmentVisible UpdateSegmentsInfo fail", zap.Error(err))
 return merr.WrapErrClusteringCompactionMetaError("markResultSegmentVisible UpdateSegmentsInfo", err)
@@ -449,7 +449,7 @@ func (t *clusteringCompactionTask) markInputSegmentsDropped() error {
 for _, segID := range t.GetTaskProto().GetInputSegments() {
 operators = append(operators, UpdateStatusOperator(segID, commonpb.SegmentState_Dropped))
 }
-err := t.meta.UpdateSegmentsInfo(operators...)
+err := t.meta.UpdateSegmentsInfo(context.TODO(), operators...)
 if err != nil {
 log.Warn("markInputSegmentsDropped UpdateSegmentsInfo fail", zap.Error(err))
 return merr.WrapErrClusteringCompactionMetaError("markInputSegmentsDropped UpdateSegmentsInfo", err)
@@ -519,7 +519,7 @@ func (t *clusteringCompactionTask) processAnalyzing() error {
 }

 func (t *clusteringCompactionTask) resetSegmentCompacting() {
-t.meta.SetSegmentsCompacting(t.GetTaskProto().GetInputSegments(), false)
+t.meta.SetSegmentsCompacting(context.TODO(), t.GetTaskProto().GetInputSegments(), false)
 }

 func (t *clusteringCompactionTask) processFailedOrTimeout() error {
@@ -532,7 +532,7 @@ func (t *clusteringCompactionTask) processFailedOrTimeout() error {
 }
 isInputDropped := false
 for _, segID := range t.GetTaskProto().GetInputSegments() {
-if t.meta.GetHealthySegment(segID) == nil {
+if t.meta.GetHealthySegment(context.TODO(), segID) == nil {
 isInputDropped = true
 break
 }
@@ -559,7 +559,7 @@ func (t *clusteringCompactionTask) processFailedOrTimeout() error {
 operators = append(operators, UpdateSegmentLevelOperator(segID, datapb.SegmentLevel_L1))
 operators = append(operators, UpdateSegmentPartitionStatsVersionOperator(segID, 0))
 }
-err := t.meta.UpdateSegmentsInfo(operators...)
+err := t.meta.UpdateSegmentsInfo(context.TODO(), operators...)
 if err != nil {
 log.Warn("UpdateSegmentsInfo fail", zap.Error(err))
 return merr.WrapErrClusteringCompactionMetaError("UpdateSegmentsInfo", err)
@@ -576,7 +576,7 @@ func (t *clusteringCompactionTask) processFailedOrTimeout() error {
 // tmpSegment is always invisible
 operators = append(operators, UpdateStatusOperator(segID, commonpb.SegmentState_Dropped))
 }
-err := t.meta.UpdateSegmentsInfo(operators...)
+err := t.meta.UpdateSegmentsInfo(context.TODO(), operators...)
 if err != nil {
 log.Warn("UpdateSegmentsInfo fail", zap.Error(err))
 return merr.WrapErrClusteringCompactionMetaError("UpdateSegmentsInfo", err)
@@ -593,7 +593,7 @@ func (t *clusteringCompactionTask) processFailedOrTimeout() error {
 Version: t.GetTaskProto().GetPlanID(),
 SegmentIDs: t.GetTaskProto().GetResultSegments(),
 }
-err := t.meta.CleanPartitionStatsInfo(partitionStatsInfo)
+err := t.meta.CleanPartitionStatsInfo(context.TODO(), partitionStatsInfo)
 if err != nil {
 log.Warn("gcPartitionStatsInfo fail", zap.Error(err))
 }
@@ -703,7 +703,7 @@ func (t *clusteringCompactionTask) checkTimeout() bool {
 }

 func (t *clusteringCompactionTask) saveTaskMeta(task *datapb.CompactionTask) error {
-return t.meta.SaveCompactionTask(task)
+return t.meta.SaveCompactionTask(context.TODO(), task)
 }

 func (t *clusteringCompactionTask) SaveTaskMeta() error {
@@ -111,9 +111,9 @@ func (s *ClusteringCompactionTaskSuite) TestClusteringCompactionSegmentMetaChang

 task.processPipelining()

-seg11 := s.meta.GetSegment(101)
+seg11 := s.meta.GetSegment(context.TODO(), 101)
 s.Equal(datapb.SegmentLevel_L1, seg11.Level)
-seg21 := s.meta.GetSegment(102)
+seg21 := s.meta.GetSegment(context.TODO(), 102)
 s.Equal(datapb.SegmentLevel_L2, seg21.Level)
 s.Equal(int64(10000), seg21.PartitionStatsVersion)
@@ -165,21 +165,21 @@ func (s *ClusteringCompactionTaskSuite) TestClusteringCompactionSegmentMetaChang

 task.processFailedOrTimeout()

-seg12 := s.meta.GetSegment(101)
+seg12 := s.meta.GetSegment(context.TODO(), 101)
 s.Equal(datapb.SegmentLevel_L1, seg12.Level)
 s.Equal(commonpb.SegmentState_Dropped, seg12.State)

-seg22 := s.meta.GetSegment(102)
+seg22 := s.meta.GetSegment(context.TODO(), 102)
 s.Equal(datapb.SegmentLevel_L2, seg22.Level)
 s.Equal(int64(10000), seg22.PartitionStatsVersion)
 s.Equal(commonpb.SegmentState_Dropped, seg22.State)

-seg32 := s.meta.GetSegment(103)
+seg32 := s.meta.GetSegment(context.TODO(), 103)
 s.Equal(datapb.SegmentLevel_L1, seg32.Level)
 s.Equal(int64(0), seg32.PartitionStatsVersion)
 s.Equal(commonpb.SegmentState_Flushed, seg32.State)

-seg42 := s.meta.GetSegment(104)
+seg42 := s.meta.GetSegment(context.TODO(), 104)
 s.Equal(datapb.SegmentLevel_L1, seg42.Level)
 s.Equal(int64(0), seg42.PartitionStatsVersion)
 s.Equal(commonpb.SegmentState_Flushed, seg42.State)
@@ -254,29 +254,29 @@ func (s *ClusteringCompactionTaskSuite) TestClusteringCompactionSegmentMetaChang

 task.processFailedOrTimeout()

-seg12 := s.meta.GetSegment(101)
+seg12 := s.meta.GetSegment(context.TODO(), 101)
 s.Equal(datapb.SegmentLevel_L1, seg12.Level)
-seg22 := s.meta.GetSegment(102)
+seg22 := s.meta.GetSegment(context.TODO(), 102)
 s.Equal(datapb.SegmentLevel_L2, seg22.Level)
 s.Equal(int64(10000), seg22.PartitionStatsVersion)

-seg32 := s.meta.GetSegment(103)
+seg32 := s.meta.GetSegment(context.TODO(), 103)
 s.Equal(datapb.SegmentLevel_L2, seg32.Level)
 s.Equal(commonpb.SegmentState_Dropped, seg32.State)
 s.True(seg32.IsInvisible)

-seg42 := s.meta.GetSegment(104)
+seg42 := s.meta.GetSegment(context.TODO(), 104)
 s.Equal(datapb.SegmentLevel_L2, seg42.Level)
 s.Equal(commonpb.SegmentState_Dropped, seg42.State)
 s.True(seg42.IsInvisible)

-seg52 := s.meta.GetSegment(105)
+seg52 := s.meta.GetSegment(context.TODO(), 105)
 s.Equal(datapb.SegmentLevel_L2, seg52.Level)
 s.Equal(int64(10001), seg52.PartitionStatsVersion)
 s.Equal(commonpb.SegmentState_Dropped, seg52.State)
 s.True(seg52.IsInvisible)

-seg62 := s.meta.GetSegment(106)
+seg62 := s.meta.GetSegment(context.TODO(), 106)
 s.Equal(datapb.SegmentLevel_L2, seg62.Level)
 s.Equal(int64(10001), seg62.PartitionStatsVersion)
 s.Equal(commonpb.SegmentState_Dropped, seg62.State)
@@ -636,7 +636,7 @@ func (s *ClusteringCompactionTaskSuite) TestProcessIndexingState() {
 }

 task.updateAndSaveTaskMeta(setResultSegments([]int64{10, 11}))
-err := s.meta.indexMeta.CreateIndex(index)
+err := s.meta.indexMeta.CreateIndex(context.TODO(), index)
 s.NoError(err)

 s.False(task.Process())
@@ -650,7 +650,7 @@ func (s *ClusteringCompactionTaskSuite) TestProcessIndexingState() {
 CollectionID: 1,
 IndexID: 3,
 }
-err := s.meta.indexMeta.CreateIndex(index)
+err := s.meta.indexMeta.CreateIndex(context.TODO(), index)
 s.NoError(err)

 s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
@@ -269,7 +269,7 @@ func (t *l0CompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, err

 log := log.With(zap.Int64("taskID", taskProto.GetTriggerID()), zap.Int64("planID", plan.GetPlanID()))
 for _, segID := range taskProto.GetInputSegments() {
-segInfo := t.meta.GetHealthySegment(segID)
+segInfo := t.meta.GetHealthySegment(context.TODO(), segID)
 if segInfo == nil {
 return nil, merr.WrapErrSegmentNotFound(segID)
 }
@@ -286,7 +286,7 @@ func (t *l0CompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, err

 // Select sealed L1 segments for LevelZero compaction that meets the condition:
 // dmlPos < triggerInfo.pos
-sealedSegments := t.meta.SelectSegments(WithCollection(taskProto.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
+sealedSegments := t.meta.SelectSegments(context.TODO(), WithCollection(taskProto.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
 return (taskProto.GetPartitionID() == common.AllPartitionsID || info.GetPartitionID() == taskProto.GetPartitionID()) &&
 info.GetInsertChannel() == plan.GetChannel() &&
 isFlushState(info.GetState()) &&
@@ -321,7 +321,7 @@ func (t *l0CompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, err
 }

 func (t *l0CompactionTask) resetSegmentCompacting() {
-t.meta.SetSegmentsCompacting(t.GetTaskProto().GetInputSegments(), false)
+t.meta.SetSegmentsCompacting(context.TODO(), t.GetTaskProto().GetInputSegments(), false)
 }

 func (t *l0CompactionTask) hasAssignedWorker() bool {
@@ -347,7 +347,7 @@ func (t *l0CompactionTask) updateAndSaveTaskMeta(opts ...compactionTaskOpt) erro
 }

 func (t *l0CompactionTask) saveTaskMeta(task *datapb.CompactionTask) error {
-return t.meta.SaveCompactionTask(task)
+return t.meta.SaveCompactionTask(context.TODO(), task)
 }

 func (t *l0CompactionTask) saveSegmentMeta() error {
@@ -365,7 +365,7 @@ func (t *l0CompactionTask) saveSegmentMeta() error {
 zap.Int64("planID", t.GetTaskProto().GetPlanID()),
 )

-return t.meta.UpdateSegmentsInfo(operators...)
+return t.meta.UpdateSegmentsInfo(context.TODO(), operators...)
 }

 func (t *l0CompactionTask) GetSlotUsage() int64 {
@@ -60,7 +60,7 @@ func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_NormalL0() {
 channel := "Ch-1"
 deltaLogs := []*datapb.FieldBinlog{getFieldBinlogIDs(101, 3)}

-s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything).Return(
+s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything, mock.Anything).Return(
 []*SegmentInfo{
 {SegmentInfo: &datapb.SegmentInfo{
 ID: 200,
@@ -80,7 +80,7 @@ func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_NormalL0() {
 },
 )

-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
 ID: segID,
 Level: datapb.SegmentLevel_L0,
@@ -115,7 +115,7 @@ func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_NormalL0() {

 func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_SegmentNotFoundL0() {
 channel := "Ch-1"
-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return nil
 }).Once()
 task := newL0CompactionTask(&datapb.CompactionTask{
@@ -141,7 +141,7 @@ func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_SegmentNotFoundL0() {
 func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_SelectZeroSegmentsL0() {
 channel := "Ch-1"
 deltaLogs := []*datapb.FieldBinlog{getFieldBinlogIDs(101, 3)}
-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
 ID: segID,
 Level: datapb.SegmentLevel_L0,
@@ -150,7 +150,7 @@ func (s *L0CompactionTaskSuite) TestProcessRefreshPlan_SelectZeroSegmentsL0() {
 Deltalogs: deltaLogs,
 }}
 }).Times(2)
-s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

 task := newL0CompactionTask(&datapb.CompactionTask{
 PlanID: 1,
@@ -213,7 +213,7 @@ func (s *L0CompactionTaskSuite) generateTestL0Task(state datapb.CompactionTaskSt
 func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 s.Run("test pipelining needReassignNodeID", func() {
 t := s.generateTestL0Task(datapb.CompactionTaskState_pipelining)
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t.updateAndSaveTaskMeta(setNodeID(NullNodeID))
 got := t.Process()
 s.False(got)
@@ -222,14 +222,14 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test pipelining Compaction failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 s.mockAlloc.EXPECT().AllocN(mock.Anything).Return(100, 200, nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_pipelining)
 t.updateAndSaveTaskMeta(setNodeID(100))
 channel := "ch-1"
 deltaLogs := []*datapb.FieldBinlog{getFieldBinlogIDs(101, 3)}

-s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything).Return(
+s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything, mock.Anything).Return(
 []*SegmentInfo{
 {SegmentInfo: &datapb.SegmentInfo{
 ID: 200,
@@ -239,7 +239,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 },
 )

-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
 ID: segID,
 Level: datapb.SegmentLevel_L0,
@@ -248,7 +248,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 Deltalogs: deltaLogs,
 }}
 }).Twice()
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)

 s.mockSessMgr.EXPECT().Compaction(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, nodeID int64, plan *datapb.CompactionPlan) error {
 s.Require().EqualValues(t.GetTaskProto().NodeID, nodeID)
@@ -262,14 +262,14 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test pipelining success", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 s.mockAlloc.EXPECT().AllocN(mock.Anything).Return(100, 200, nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_pipelining)
 t.updateAndSaveTaskMeta(setNodeID(100))
 channel := "ch-1"
 deltaLogs := []*datapb.FieldBinlog{getFieldBinlogIDs(101, 3)}

-s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything).Return(
+s.mockMeta.EXPECT().SelectSegments(mock.Anything, mock.Anything, mock.Anything).Return(
 []*SegmentInfo{
 {SegmentInfo: &datapb.SegmentInfo{
 ID: 200,
@@ -279,7 +279,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 },
 )

-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
 ID: segID,
 Level: datapb.SegmentLevel_L0,
@@ -301,7 +301,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {

 // stay in executing state when GetCompactionPlanResults error except ErrNodeNotFound
 s.Run("test executing GetCompactionPlanResult fail NodeNotFound", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -316,7 +316,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {

 // stay in executing state when GetCompactionPlanResults error except ErrNodeNotFound
 s.Run("test executing GetCompactionPlanResult fail mock error", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -331,7 +331,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test executing with result executing", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -347,7 +347,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test executing with result completed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Once()
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -359,16 +359,16 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 }, nil).Once()
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(nil)

-s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Times(2)
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).Return().Once()
+s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Times(2)
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).Return().Once()

 got := t.Process()
 s.True(got)
 s.Equal(datapb.CompactionTaskState_completed, t.GetTaskProto().GetState())
 })
 s.Run("test executing with result completed save segment meta failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -379,7 +379,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 State: datapb.CompactionTaskState_completed,
 }, nil).Once()

-s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
 Return(errors.New("mock error")).Once()

 got := t.Process()
@@ -387,7 +387,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 s.Equal(datapb.CompactionTaskState_executing, t.GetTaskProto().GetState())
 })
 s.Run("test executing with result completed save compaction meta failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Once()
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -398,8 +398,8 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 State: datapb.CompactionTaskState_completed,
 }, nil).Once()

-s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(errors.New("mock error")).Once()
+s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 got := t.Process()
 s.False(got)
@@ -407,7 +407,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test executing with result failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -418,14 +418,14 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 State: datapb.CompactionTaskState_failed,
 }, nil).Once()
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(nil)
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).Return().Once()
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).Return().Once()

 got := t.Process()
 s.True(got)
 s.Equal(datapb.CompactionTaskState_cleaned, t.GetTaskProto().GetState())
 })
 s.Run("test executing with result failed save compaction meta failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Once()
 t := s.generateTestL0Task(datapb.CompactionTaskState_executing)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
@@ -435,7 +435,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 PlanID: t.GetTaskProto().GetPlanID(),
 State: datapb.CompactionTaskState_failed,
 }, nil).Once()
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(errors.New("mock error")).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 got := t.Process()
 s.False(got)
@@ -443,13 +443,13 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test metaSaved success", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_meta_saved)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 t.result = &datapb.CompactionPlanResult{}

-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).RunAndReturn(func(segIDs []int64, isCompacting bool) {
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).RunAndReturn(func(ctx context.Context, segIDs []int64, isCompacting bool) {
 s.ElementsMatch(segIDs, t.GetTaskProto().GetInputSegments())
 }).Once()
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(nil).Once()
@@ -460,13 +460,13 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test metaSaved failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Once()
 t := s.generateTestL0Task(datapb.CompactionTaskState_meta_saved)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 t.result = &datapb.CompactionPlanResult{}

-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(errors.New("mock error")).Once()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 got := t.Process()
 s.False(got)
@@ -474,13 +474,13 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test complete drop failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_completed)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 t.result = &datapb.CompactionPlanResult{}
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(errors.New("mock error")).Once()
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).RunAndReturn(func(segIDs []int64, isCompacting bool) {
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).RunAndReturn(func(ctx context.Context, segIDs []int64, isCompacting bool) {
 s.ElementsMatch(segIDs, t.GetTaskProto().GetInputSegments())
 }).Once()
@@ -490,13 +490,13 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test complete success", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_completed)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 t.result = &datapb.CompactionPlanResult{}
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(nil).Once()
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).RunAndReturn(func(segIDs []int64, isCompacting bool) {
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).RunAndReturn(func(ctx context.Context, segIDs []int64, isCompacting bool) {
 s.ElementsMatch(segIDs, t.GetTaskProto().GetInputSegments())
 }).Once()
@@ -506,12 +506,12 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test process failed success", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_failed)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(nil).Once()
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).RunAndReturn(func(segIDs []int64, isCompacting bool) {
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).RunAndReturn(func(ctx context.Context, segIDs []int64, isCompacting bool) {
 s.ElementsMatch(segIDs, t.GetTaskProto().GetInputSegments())
 }).Once()
@@ -521,12 +521,12 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 })

 s.Run("test process failed failed", func() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_failed)
 t.updateAndSaveTaskMeta(setNodeID(100))
 s.Require().True(t.GetTaskProto().GetNodeID() > 0)
 s.mockSessMgr.EXPECT().DropCompactionPlan(t.GetTaskProto().GetNodeID(), mock.Anything).Return(errors.New("mock error")).Once()
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, false).RunAndReturn(func(segIDs []int64, isCompacting bool) {
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, false).RunAndReturn(func(ctx context.Context, segIDs []int64, isCompacting bool) {
 s.ElementsMatch(segIDs, t.GetTaskProto().GetInputSegments())
 }).Once()
@@ -544,7 +544,7 @@ func (s *L0CompactionTaskSuite) TestPorcessStateTrans() {
 }

 func (s *L0CompactionTaskSuite) TestSetterGetter() {
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t := s.generateTestL0Task(datapb.CompactionTaskState_pipelining)

 span := t.GetSpan()
@@ -572,7 +572,7 @@ func (s *L0CompactionTaskSuite) TestSetterGetter() {
 s.Run("set NodeID", func() {
 t := s.generateTestL0Task(datapb.CompactionTaskState_pipelining)

-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil)
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)
 t.SetNodeID(1000)
 s.EqualValues(1000, t.GetTaskProto().GetNodeID())
 })
@@ -146,10 +146,10 @@ func (csm *compactionTaskMeta) GetCompactionTasksByTriggerID(triggerID int64) []
 return res
 }

-func (csm *compactionTaskMeta) SaveCompactionTask(task *datapb.CompactionTask) error {
+func (csm *compactionTaskMeta) SaveCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
 csm.Lock()
 defer csm.Unlock()
-if err := csm.catalog.SaveCompactionTask(csm.ctx, task); err != nil {
+if err := csm.catalog.SaveCompactionTask(ctx, task); err != nil {
 log.Error("meta update: update compaction task fail", zap.Error(err))
 return err
 }
@@ -166,10 +166,10 @@ func (csm *compactionTaskMeta) saveCompactionTaskMemory(task *datapb.CompactionT
 csm.taskStats.Add(task.PlanID, newCompactionTaskStats(task))
 }

-func (csm *compactionTaskMeta) DropCompactionTask(task *datapb.CompactionTask) error {
+func (csm *compactionTaskMeta) DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
 csm.Lock()
 defer csm.Unlock()
-if err := csm.catalog.DropCompactionTask(csm.ctx, task); err != nil {
+if err := csm.catalog.DropCompactionTask(ctx, task); err != nil {
 log.Error("meta update: drop compaction task fail", zap.Int64("triggerID", task.TriggerID), zap.Int64("planID", task.PlanID), zap.Int64("collectionID", task.CollectionID), zap.Error(err))
 return err
 }
@@ -59,7 +59,7 @@ func newTestCompactionTaskMeta(t *testing.T) *compactionTaskMeta {
 }

 func (suite *CompactionTaskMetaSuite) TestGetCompactionTasksByCollection() {
-suite.meta.SaveCompactionTask(&datapb.CompactionTask{
+suite.meta.SaveCompactionTask(context.TODO(), &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: 100,
@@ -69,12 +69,12 @@ func (suite *CompactionTaskMetaSuite) TestGetCompactionTasksByCollection() {
 }

 func (suite *CompactionTaskMetaSuite) TestGetCompactionTasksByCollectionAbnormal() {
-suite.meta.SaveCompactionTask(&datapb.CompactionTask{
+suite.meta.SaveCompactionTask(context.TODO(), &datapb.CompactionTask{
 TriggerID: 1,
 PlanID: 10,
 CollectionID: 100,
 })
-suite.meta.SaveCompactionTask(&datapb.CompactionTask{
+suite.meta.SaveCompactionTask(context.TODO(), &datapb.CompactionTask{
 TriggerID: 2,
 PlanID: 11,
 CollectionID: 101,
@@ -113,9 +113,9 @@ func (suite *CompactionTaskMetaSuite) TestTaskStatsJSON() {
 actualJSON := suite.meta.TaskStatsJSON()
 suite.Equal("[]", actualJSON)

-err := suite.meta.SaveCompactionTask(task1)
+err := suite.meta.SaveCompactionTask(context.TODO(), task1)
 suite.NoError(err)
-err = suite.meta.SaveCompactionTask(task2)
+err = suite.meta.SaveCompactionTask(context.TODO(), task2)
 suite.NoError(err)

 expectedTasks := []*metricsinfo.CompactionTask{
@@ -155,7 +155,7 @@ func (t *mixCompactionTask) processExecuting() bool {
 }

 func (t *mixCompactionTask) saveTaskMeta(task *datapb.CompactionTask) error {
-return t.meta.SaveCompactionTask(task)
+return t.meta.SaveCompactionTask(context.TODO(), task)
 }

 func (t *mixCompactionTask) SaveTaskMeta() error {
@@ -165,7 +165,7 @@ func (t *mixCompactionTask) SaveTaskMeta() error {
 func (t *mixCompactionTask) saveSegmentMeta() error {
 log := log.With(zap.Int64("triggerID", t.GetTaskProto().GetTriggerID()), zap.Int64("PlanID", t.GetTaskProto().GetPlanID()), zap.Int64("collectionID", t.GetTaskProto().GetCollectionID()))
 // Also prepare metric updates.
-newSegments, metricMutation, err := t.meta.CompleteCompactionMutation(t.taskProto.Load().(*datapb.CompactionTask), t.result)
+newSegments, metricMutation, err := t.meta.CompleteCompactionMutation(context.TODO(), t.taskProto.Load().(*datapb.CompactionTask), t.result)
 if err != nil {
 return err
 }
@@ -237,7 +237,7 @@ func (t *mixCompactionTask) processCompleted() bool {
 }

 func (t *mixCompactionTask) resetSegmentCompacting() {
-t.meta.SetSegmentsCompacting(t.taskProto.Load().(*datapb.CompactionTask).GetInputSegments(), false)
+t.meta.SetSegmentsCompacting(context.TODO(), t.taskProto.Load().(*datapb.CompactionTask).GetInputSegments(), false)
 }

 func (t *mixCompactionTask) ShadowClone(opts ...compactionTaskOpt) *datapb.CompactionTask {
@@ -316,7 +316,7 @@ func (t *mixCompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, er

 segIDMap := make(map[int64][]*datapb.FieldBinlog, len(plan.SegmentBinlogs))
 for _, segID := range taskProto.GetInputSegments() {
-segInfo := t.meta.GetHealthySegment(segID)
+segInfo := t.meta.GetHealthySegment(context.TODO(), segID)
 if segInfo == nil {
 return nil, merr.WrapErrSegmentNotFound(segID)
 }
@@ -1,6 +1,7 @@
 package datacoord

 import (
+"context"
 "testing"

 "github.com/samber/lo"
@@ -33,7 +34,7 @@ func (s *MixCompactionTaskSuite) SetupTest() {
 func (s *MixCompactionTaskSuite) TestProcessRefreshPlan_NormalMix() {
 channel := "Ch-1"
 binLogs := []*datapb.FieldBinlog{getFieldBinlogIDs(101, 3)}
-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
 ID: segID,
 Level: datapb.SegmentLevel_L1,
@@ -69,7 +70,7 @@ func (s *MixCompactionTaskSuite) TestProcessRefreshPlan_NormalMix() {
 func (s *MixCompactionTaskSuite) TestProcessRefreshPlan_MixSegmentNotFound() {
 channel := "Ch-1"
 s.Run("segment_not_found", func() {
-s.mockMeta.EXPECT().GetHealthySegment(mock.Anything).RunAndReturn(func(segID int64) *SegmentInfo {
+s.mockMeta.EXPECT().GetHealthySegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, segID int64) *SegmentInfo {
 return nil
 }).Once()
 task := newMixCompactionTask(&datapb.CompactionTask{
@@ -53,7 +53,7 @@ type CompactionPlanHandlerSuite struct {

 func (s *CompactionPlanHandlerSuite) SetupTest() {
 s.mockMeta = NewMockCompactionMeta(s.T())
-s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything).Return(nil).Maybe()
+s.mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil).Maybe()
 s.mockAlloc = allocator.NewMockAllocator(s.T())
 s.mockCm = NewMockChannelManager(s.T())
 s.mockSessMgr = session.NewMockDataNodeManager(s.T())
@@ -604,7 +604,7 @@ func (s *CompactionPlanHandlerSuite) TestGetCompactionTask() {
 2: t2,
 3: t3,
 }
-s.mockMeta.EXPECT().GetCompactionTasksByTriggerID(mock.Anything).RunAndReturn(func(i int64) []*datapb.CompactionTask {
+s.mockMeta.EXPECT().GetCompactionTasksByTriggerID(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, i int64) []*datapb.CompactionTask {
 var ret []*datapb.CompactionTask
 for _, t := range inTasks {
 if t.GetTaskProto().GetTriggerID() != i {
@@ -621,7 +621,7 @@ func (s *CompactionPlanHandlerSuite) TestGetCompactionTask() {

 s.handler.schedule()

-info := s.handler.getCompactionInfo(1)
+info := s.handler.getCompactionInfo(context.TODO(), 1)
 s.Equal(1, info.completedCnt)
 s.Equal(1, info.executingCnt)
 s.Equal(1, info.failedCnt)
@@ -629,7 +629,7 @@ func (s *CompactionPlanHandlerSuite) TestGetCompactionTask() {

 func (s *CompactionPlanHandlerSuite) TestExecCompactionPlan() {
 s.SetupTest()
-s.mockMeta.EXPECT().CheckAndSetSegmentsCompacting(mock.Anything).Return(true, true).Maybe()
+s.mockMeta.EXPECT().CheckAndSetSegmentsCompacting(mock.Anything, mock.Anything).Return(true, true).Maybe()
 handler := newCompactionPlanHandler(nil, s.mockSessMgr, s.mockMeta, s.mockAlloc, nil, nil)

 task := &datapb.CompactionTask{
@@ -669,7 +669,7 @@ func (s *CompactionPlanHandlerSuite) TestCheckCompaction() {
 }, nil).Once()

 s.mockSessMgr.EXPECT().DropCompactionPlan(mock.Anything, mock.Anything).Return(nil)
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything).Return()
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, mock.Anything).Return()

 t1 := newMixCompactionTask(&datapb.CompactionTask{
 PlanID: 1,
@@ -747,8 +747,8 @@ func (s *CompactionPlanHandlerSuite) TestCheckCompaction() {

 // s.mockSessMgr.EXPECT().SyncSegments(int64(111), mock.Anything).Return(nil)
 // s.mockMeta.EXPECT().UpdateSegmentsInfo(mock.Anything).Return(nil)
-s.mockMeta.EXPECT().CompleteCompactionMutation(mock.Anything, mock.Anything).RunAndReturn(
-func(t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
+s.mockMeta.EXPECT().CompleteCompactionMutation(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
+func(ctx context.Context, t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
 if t.GetPlanID() == 2 {
 segment := NewSegmentInfo(&datapb.SegmentInfo{ID: 100})
 return []*SegmentInfo{segment}, &segMetricMutation{}, nil
@@ -815,7 +815,7 @@ func (s *CompactionPlanHandlerSuite) TestCompactionGC() {
 s.NoError(err)
 s.handler.meta = &meta{compactionTaskMeta: compactionTaskMeta}
 for _, t := range inTasks {
-s.handler.meta.SaveCompactionTask(t)
+s.handler.meta.SaveCompactionTask(context.TODO(), t)
 }

 s.handler.cleanCompactionTaskMeta()
@@ -828,9 +828,9 @@ func (s *CompactionPlanHandlerSuite) TestProcessCompleteCompaction() {
 s.SetupTest()

 // s.mockSessMgr.EXPECT().SyncSegments(mock.Anything, mock.Anything).Return(nil).Once()
-s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything).Return().Once()
+s.mockMeta.EXPECT().SetSegmentsCompacting(mock.Anything, mock.Anything, mock.Anything).Return().Once()
 segment := NewSegmentInfo(&datapb.SegmentInfo{ID: 100})
-s.mockMeta.EXPECT().CompleteCompactionMutation(mock.Anything, mock.Anything).Return(
+s.mockMeta.EXPECT().CompleteCompactionMutation(mock.Anything, mock.Anything, mock.Anything).Return(
 []*SegmentInfo{segment},
 &segMetricMutation{}, nil).Once()
@@ -423,7 +423,7 @@ func (t *compactionTrigger) handleSignal(signal *compactionSignal) {
 return
 }

-segment := t.meta.GetHealthySegment(signal.segmentID)
+segment := t.meta.GetHealthySegment(t.meta.ctx, signal.segmentID)
 if segment == nil {
 log.Warn("segment in compaction signal not found in meta", zap.Int64("segmentID", signal.segmentID))
 return
@@ -54,7 +54,7 @@ func (h *spyCompactionHandler) getCompactionTasksNumBySignalID(signalID int64) i
 return 0
 }

-func (h *spyCompactionHandler) getCompactionInfo(signalID int64) *compactionInfo {
+func (h *spyCompactionHandler) getCompactionInfo(ctx context.Context, signalID int64) *compactionInfo {
 return nil
 }
@@ -2357,7 +2357,7 @@ func (s *CompactionTriggerSuite) SetupTest() {
 },
 },
 }
-s.meta.UpdateChannelCheckpoint(s.channel, &msgpb.MsgPosition{
+s.meta.UpdateChannelCheckpoint(context.TODO(), s.channel, &msgpb.MsgPosition{
 ChannelName: s.channel,
 Timestamp: tsoutil.ComposeTSByTime(time.Now(), 0),
 MsgID: []byte{1, 2, 3, 4},
@@ -324,7 +324,7 @@ func (gc *garbageCollector) recycleUnusedBinLogWithChecker(ctx context.Context,
 return true
 }

-segment := gc.meta.GetSegment(segmentID)
+segment := gc.meta.GetSegment(ctx, segmentID)
 if checker(chunkInfo, segment) {
 valid++
 logger.Info("garbageCollector recycleUnusedBinlogFiles skip file since it is valid", zap.String("filePath", chunkInfo.FilePath), zap.Int64("segmentID", segmentID))
@@ -416,7 +416,7 @@ func (gc *garbageCollector) recycleDroppedSegments(ctx context.Context) {
 log.Info("start clear dropped segments...")
 defer func() { log.Info("clear dropped segments done", zap.Duration("timeCost", time.Since(start))) }()

-all := gc.meta.SelectSegments()
+all := gc.meta.SelectSegments(ctx)
 drops := make(map[int64]*SegmentInfo, 0)
 compactTo := make(map[int64]*SegmentInfo)
 channels := typeutil.NewSet[string]()
@@ -480,7 +480,7 @@ func (gc *garbageCollector) recycleDroppedSegments(ctx context.Context) {
 continue
 }

-if err := gc.meta.DropSegment(segment.GetID()); err != nil {
+if err := gc.meta.DropSegment(ctx, segment.GetID()); err != nil {
 log.Warn("GC segment meta failed to drop segment", zap.Error(err))
 continue
 }
@@ -622,7 +622,7 @@ func (gc *garbageCollector) recycleUnusedIndexes(ctx context.Context) {
 }

 log := log.With(zap.Int64("collectionID", index.CollectionID), zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID))
-if err := gc.meta.indexMeta.RemoveIndex(index.CollectionID, index.IndexID); err != nil {
+if err := gc.meta.indexMeta.RemoveIndex(ctx, index.CollectionID, index.IndexID); err != nil {
 log.Warn("remove index on collection fail", zap.Error(err))
 continue
 }
@@ -646,7 +646,7 @@ func (gc *garbageCollector) recycleUnusedSegIndexes(ctx context.Context) {

 // 1. segment belongs to is deleted.
 // 2. index is deleted.
-if gc.meta.GetSegment(segIdx.SegmentID) == nil || !gc.meta.indexMeta.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
+if gc.meta.GetSegment(ctx, segIdx.SegmentID) == nil || !gc.meta.indexMeta.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
 indexFiles := gc.getAllIndexFilesOfIndex(segIdx)
 log := log.With(zap.Int64("collectionID", segIdx.CollectionID),
 zap.Int64("partitionID", segIdx.PartitionID),
@@ -664,7 +664,7 @@ func (gc *garbageCollector) recycleUnusedSegIndexes(ctx context.Context) {
 }

 // Remove meta from index meta.
-if err := gc.meta.indexMeta.RemoveSegmentIndex(segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, segIdx.BuildID); err != nil {
+if err := gc.meta.indexMeta.RemoveSegmentIndex(ctx, segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, segIdx.BuildID); err != nil {
 log.Warn("delete index meta from etcd failed, wait to retry", zap.Error(err))
 continue
 }
@ -850,7 +850,7 @@ func (gc *garbageCollector) recycleUnusedTextIndexFiles(ctx context.Context) {
|
|||
log.Info("start recycleUnusedTextIndexFiles...")
|
||||
defer func() { log.Info("recycleUnusedTextIndexFiles done", zap.Duration("timeCost", time.Since(start))) }()
|
||||
|
||||
hasTextIndexSegments := gc.meta.SelectSegments(SegmentFilterFunc(func(info *SegmentInfo) bool {
|
||||
hasTextIndexSegments := gc.meta.SelectSegments(ctx, SegmentFilterFunc(func(info *SegmentInfo) bool {
|
||||
return len(info.GetTextStatsLogs()) != 0
|
||||
}))
|
||||
fileNum := 0
@ -1411,25 +1411,25 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
|||
|
||||
conclusion: only G is GCed.
|
||||
*/
|
||||
segA := gc.meta.GetSegment(segID)
|
||||
segA := gc.meta.GetSegment(context.TODO(), segID)
|
||||
assert.NotNil(t, segA)
|
||||
segB := gc.meta.GetSegment(segID + 1)
|
||||
segB := gc.meta.GetSegment(context.TODO(), segID+1)
|
||||
assert.NotNil(t, segB)
|
||||
segC := gc.meta.GetSegment(segID + 2)
|
||||
segC := gc.meta.GetSegment(context.TODO(), segID+2)
|
||||
assert.NotNil(t, segC)
|
||||
segD := gc.meta.GetSegment(segID + 3)
|
||||
segD := gc.meta.GetSegment(context.TODO(), segID+3)
|
||||
assert.NotNil(t, segD)
|
||||
segE := gc.meta.GetSegment(segID + 4)
|
||||
segE := gc.meta.GetSegment(context.TODO(), segID+4)
|
||||
assert.NotNil(t, segE)
|
||||
segF := gc.meta.GetSegment(segID + 5)
|
||||
segF := gc.meta.GetSegment(context.TODO(), segID+5)
|
||||
assert.NotNil(t, segF)
|
||||
segG := gc.meta.GetSegment(segID + 6)
|
||||
segG := gc.meta.GetSegment(context.TODO(), segID+6)
|
||||
assert.NotNil(t, segG)
|
||||
segH := gc.meta.GetSegment(segID + 7)
|
||||
segH := gc.meta.GetSegment(context.TODO(), segID+7)
|
||||
assert.NotNil(t, segH)
|
||||
segG = gc.meta.GetSegment(segID + 8)
|
||||
segG = gc.meta.GetSegment(context.TODO(), segID+8)
|
||||
assert.Nil(t, segG)
|
||||
err := gc.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err := gc.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: segID + 4,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
|
@ -1459,9 +1459,9 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
|||
D: dropped, not indexed, should be GCed since E is indexed
|
||||
*/
|
||||
|
||||
segC = gc.meta.GetSegment(segID + 2)
|
||||
segC = gc.meta.GetSegment(context.TODO(), segID+2)
|
||||
assert.Nil(t, segC)
|
||||
segD = gc.meta.GetSegment(segID + 3)
|
||||
segD = gc.meta.GetSegment(context.TODO(), segID+3)
|
||||
assert.Nil(t, segD)
|
||||
|
||||
gc.recycleDroppedSegments(context.TODO())
|
||||
|
@ -1469,9 +1469,9 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
|||
A: compacted became false because C is GCed already; A should be GCed since dropTolerance is met
B: compacted became false because C is GCed already; B should be GCed since dropTolerance is met
|
||||
*/
|
||||
segA = gc.meta.GetSegment(segID)
|
||||
segA = gc.meta.GetSegment(context.TODO(), segID)
|
||||
assert.Nil(t, segA)
|
||||
segB = gc.meta.GetSegment(segID + 1)
|
||||
segB = gc.meta.GetSegment(context.TODO(), segID+1)
|
||||
assert.Nil(t, segB)
|
||||
}
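A minimal sketch of the calling convention the garbage-collector changes above establish: the caller's ctx is threaded into every meta read and write instead of a context stored on the meta. The helper name dropSegmentIfPresent is hypothetical, and the *meta receiver type is assumed from the gc.meta usages above.

func dropSegmentIfPresent(ctx context.Context, m *meta, segmentID int64) error {
	// Look up the segment with the caller-provided context.
	if segment := m.GetSegment(ctx, segmentID); segment == nil {
		return nil // nothing to do, already removed
	}
	// Drop it through the same context so the catalog call can be cancelled or traced.
	return m.DropSegment(ctx, segmentID)
}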
@ -269,7 +269,7 @@ func (h *ServerHandler) getEarliestSegmentDMLPos(channel string, partitionIDs ..
|
|||
var minPos *msgpb.MsgPosition
|
||||
var minPosSegID int64
|
||||
var minPosTs uint64
|
||||
segments := h.s.meta.SelectSegments(WithChannel(channel))
|
||||
segments := h.s.meta.SelectSegments(context.TODO(), WithChannel(channel))
|
||||
|
||||
validPartitions := lo.Filter(partitionIDs, func(partitionID int64, _ int) bool { return partitionID > allPartitionID })
|
||||
partitionSet := typeutil.NewUniqueSet(validPartitions...)
@ -39,7 +39,7 @@ func TestGetQueryVChanPositionsRetrieveM2N(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
CollectionID: 1,
|
||||
FieldID: 2,
|
||||
IndexID: 1,
|
||||
|
@ -91,7 +91,7 @@ func TestGetQueryVChanPositionsRetrieveM2N(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
if arg.indexed {
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: arg.segID,
|
||||
BuildID: arg.segID,
|
||||
IndexID: 1,
|
||||
|
@ -150,7 +150,7 @@ func TestGetQueryVChanPositions(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -174,7 +174,7 @@ func TestGetQueryVChanPositions(t *testing.T) {
|
|||
}
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(s1))
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
BuildID: 1,
|
||||
IndexID: 1,
|
||||
|
@ -321,7 +321,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
ID: 0,
|
||||
Schema: schema,
|
||||
})
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -391,7 +391,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
ID: 0,
|
||||
Schema: schema,
|
||||
})
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -477,7 +477,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
ID: 0,
|
||||
Schema: schema,
|
||||
})
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -515,7 +515,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
}
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(d))
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 2,
|
||||
BuildID: 1,
|
||||
IndexID: 1,
|
||||
|
@ -543,7 +543,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
}
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(e))
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
BuildID: 2,
|
||||
IndexID: 1,
|
||||
|
@ -586,7 +586,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
|
|||
ID: 0,
|
||||
Schema: schema,
|
||||
})
|
||||
err := svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err := svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
@ -85,7 +85,7 @@ func (c *importChecker) Start() {
|
|||
log.Info("import checker exited")
|
||||
return
|
||||
case <-ticker1.C:
|
||||
jobs := c.imeta.GetJobBy()
|
||||
jobs := c.imeta.GetJobBy(context.TODO())
|
||||
for _, job := range jobs {
|
||||
switch job.GetState() {
|
||||
case internalpb.ImportJobState_Pending:
|
||||
|
@ -103,7 +103,7 @@ func (c *importChecker) Start() {
|
|||
}
|
||||
}
|
||||
case <-ticker2.C:
|
||||
jobs := c.imeta.GetJobBy()
|
||||
jobs := c.imeta.GetJobBy(context.TODO())
|
||||
for _, job := range jobs {
|
||||
c.tryTimeoutJob(job)
|
||||
c.checkGC(job)
|
||||
|
@ -142,9 +142,9 @@ func (c *importChecker) LogStats() {
|
|||
metrics.ImportTasks.WithLabelValues(taskType.String(), datapb.ImportTaskStateV2_Completed.String()).Set(float64(completed))
|
||||
metrics.ImportTasks.WithLabelValues(taskType.String(), datapb.ImportTaskStateV2_Failed.String()).Set(float64(failed))
|
||||
}
|
||||
tasks := c.imeta.GetTaskBy(WithType(PreImportTaskType))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithType(PreImportTaskType))
|
||||
logFunc(tasks, PreImportTaskType)
|
||||
tasks = c.imeta.GetTaskBy(WithType(ImportTaskType))
|
||||
tasks = c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType))
|
||||
logFunc(tasks, ImportTaskType)
|
||||
}
|
||||
|
||||
|
@ -152,7 +152,7 @@ func (c *importChecker) getLackFilesForPreImports(job ImportJob) []*internalpb.I
|
|||
lacks := lo.KeyBy(job.GetFiles(), func(file *internalpb.ImportFile) int64 {
|
||||
return file.GetId()
|
||||
})
|
||||
exists := c.imeta.GetTaskBy(WithType(PreImportTaskType), WithJob(job.GetJobID()))
|
||||
exists := c.imeta.GetTaskBy(context.TODO(), WithType(PreImportTaskType), WithJob(job.GetJobID()))
|
||||
for _, task := range exists {
|
||||
for _, file := range task.GetFileStats() {
|
||||
delete(lacks, file.GetImportFile().GetId())
|
||||
|
@ -162,7 +162,7 @@ func (c *importChecker) getLackFilesForPreImports(job ImportJob) []*internalpb.I
|
|||
}
|
||||
|
||||
func (c *importChecker) getLackFilesForImports(job ImportJob) []*datapb.ImportFileStats {
|
||||
preimports := c.imeta.GetTaskBy(WithType(PreImportTaskType), WithJob(job.GetJobID()))
|
||||
preimports := c.imeta.GetTaskBy(context.TODO(), WithType(PreImportTaskType), WithJob(job.GetJobID()))
|
||||
lacks := make(map[int64]*datapb.ImportFileStats, 0)
|
||||
for _, t := range preimports {
|
||||
if t.GetState() != datapb.ImportTaskStateV2_Completed {
|
||||
|
@ -173,7 +173,7 @@ func (c *importChecker) getLackFilesForImports(job ImportJob) []*datapb.ImportFi
|
|||
lacks[stat.GetImportFile().GetId()] = stat
|
||||
}
|
||||
}
|
||||
exists := c.imeta.GetTaskBy(WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
exists := c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
for _, task := range exists {
|
||||
for _, file := range task.GetFileStats() {
|
||||
delete(lacks, file.GetImportFile().GetId())
|
||||
|
@ -196,7 +196,7 @@ func (c *importChecker) checkPendingJob(job ImportJob) {
|
|||
return
|
||||
}
|
||||
for _, t := range newTasks {
|
||||
err = c.imeta.AddTask(t)
|
||||
err = c.imeta.AddTask(context.TODO(), t)
|
||||
if err != nil {
|
||||
log.Warn("add preimport task failed", WrapTaskLog(t, zap.Error(err))...)
|
||||
return
|
||||
|
@ -204,7 +204,7 @@ func (c *importChecker) checkPendingJob(job ImportJob) {
|
|||
log.Info("add new preimport task", WrapTaskLog(t)...)
|
||||
}
|
||||
|
||||
err = c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_PreImporting))
|
||||
err = c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_PreImporting))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to PreImporting", zap.Error(err))
|
||||
return
|
||||
|
@ -224,7 +224,7 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
|
|||
requestSize, err := CheckDiskQuota(job, c.meta, c.imeta)
|
||||
if err != nil {
|
||||
log.Warn("import failed, disk quota exceeded", zap.Error(err))
|
||||
err = c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(err.Error()))
|
||||
err = c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(err.Error()))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Failed", zap.Error(err))
|
||||
}
|
||||
|
@ -239,7 +239,7 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
|
|||
return
|
||||
}
|
||||
for _, t := range newTasks {
|
||||
err = c.imeta.AddTask(t)
|
||||
err = c.imeta.AddTask(context.TODO(), t)
|
||||
if err != nil {
|
||||
log.Warn("add new import task failed", WrapTaskLog(t, zap.Error(err))...)
|
||||
return
|
||||
|
@ -247,7 +247,7 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
|
|||
log.Info("add new import task", WrapTaskLog(t)...)
|
||||
}
|
||||
|
||||
err = c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Importing), UpdateRequestedDiskSize(requestSize))
|
||||
err = c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Importing), UpdateRequestedDiskSize(requestSize))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Importing", zap.Error(err))
|
||||
return
|
||||
|
@ -259,13 +259,13 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
|
|||
|
||||
func (c *importChecker) checkImportingJob(job ImportJob) {
|
||||
log := log.With(zap.Int64("jobID", job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
for _, t := range tasks {
|
||||
if t.GetState() != datapb.ImportTaskStateV2_Completed {
|
||||
return
|
||||
}
|
||||
}
|
||||
err := c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Stats))
|
||||
err := c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Stats))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Stats", zap.Error(err))
|
||||
return
|
||||
|
@ -278,7 +278,7 @@ func (c *importChecker) checkImportingJob(job ImportJob) {
|
|||
func (c *importChecker) checkStatsJob(job ImportJob) {
|
||||
log := log.With(zap.Int64("jobID", job.GetJobID()))
|
||||
updateJobState := func(state internalpb.ImportJobState) {
|
||||
err := c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(state))
|
||||
err := c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(state))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state", zap.Error(err))
|
||||
return
|
||||
|
@ -299,7 +299,7 @@ func (c *importChecker) checkStatsJob(job ImportJob) {
|
|||
taskCnt = 0
|
||||
doneCnt = 0
|
||||
)
|
||||
tasks := c.imeta.GetTaskBy(WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
for _, task := range tasks {
|
||||
originSegmentIDs := task.(*importTask).GetSegmentIDs()
|
||||
statsSegmentIDs := task.(*importTask).GetStatsSegmentIDs()
|
||||
|
@ -335,7 +335,7 @@ func (c *importChecker) checkStatsJob(job ImportJob) {
|
|||
|
||||
func (c *importChecker) checkIndexBuildingJob(job ImportJob) {
|
||||
log := log.With(zap.Int64("jobID", job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
originSegmentIDs := lo.FlatMap(tasks, func(t ImportTask, _ int) []int64 {
|
||||
return t.(*importTask).GetSegmentIDs()
|
||||
})
|
||||
|
@ -366,7 +366,7 @@ func (c *importChecker) checkIndexBuildingJob(job ImportJob) {
|
|||
|
||||
// Here, all segment indexes have been successfully built, try unset isImporting flag for all segments.
|
||||
isImportingSegments := lo.Filter(append(originSegmentIDs, statsSegmentIDs...), func(segmentID int64, _ int) bool {
|
||||
segment := c.meta.GetSegment(segmentID)
|
||||
segment := c.meta.GetSegment(context.TODO(), segmentID)
|
||||
if segment == nil {
|
||||
log.Warn("cannot find segment", zap.Int64("segmentID", segmentID))
|
||||
return false
|
||||
|
@ -387,7 +387,7 @@ func (c *importChecker) checkIndexBuildingJob(job ImportJob) {
|
|||
op1 := UpdateStartPosition([]*datapb.SegmentStartPosition{{StartPosition: channelCP, SegmentID: segmentID}})
|
||||
op2 := UpdateDmlPosition(segmentID, channelCP)
|
||||
op3 := UpdateIsImporting(segmentID, false)
|
||||
err = c.meta.UpdateSegmentsInfo(op1, op2, op3)
|
||||
err = c.meta.UpdateSegmentsInfo(context.TODO(), op1, op2, op3)
|
||||
if err != nil {
|
||||
log.Warn("update import segment failed", zap.Error(err))
|
||||
return
|
||||
|
@ -396,7 +396,7 @@ func (c *importChecker) checkIndexBuildingJob(job ImportJob) {
|
|||
|
||||
// all finished, update import job state to `Completed`.
|
||||
completeTime := time.Now().Format("2006-01-02T15:04:05Z07:00")
|
||||
err = c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Completed), UpdateJobCompleteTime(completeTime))
|
||||
err = c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Completed), UpdateJobCompleteTime(completeTime))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Completed", zap.Error(err))
|
||||
return
|
||||
|
@ -407,7 +407,7 @@ func (c *importChecker) checkIndexBuildingJob(job ImportJob) {
|
|||
}
|
||||
|
||||
func (c *importChecker) checkFailedJob(job ImportJob) {
|
||||
tasks := c.imeta.GetTaskBy(WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithJob(job.GetJobID()))
|
||||
originSegmentIDs := lo.FlatMap(tasks, func(t ImportTask, _ int) []int64 {
|
||||
return t.(*importTask).GetSegmentIDs()
|
||||
})
|
||||
|
@ -422,7 +422,7 @@ func (c *importChecker) checkFailedJob(job ImportJob) {
|
|||
}
|
||||
|
||||
func (c *importChecker) tryFailingTasks(job ImportJob) {
|
||||
tasks := c.imeta.GetTaskBy(WithJob(job.GetJobID()), WithStates(datapb.ImportTaskStateV2_Pending,
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithStates(datapb.ImportTaskStateV2_Pending,
|
||||
datapb.ImportTaskStateV2_InProgress, datapb.ImportTaskStateV2_Completed))
|
||||
if len(tasks) == 0 {
|
||||
return
|
||||
|
@ -430,7 +430,7 @@ func (c *importChecker) tryFailingTasks(job ImportJob) {
|
|||
log.Warn("Import job has failed, all tasks with the same jobID will be marked as failed",
|
||||
zap.Int64("jobID", job.GetJobID()), zap.String("reason", job.GetReason()))
|
||||
for _, task := range tasks {
|
||||
err := c.imeta.UpdateTask(task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed),
|
||||
err := c.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed),
|
||||
UpdateReason(job.GetReason()))
|
||||
if err != nil {
|
||||
log.Warn("failed to update import task state to failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
|
@ -444,7 +444,7 @@ func (c *importChecker) tryTimeoutJob(job ImportJob) {
|
|||
if time.Now().After(timeoutTime) {
|
||||
log.Warn("Import timeout, expired the specified time limit",
|
||||
zap.Int64("jobID", job.GetJobID()), zap.Time("timeoutTime", timeoutTime))
|
||||
err := c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
err := c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
UpdateJobReason("import timeout"))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Failed", zap.Int64("jobID", job.GetJobID()), zap.Error(err))
|
||||
|
@ -469,7 +469,7 @@ func (c *importChecker) checkCollection(collectionID int64, jobs []ImportJob) {
|
|||
return job.GetState() != internalpb.ImportJobState_Failed
|
||||
})
|
||||
for _, job := range jobs {
|
||||
err = c.imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
err = c.imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
UpdateJobReason(fmt.Sprintf("collection %d dropped", collectionID)))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Failed", zap.Int64("jobID", job.GetJobID()), zap.Error(err))
|
||||
|
@ -489,7 +489,7 @@ func (c *importChecker) checkGC(job ImportJob) {
|
|||
GCRetention := Params.DataCoordCfg.ImportTaskRetention.GetAsDuration(time.Second)
|
||||
log.Info("job has reached the GC retention",
|
||||
zap.Time("cleanupTime", cleanupTime), zap.Duration("GCRetention", GCRetention))
|
||||
tasks := c.imeta.GetTaskBy(WithJob(job.GetJobID()))
|
||||
tasks := c.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()))
|
||||
shouldRemoveJob := true
|
||||
for _, task := range tasks {
|
||||
if job.GetState() == internalpb.ImportJobState_Failed && task.GetType() == ImportTaskType {
|
||||
|
@ -502,7 +502,7 @@ func (c *importChecker) checkGC(job ImportJob) {
|
|||
shouldRemoveJob = false
|
||||
continue
|
||||
}
|
||||
err := c.imeta.RemoveTask(task.GetTaskID())
|
||||
err := c.imeta.RemoveTask(context.TODO(), task.GetTaskID())
|
||||
if err != nil {
|
||||
log.Warn("remove task failed during GC", WrapTaskLog(task, zap.Error(err))...)
|
||||
shouldRemoveJob = false
|
||||
|
@ -513,7 +513,7 @@ func (c *importChecker) checkGC(job ImportJob) {
|
|||
if !shouldRemoveJob {
|
||||
return
|
||||
}
|
||||
err := c.imeta.RemoveJob(job.GetJobID())
|
||||
err := c.imeta.RemoveJob(context.TODO(), job.GetJobID())
|
||||
if err != nil {
|
||||
log.Warn("remove import job failed", zap.Error(err))
|
||||
return
@ -49,9 +49,9 @@ type ImportCheckerSuite struct {
|
|||
|
||||
func (s *ImportCheckerSuite) SetupTest() {
|
||||
catalog := mocks.NewDataCoordCatalog(s.T())
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil)
|
||||
|
@ -64,7 +64,7 @@ func (s *ImportCheckerSuite) SetupTest() {
|
|||
cluster := NewMockCluster(s.T())
|
||||
s.alloc = allocator.NewMockAllocator(s.T())
|
||||
|
||||
imeta, err := NewImportMeta(catalog)
|
||||
imeta, err := NewImportMeta(context.TODO(), catalog)
|
||||
s.NoError(err)
|
||||
s.imeta = imeta
|
||||
|
||||
|
@ -105,16 +105,16 @@ func (s *ImportCheckerSuite) SetupTest() {
|
|||
tr: timerecord.NewTimeRecorder("import job"),
|
||||
}
|
||||
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
err = s.imeta.AddJob(job)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
err = s.imeta.AddJob(context.TODO(), job)
|
||||
s.NoError(err)
|
||||
s.jobID = job.GetJobID()
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestLogStats() {
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
pit1 := &preImportTask{
|
||||
PreImportTask: &datapb.PreImportTask{
|
||||
|
@ -124,7 +124,7 @@ func (s *ImportCheckerSuite) TestLogStats() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("preimport task"),
|
||||
}
|
||||
err := s.imeta.AddTask(pit1)
|
||||
err := s.imeta.AddTask(context.TODO(), pit1)
|
||||
s.NoError(err)
|
||||
|
||||
it1 := &importTask{
|
||||
|
@ -136,14 +136,14 @@ func (s *ImportCheckerSuite) TestLogStats() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import task"),
|
||||
}
|
||||
err = s.imeta.AddTask(it1)
|
||||
err = s.imeta.AddTask(context.TODO(), it1)
|
||||
s.NoError(err)
|
||||
|
||||
s.checker.LogStats()
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestCheckJob() {
|
||||
job := s.imeta.GetJob(s.jobID)
|
||||
job := s.imeta.GetJob(context.TODO(), s.jobID)
|
||||
|
||||
// test checkPendingJob
|
||||
alloc := s.alloc
|
||||
|
@ -152,40 +152,40 @@ func (s *ImportCheckerSuite) TestCheckJob() {
|
|||
return id, id + n, nil
|
||||
})
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
s.checker.checkPendingJob(job)
|
||||
preimportTasks := s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
preimportTasks := s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
s.Equal(2, len(preimportTasks))
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
s.checker.checkPendingJob(job) // no lack
|
||||
preimportTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
preimportTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
s.Equal(2, len(preimportTasks))
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
// test checkPreImportingJob
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
for _, t := range preimportTasks {
|
||||
err := s.imeta.UpdateTask(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
err := s.imeta.UpdateTask(context.TODO(), t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
s.NoError(err)
|
||||
}
|
||||
|
||||
s.checker.checkPreImportingJob(job)
|
||||
importTasks := s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
importTasks := s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
s.Equal(1, len(importTasks))
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
s.checker.checkPreImportingJob(job) // no lack
|
||||
importTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
importTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
s.Equal(1, len(importTasks))
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
// test checkImportingJob
|
||||
s.checker.checkImportingJob(job)
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
for _, t := range importTasks {
|
||||
task := s.imeta.GetTask(t.GetTaskID())
|
||||
task := s.imeta.GetTask(context.TODO(), t.GetTaskID())
|
||||
for _, id := range task.(*importTask).GetSegmentIDs() {
|
||||
segment := s.checker.meta.GetSegment(id)
|
||||
segment := s.checker.meta.GetSegment(context.TODO(), id)
|
||||
s.Equal(true, segment.GetIsImporting())
|
||||
}
|
||||
}
|
||||
|
@ -203,14 +203,14 @@ func (s *ImportCheckerSuite) TestCheckJob() {
|
|||
}
|
||||
err := s.checker.meta.AddSegment(context.Background(), segment)
|
||||
s.NoError(err)
|
||||
err = s.imeta.UpdateTask(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed),
|
||||
err = s.imeta.UpdateTask(context.TODO(), t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed),
|
||||
UpdateSegmentIDs([]int64{segment.GetID()}), UpdateStatsSegmentIDs([]int64{rand.Int63()}))
|
||||
s.NoError(err)
|
||||
err = s.checker.meta.UpdateChannelCheckpoint(segment.GetInsertChannel(), &msgpb.MsgPosition{MsgID: []byte{0}})
|
||||
err = s.checker.meta.UpdateChannelCheckpoint(context.TODO(), segment.GetInsertChannel(), &msgpb.MsgPosition{MsgID: []byte{0}})
|
||||
s.NoError(err)
|
||||
}
|
||||
s.checker.checkImportingJob(job)
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
// test check stats job
|
||||
alloc.EXPECT().AllocID(mock.Anything).Return(rand.Int63(), nil).Maybe()
|
||||
|
@ -218,95 +218,95 @@ func (s *ImportCheckerSuite) TestCheckJob() {
|
|||
sjm.EXPECT().SubmitStatsTask(mock.Anything, mock.Anything, mock.Anything, false).Return(nil)
|
||||
sjm.EXPECT().GetStatsTaskState(mock.Anything, mock.Anything).Return(indexpb.JobState_JobStateNone)
|
||||
s.checker.checkStatsJob(job)
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
sjm = NewMockStatsJobManager(s.T())
|
||||
sjm.EXPECT().GetStatsTaskState(mock.Anything, mock.Anything).Return(indexpb.JobState_JobStateInProgress)
|
||||
s.checker.sjm = sjm
|
||||
s.checker.checkStatsJob(job)
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Stats, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
sjm = NewMockStatsJobManager(s.T())
|
||||
sjm.EXPECT().GetStatsTaskState(mock.Anything, mock.Anything).Return(indexpb.JobState_JobStateFinished)
|
||||
s.checker.sjm = sjm
|
||||
s.checker.checkStatsJob(job)
|
||||
s.Equal(internalpb.ImportJobState_IndexBuilding, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_IndexBuilding, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
// test check IndexBuilding job
|
||||
s.checker.checkIndexBuildingJob(job)
|
||||
for _, t := range importTasks {
|
||||
task := s.imeta.GetTask(t.GetTaskID())
|
||||
task := s.imeta.GetTask(context.TODO(), t.GetTaskID())
|
||||
for _, id := range task.(*importTask).GetSegmentIDs() {
|
||||
segment := s.checker.meta.GetSegment(id)
|
||||
segment := s.checker.meta.GetSegment(context.TODO(), id)
|
||||
s.Equal(false, segment.GetIsImporting())
|
||||
}
|
||||
}
|
||||
s.Equal(internalpb.ImportJobState_Completed, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Completed, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestCheckJob_Failed() {
|
||||
mockErr := errors.New("mock err")
|
||||
job := s.imeta.GetJob(s.jobID)
|
||||
job := s.imeta.GetJob(context.TODO(), s.jobID)
|
||||
|
||||
// test checkPendingJob
|
||||
alloc := s.alloc
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, nil)
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(mockErr)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(mockErr)
|
||||
|
||||
s.checker.checkPendingJob(job)
|
||||
preimportTasks := s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
preimportTasks := s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
s.Equal(0, len(preimportTasks))
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
alloc.ExpectedCalls = nil
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, mockErr)
|
||||
s.checker.checkPendingJob(job)
|
||||
preimportTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
preimportTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
s.Equal(0, len(preimportTasks))
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
alloc.ExpectedCalls = nil
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, nil)
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
s.checker.checkPendingJob(job)
|
||||
preimportTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
preimportTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
s.Equal(2, len(preimportTasks))
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
// test checkPreImportingJob
|
||||
for _, t := range preimportTasks {
|
||||
err := s.imeta.UpdateTask(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
err := s.imeta.UpdateTask(context.TODO(), t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
s.NoError(err)
|
||||
}
|
||||
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(mockErr)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(mockErr)
|
||||
s.checker.checkPreImportingJob(job)
|
||||
importTasks := s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
importTasks := s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
s.Equal(0, len(importTasks))
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
alloc.ExpectedCalls = nil
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, mockErr)
|
||||
importTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
importTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
s.Equal(0, len(importTasks))
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_PreImporting, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
alloc.ExpectedCalls = nil
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, nil)
|
||||
s.checker.checkPreImportingJob(job)
|
||||
importTasks = s.imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
importTasks = s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(ImportTaskType))
|
||||
s.Equal(1, len(importTasks))
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(job.GetJobID()).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Importing, s.imeta.GetJob(context.TODO(), job.GetJobID()).GetState())
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestCheckTimeout() {
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
var task ImportTask = &preImportTask{
|
||||
PreImportTask: &datapb.PreImportTask{
|
||||
|
@ -316,18 +316,18 @@ func (s *ImportCheckerSuite) TestCheckTimeout() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("preimport task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
s.checker.tryTimeoutJob(s.imeta.GetJob(s.jobID))
|
||||
s.checker.tryTimeoutJob(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
|
||||
job := s.imeta.GetJob(s.jobID)
|
||||
job := s.imeta.GetJob(context.TODO(), s.jobID)
|
||||
s.Equal(internalpb.ImportJobState_Failed, job.GetState())
|
||||
s.Equal("import timeout", job.GetReason())
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestCheckFailure() {
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
it := &importTask{
|
||||
ImportTaskV2: &datapb.ImportTaskV2{
|
||||
|
@ -339,28 +339,28 @@ func (s *ImportCheckerSuite) TestCheckFailure() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import task"),
|
||||
}
|
||||
err := s.imeta.AddTask(it)
|
||||
err := s.imeta.AddTask(context.TODO(), it)
|
||||
s.NoError(err)
|
||||
|
||||
sjm := NewMockStatsJobManager(s.T())
|
||||
sjm.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock err"))
|
||||
s.checker.sjm = sjm
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(s.jobID))
|
||||
tasks := s.imeta.GetTaskBy(WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
tasks := s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
s.Equal(0, len(tasks))
|
||||
sjm.ExpectedCalls = nil
|
||||
sjm.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(errors.New("mock error"))
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(s.jobID))
|
||||
tasks = s.imeta.GetTaskBy(WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(errors.New("mock error"))
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
tasks = s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
s.Equal(0, len(tasks))
|
||||
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(s.jobID))
|
||||
tasks = s.imeta.GetTaskBy(WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
s.checker.checkFailedJob(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
tasks = s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID), WithStates(datapb.ImportTaskStateV2_Failed))
|
||||
s.Equal(1, len(tasks))
|
||||
}
|
||||
|
||||
|
@ -368,7 +368,7 @@ func (s *ImportCheckerSuite) TestCheckGC() {
|
|||
mockErr := errors.New("mock err")
|
||||
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
var task ImportTask = &importTask{
|
||||
ImportTaskV2: &datapb.ImportTaskV2{
|
||||
JobID: s.jobID,
|
||||
|
@ -379,75 +379,75 @@ func (s *ImportCheckerSuite) TestCheckGC() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
|
||||
// not failed or completed
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
err = s.imeta.UpdateJob(s.jobID, UpdateJobState(internalpb.ImportJobState_Failed))
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
err = s.imeta.UpdateJob(context.TODO(), s.jobID, UpdateJobState(internalpb.ImportJobState_Failed))
|
||||
s.NoError(err)
|
||||
|
||||
// not reach cleanup ts
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
GCRetention := Params.DataCoordCfg.ImportTaskRetention.GetAsDuration(time.Second)
|
||||
job := s.imeta.GetJob(s.jobID)
|
||||
job := s.imeta.GetJob(context.TODO(), s.jobID)
|
||||
job.(*importJob).CleanupTs = tsoutil.AddPhysicalDurationOnTs(job.GetCleanupTs(), GCRetention*-2)
|
||||
err = s.imeta.AddJob(job)
|
||||
err = s.imeta.AddJob(context.TODO(), job)
|
||||
s.NoError(err)
|
||||
|
||||
// origin segment not dropped
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(), UpdateSegmentIDs([]int64{}))
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateSegmentIDs([]int64{}))
|
||||
s.NoError(err)
|
||||
|
||||
// stats segment not dropped
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(), UpdateStatsSegmentIDs([]int64{}))
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateStatsSegmentIDs([]int64{}))
|
||||
s.NoError(err)
|
||||
|
||||
// task is not dropped
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(), UpdateNodeID(NullNodeID))
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateNodeID(NullNodeID))
|
||||
s.NoError(err)
|
||||
|
||||
// remove task failed
|
||||
catalog.EXPECT().DropImportTask(mock.Anything).Return(mockErr)
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
catalog.EXPECT().DropImportTask(mock.Anything, mock.Anything).Return(mockErr)
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(1, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
|
||||
// remove job failed
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().DropImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportJob(mock.Anything).Return(mockErr)
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(0, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy()))
|
||||
catalog.EXPECT().DropImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportJob(mock.Anything, mock.Anything).Return(mockErr)
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(0, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(1, len(s.imeta.GetJobBy(context.TODO())))
|
||||
|
||||
// normal case
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().DropImportJob(mock.Anything).Return(nil)
|
||||
s.checker.checkGC(s.imeta.GetJob(s.jobID))
|
||||
s.Equal(0, len(s.imeta.GetTaskBy(WithJob(s.jobID))))
|
||||
s.Equal(0, len(s.imeta.GetJobBy()))
|
||||
catalog.EXPECT().DropImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.checker.checkGC(s.imeta.GetJob(context.TODO(), s.jobID))
|
||||
s.Equal(0, len(s.imeta.GetTaskBy(context.TODO(), WithJob(s.jobID))))
|
||||
s.Equal(0, len(s.imeta.GetJobBy(context.TODO())))
|
||||
}
|
||||
|
||||
func (s *ImportCheckerSuite) TestCheckCollection() {
|
||||
mockErr := errors.New("mock err")
|
||||
|
||||
catalog := s.imeta.(*importMeta).catalog.(*mocks.DataCoordCatalog)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
var task ImportTask = &preImportTask{
|
||||
PreImportTask: &datapb.PreImportTask{
|
||||
JobID: s.jobID,
|
||||
|
@ -456,43 +456,43 @@ func (s *ImportCheckerSuite) TestCheckCollection() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("preimport task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
|
||||
// no jobs
|
||||
s.checker.checkCollection(1, []ImportJob{})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(s.jobID).GetState())
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), s.jobID).GetState())
|
||||
|
||||
// collection exist
|
||||
broker := s.checker.broker.(*broker2.MockBroker)
|
||||
broker.EXPECT().HasCollection(mock.Anything, mock.Anything).Return(true, nil)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(s.jobID).GetState())
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(context.TODO(), s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), s.jobID).GetState())
|
||||
|
||||
// HasCollection failed
|
||||
s.checker.broker = broker2.NewMockBroker(s.T())
|
||||
broker = s.checker.broker.(*broker2.MockBroker)
|
||||
broker.EXPECT().HasCollection(mock.Anything, mock.Anything).Return(true, mockErr)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(s.jobID).GetState())
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(context.TODO(), s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), s.jobID).GetState())
|
||||
|
||||
// SaveImportJob failed
|
||||
s.checker.broker = broker2.NewMockBroker(s.T())
|
||||
broker = s.checker.broker.(*broker2.MockBroker)
|
||||
broker.EXPECT().HasCollection(mock.Anything, mock.Anything).Return(false, nil)
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(mockErr)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(s.jobID).GetState())
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(mockErr)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(context.TODO(), s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Pending, s.imeta.GetJob(context.TODO(), s.jobID).GetState())
|
||||
|
||||
// collection dropped
|
||||
s.checker.broker = broker2.NewMockBroker(s.T())
|
||||
broker = s.checker.broker.(*broker2.MockBroker)
|
||||
broker.EXPECT().HasCollection(mock.Anything, mock.Anything).Return(false, nil)
|
||||
catalog.ExpectedCalls = nil
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Failed, s.imeta.GetJob(s.jobID).GetState())
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.checker.checkCollection(1, []ImportJob{s.imeta.GetJob(context.TODO(), s.jobID)})
|
||||
s.Equal(internalpb.ImportJobState_Failed, s.imeta.GetJob(context.TODO(), s.jobID).GetState())
|
||||
}
|
||||
|
||||
func TestImportChecker(t *testing.T) {
@ -17,6 +17,7 @@
|
|||
package datacoord
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/golang-lru/v2/expirable"
|
||||
|
@ -29,19 +30,19 @@ import (
|
|||
)
|
||||
|
||||
type ImportMeta interface {
AddJob(job ImportJob) error
UpdateJob(jobID int64, actions ...UpdateJobAction) error
GetJob(jobID int64) ImportJob
GetJobBy(filters ...ImportJobFilter) []ImportJob
CountJobBy(filters ...ImportJobFilter) int
RemoveJob(jobID int64) error
AddJob(ctx context.Context, job ImportJob) error
UpdateJob(ctx context.Context, jobID int64, actions ...UpdateJobAction) error
GetJob(ctx context.Context, jobID int64) ImportJob
GetJobBy(ctx context.Context, filters ...ImportJobFilter) []ImportJob
CountJobBy(ctx context.Context, filters ...ImportJobFilter) int
RemoveJob(ctx context.Context, jobID int64) error

AddTask(task ImportTask) error
UpdateTask(taskID int64, actions ...UpdateAction) error
GetTask(taskID int64) ImportTask
GetTaskBy(filters ...ImportTaskFilter) []ImportTask
RemoveTask(taskID int64) error
TaskStatsJSON() string
AddTask(ctx context.Context, task ImportTask) error
UpdateTask(ctx context.Context, taskID int64, actions ...UpdateAction) error
GetTask(ctx context.Context, taskID int64) ImportTask
GetTaskBy(ctx context.Context, filters ...ImportTaskFilter) []ImportTask
RemoveTask(ctx context.Context, taskID int64) error
TaskStatsJSON(ctx context.Context) string
}
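For reference, a minimal sketch (not part of the change) of how a caller threads its own context through the refined ImportMeta interface rather than relying on an internally stored one. The function name checkImportingJobSketch is hypothetical; the filter and update helpers are the ones used in the checker above.

func checkImportingJobSketch(ctx context.Context, imeta ImportMeta, jobID int64) error {
	// Read the job and its import tasks under the caller's context.
	if imeta.GetJob(ctx, jobID) == nil {
		return nil
	}
	for _, t := range imeta.GetTaskBy(ctx, WithJob(jobID), WithType(ImportTaskType)) {
		if t.GetState() != datapb.ImportTaskStateV2_Completed {
			return nil // not all tasks have finished yet
		}
	}
	// Persist the state transition through the same context.
	return imeta.UpdateJob(ctx, jobID, UpdateJobState(internalpb.ImportJobState_Stats))
}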
|
||||
|
||||
type importTasks struct {
|
||||
|
@ -92,16 +93,16 @@ type importMeta struct {
|
|||
catalog metastore.DataCoordCatalog
|
||||
}
|
||||
|
||||
func NewImportMeta(catalog metastore.DataCoordCatalog) (ImportMeta, error) {
|
||||
restoredPreImportTasks, err := catalog.ListPreImportTasks()
|
||||
func NewImportMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (ImportMeta, error) {
|
||||
restoredPreImportTasks, err := catalog.ListPreImportTasks(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restoredImportTasks, err := catalog.ListImportTasks()
|
||||
restoredImportTasks, err := catalog.ListImportTasks(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restoredJobs, err := catalog.ListImportJobs()
|
||||
restoredJobs, err := catalog.ListImportJobs(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -136,10 +137,10 @@ func NewImportMeta(catalog metastore.DataCoordCatalog) (ImportMeta, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (m *importMeta) AddJob(job ImportJob) error {
|
||||
func (m *importMeta) AddJob(ctx context.Context, job ImportJob) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
err := m.catalog.SaveImportJob(job.(*importJob).ImportJob)
|
||||
err := m.catalog.SaveImportJob(ctx, job.(*importJob).ImportJob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -147,7 +148,7 @@ func (m *importMeta) AddJob(job ImportJob) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) UpdateJob(jobID int64, actions ...UpdateJobAction) error {
|
||||
func (m *importMeta) UpdateJob(ctx context.Context, jobID int64, actions ...UpdateJobAction) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if job, ok := m.jobs[jobID]; ok {
|
||||
|
@ -155,7 +156,7 @@ func (m *importMeta) UpdateJob(jobID int64, actions ...UpdateJobAction) error {
|
|||
for _, action := range actions {
|
||||
action(updatedJob)
|
||||
}
|
||||
err := m.catalog.SaveImportJob(updatedJob.(*importJob).ImportJob)
|
||||
err := m.catalog.SaveImportJob(ctx, updatedJob.(*importJob).ImportJob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -164,13 +165,13 @@ func (m *importMeta) UpdateJob(jobID int64, actions ...UpdateJobAction) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) GetJob(jobID int64) ImportJob {
|
||||
func (m *importMeta) GetJob(ctx context.Context, jobID int64) ImportJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.jobs[jobID]
|
||||
}
|
||||
|
||||
func (m *importMeta) GetJobBy(filters ...ImportJobFilter) []ImportJob {
|
||||
func (m *importMeta) GetJobBy(ctx context.Context, filters ...ImportJobFilter) []ImportJob {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.getJobBy(filters...)
|
||||
|
@ -190,17 +191,17 @@ OUTER:
|
|||
return ret
|
||||
}
|
||||
|
||||
func (m *importMeta) CountJobBy(filters ...ImportJobFilter) int {
|
||||
func (m *importMeta) CountJobBy(ctx context.Context, filters ...ImportJobFilter) int {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return len(m.getJobBy(filters...))
|
||||
}
|
||||
|
||||
func (m *importMeta) RemoveJob(jobID int64) error {
|
||||
func (m *importMeta) RemoveJob(ctx context.Context, jobID int64) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if _, ok := m.jobs[jobID]; ok {
|
||||
err := m.catalog.DropImportJob(jobID)
|
||||
err := m.catalog.DropImportJob(ctx, jobID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -209,18 +210,18 @@ func (m *importMeta) RemoveJob(jobID int64) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) AddTask(task ImportTask) error {
|
||||
func (m *importMeta) AddTask(ctx context.Context, task ImportTask) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
switch task.GetType() {
|
||||
case PreImportTaskType:
|
||||
err := m.catalog.SavePreImportTask(task.(*preImportTask).PreImportTask)
|
||||
err := m.catalog.SavePreImportTask(ctx, task.(*preImportTask).PreImportTask)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.tasks.add(task)
|
||||
case ImportTaskType:
|
||||
err := m.catalog.SaveImportTask(task.(*importTask).ImportTaskV2)
|
||||
err := m.catalog.SaveImportTask(ctx, task.(*importTask).ImportTaskV2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -229,7 +230,7 @@ func (m *importMeta) AddTask(task ImportTask) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) UpdateTask(taskID int64, actions ...UpdateAction) error {
|
||||
func (m *importMeta) UpdateTask(ctx context.Context, taskID int64, actions ...UpdateAction) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if task := m.tasks.get(taskID); task != nil {
|
||||
|
@ -239,13 +240,13 @@ func (m *importMeta) UpdateTask(taskID int64, actions ...UpdateAction) error {
|
|||
}
|
||||
switch updatedTask.GetType() {
|
||||
case PreImportTaskType:
|
||||
err := m.catalog.SavePreImportTask(updatedTask.(*preImportTask).PreImportTask)
|
||||
err := m.catalog.SavePreImportTask(ctx, updatedTask.(*preImportTask).PreImportTask)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.tasks.add(updatedTask)
|
||||
case ImportTaskType:
|
||||
err := m.catalog.SaveImportTask(updatedTask.(*importTask).ImportTaskV2)
|
||||
err := m.catalog.SaveImportTask(ctx, updatedTask.(*importTask).ImportTaskV2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -256,13 +257,13 @@ func (m *importMeta) UpdateTask(taskID int64, actions ...UpdateAction) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) GetTask(taskID int64) ImportTask {
|
||||
func (m *importMeta) GetTask(ctx context.Context, taskID int64) ImportTask {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.tasks.get(taskID)
|
||||
}
|
||||
|
||||
func (m *importMeta) GetTaskBy(filters ...ImportTaskFilter) []ImportTask {
|
||||
func (m *importMeta) GetTaskBy(ctx context.Context, filters ...ImportTaskFilter) []ImportTask {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
ret := make([]ImportTask, 0)
|
||||
|
@ -278,18 +279,18 @@ OUTER:
|
|||
return ret
|
||||
}
|
||||
|
||||
func (m *importMeta) RemoveTask(taskID int64) error {
|
||||
func (m *importMeta) RemoveTask(ctx context.Context, taskID int64) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if task := m.tasks.get(taskID); task != nil {
|
||||
switch task.GetType() {
|
||||
case PreImportTaskType:
|
||||
err := m.catalog.DropPreImportTask(taskID)
|
||||
err := m.catalog.DropPreImportTask(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case ImportTaskType:
|
||||
err := m.catalog.DropImportTask(taskID)
|
||||
err := m.catalog.DropImportTask(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -299,7 +300,7 @@ func (m *importMeta) RemoveTask(taskID int64) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *importMeta) TaskStatsJSON() string {
|
||||
func (m *importMeta) TaskStatsJSON(ctx context.Context) string {
|
||||
tasks := m.tasks.listTaskStats()
|
||||
|
||||
ret, err := json.Marshal(tasks)
|
||||
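The changes in this file all follow the same shape: each ImportMeta method gains a leading context.Context and forwards it to the catalog call instead of relying on a context captured when the meta object was built. A minimal caller-side sketch of the new signatures, assuming the usual context and time imports; the wrapper function and the timeout value are illustrative and not part of this commit:

// removeTaskWithDeadline is a hypothetical helper showing how a request-scoped
// ctx (deadline, tracing metadata) now reaches the metastore write.
func removeTaskWithDeadline(parent context.Context, im ImportMeta, taskID int64) error {
	ctx, cancel := context.WithTimeout(parent, 3*time.Second)
	defer cancel()
	// RemoveTask forwards ctx to catalog.DropPreImportTask / DropImportTask,
	// matching the signatures introduced above.
	return im.RemoveTask(ctx, taskID)
}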
|
|
|
@ -17,6 +17,7 @@
|
|||
package datacoord
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
@ -34,55 +35,56 @@ import (
|
|||
|
||||
func TestImportMeta_Restore(t *testing.T) {
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return([]*datapb.ImportJob{{JobID: 0}}, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return([]*datapb.ImportTaskV2{{TaskID: 2}}, nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return([]*datapb.ImportJob{{JobID: 0}}, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return([]*datapb.ImportTaskV2{{TaskID: 2}}, nil)
|
||||
ctx := context.TODO()
|
||||
|
||||
im, err := NewImportMeta(catalog)
|
||||
im, err := NewImportMeta(ctx, catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
jobs := im.GetJobBy()
|
||||
jobs := im.GetJobBy(ctx)
|
||||
assert.Equal(t, 1, len(jobs))
|
||||
assert.Equal(t, int64(0), jobs[0].GetJobID())
|
||||
tasks := im.GetTaskBy()
|
||||
tasks := im.GetTaskBy(ctx)
|
||||
assert.Equal(t, 2, len(tasks))
|
||||
tasks = im.GetTaskBy(WithType(PreImportTaskType))
|
||||
tasks = im.GetTaskBy(ctx, WithType(PreImportTaskType))
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
assert.Equal(t, int64(1), tasks[0].GetTaskID())
|
||||
tasks = im.GetTaskBy(WithType(ImportTaskType))
|
||||
tasks = im.GetTaskBy(ctx, WithType(ImportTaskType))
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
assert.Equal(t, int64(2), tasks[0].GetTaskID())
|
||||
|
||||
// new meta failed
|
||||
mockErr := errors.New("mock error")
|
||||
catalog = mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListPreImportTasks().Return([]*datapb.PreImportTask{{TaskID: 1}}, mockErr)
|
||||
_, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return([]*datapb.PreImportTask{{TaskID: 1}}, mockErr)
|
||||
_, err = NewImportMeta(ctx, catalog)
|
||||
assert.Error(t, err)
|
||||
|
||||
catalog = mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportTasks().Return([]*datapb.ImportTaskV2{{TaskID: 2}}, mockErr)
|
||||
catalog.EXPECT().ListPreImportTasks().Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
_, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return([]*datapb.ImportTaskV2{{TaskID: 2}}, mockErr)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
_, err = NewImportMeta(ctx, catalog)
|
||||
assert.Error(t, err)
|
||||
|
||||
catalog = mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return([]*datapb.ImportJob{{JobID: 0}}, mockErr)
|
||||
catalog.EXPECT().ListPreImportTasks().Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return([]*datapb.ImportTaskV2{{TaskID: 2}}, nil)
|
||||
_, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return([]*datapb.ImportJob{{JobID: 0}}, mockErr)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return([]*datapb.PreImportTask{{TaskID: 1}}, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return([]*datapb.ImportTaskV2{{TaskID: 2}}, nil)
|
||||
_, err = NewImportMeta(ctx, catalog)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestImportMeta_Job(t *testing.T) {
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
im, err := NewImportMeta(catalog)
|
||||
im, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
jobIDs := []int64{1000, 2000, 3000}
|
||||
|
@ -97,65 +99,65 @@ func TestImportMeta_Job(t *testing.T) {
|
|||
State: internalpb.ImportJobState_Pending,
|
||||
},
|
||||
}
|
||||
err = im.AddJob(job)
|
||||
err = im.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
ret := im.GetJob(jobID)
|
||||
ret := im.GetJob(context.TODO(), jobID)
|
||||
assert.Equal(t, job, ret)
|
||||
jobs := im.GetJobBy()
|
||||
jobs := im.GetJobBy(context.TODO())
|
||||
assert.Equal(t, i+1, len(jobs))
|
||||
|
||||
// Add again, test idempotency
|
||||
err = im.AddJob(job)
|
||||
err = im.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
ret = im.GetJob(jobID)
|
||||
ret = im.GetJob(context.TODO(), jobID)
|
||||
assert.Equal(t, job, ret)
|
||||
jobs = im.GetJobBy()
|
||||
jobs = im.GetJobBy(context.TODO())
|
||||
assert.Equal(t, i+1, len(jobs))
|
||||
}
|
||||
|
||||
jobs := im.GetJobBy()
|
||||
jobs := im.GetJobBy(context.TODO())
|
||||
assert.Equal(t, 3, len(jobs))
|
||||
|
||||
err = im.UpdateJob(jobIDs[0], UpdateJobState(internalpb.ImportJobState_Completed))
|
||||
err = im.UpdateJob(context.TODO(), jobIDs[0], UpdateJobState(internalpb.ImportJobState_Completed))
|
||||
assert.NoError(t, err)
|
||||
job0 := im.GetJob(jobIDs[0])
|
||||
job0 := im.GetJob(context.TODO(), jobIDs[0])
|
||||
assert.NotNil(t, job0)
|
||||
assert.Equal(t, internalpb.ImportJobState_Completed, job0.GetState())
|
||||
|
||||
err = im.UpdateJob(jobIDs[1], UpdateJobState(internalpb.ImportJobState_Importing))
|
||||
err = im.UpdateJob(context.TODO(), jobIDs[1], UpdateJobState(internalpb.ImportJobState_Importing))
|
||||
assert.NoError(t, err)
|
||||
job1 := im.GetJob(jobIDs[1])
|
||||
job1 := im.GetJob(context.TODO(), jobIDs[1])
|
||||
assert.NotNil(t, job1)
|
||||
assert.Equal(t, internalpb.ImportJobState_Importing, job1.GetState())
|
||||
|
||||
jobs = im.GetJobBy(WithJobStates(internalpb.ImportJobState_Pending))
|
||||
jobs = im.GetJobBy(context.TODO(), WithJobStates(internalpb.ImportJobState_Pending))
|
||||
assert.Equal(t, 1, len(jobs))
|
||||
jobs = im.GetJobBy(WithoutJobStates(internalpb.ImportJobState_Pending))
|
||||
jobs = im.GetJobBy(context.TODO(), WithoutJobStates(internalpb.ImportJobState_Pending))
|
||||
assert.Equal(t, 2, len(jobs))
|
||||
count := im.CountJobBy()
|
||||
count := im.CountJobBy(context.TODO())
|
||||
assert.Equal(t, 3, count)
|
||||
count = im.CountJobBy(WithJobStates(internalpb.ImportJobState_Pending))
|
||||
count = im.CountJobBy(context.TODO(), WithJobStates(internalpb.ImportJobState_Pending))
|
||||
assert.Equal(t, 1, count)
|
||||
count = im.CountJobBy(WithoutJobStates(internalpb.ImportJobState_Pending))
|
||||
count = im.CountJobBy(context.TODO(), WithoutJobStates(internalpb.ImportJobState_Pending))
|
||||
assert.Equal(t, 2, count)
|
||||
|
||||
err = im.RemoveJob(jobIDs[0])
|
||||
err = im.RemoveJob(context.TODO(), jobIDs[0])
|
||||
assert.NoError(t, err)
|
||||
jobs = im.GetJobBy()
|
||||
jobs = im.GetJobBy(context.TODO())
|
||||
assert.Equal(t, 2, len(jobs))
|
||||
count = im.CountJobBy()
|
||||
count = im.CountJobBy(context.TODO())
|
||||
assert.Equal(t, 2, count)
|
||||
}
|
||||
|
||||
func TestImportMeta_ImportTask(t *testing.T) {
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().DropImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
im, err := NewImportMeta(catalog)
|
||||
im, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
task1 := &importTask{
|
||||
|
@ -168,55 +170,55 @@ func TestImportMeta_ImportTask(t *testing.T) {
|
|||
State: datapb.ImportTaskStateV2_Pending,
|
||||
},
|
||||
}
|
||||
err = im.AddTask(task1)
|
||||
err = im.AddTask(context.TODO(), task1)
|
||||
assert.NoError(t, err)
|
||||
err = im.AddTask(task1)
|
||||
err = im.AddTask(context.TODO(), task1)
|
||||
assert.NoError(t, err)
|
||||
res := im.GetTask(task1.GetTaskID())
|
||||
res := im.GetTask(context.TODO(), task1.GetTaskID())
|
||||
assert.Equal(t, task1, res)
|
||||
|
||||
task2 := task1.Clone()
|
||||
task2.(*importTask).TaskID = 8
|
||||
task2.(*importTask).State = datapb.ImportTaskStateV2_Completed
|
||||
err = im.AddTask(task2)
|
||||
err = im.AddTask(context.TODO(), task2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
tasks := im.GetTaskBy(WithJob(task1.GetJobID()))
|
||||
tasks := im.GetTaskBy(context.TODO(), WithJob(task1.GetJobID()))
|
||||
assert.Equal(t, 2, len(tasks))
|
||||
tasks = im.GetTaskBy(WithType(ImportTaskType), WithStates(datapb.ImportTaskStateV2_Completed))
|
||||
tasks = im.GetTaskBy(context.TODO(), WithType(ImportTaskType), WithStates(datapb.ImportTaskStateV2_Completed))
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
assert.Equal(t, task2.GetTaskID(), tasks[0].GetTaskID())
|
||||
|
||||
err = im.UpdateTask(task1.GetTaskID(), UpdateNodeID(9),
|
||||
err = im.UpdateTask(context.TODO(), task1.GetTaskID(), UpdateNodeID(9),
|
||||
UpdateState(datapb.ImportTaskStateV2_Failed),
|
||||
UpdateFileStats([]*datapb.ImportFileStats{1: {
|
||||
FileSize: 100,
|
||||
}}))
|
||||
assert.NoError(t, err)
|
||||
task := im.GetTask(task1.GetTaskID())
|
||||
task := im.GetTask(context.TODO(), task1.GetTaskID())
|
||||
assert.Equal(t, int64(9), task.GetNodeID())
|
||||
assert.Equal(t, datapb.ImportTaskStateV2_Failed, task.GetState())
|
||||
|
||||
err = im.RemoveTask(task1.GetTaskID())
|
||||
err = im.RemoveTask(context.TODO(), task1.GetTaskID())
|
||||
assert.NoError(t, err)
|
||||
tasks = im.GetTaskBy()
|
||||
tasks = im.GetTaskBy(context.TODO())
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
err = im.RemoveTask(10)
|
||||
err = im.RemoveTask(context.TODO(), 10)
|
||||
assert.NoError(t, err)
|
||||
tasks = im.GetTaskBy()
|
||||
tasks = im.GetTaskBy(context.TODO())
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
}
|
||||
|
||||
func TestImportMeta_Task_Failed(t *testing.T) {
|
||||
mockErr := errors.New("mock err")
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(mockErr)
|
||||
catalog.EXPECT().DropImportTask(mock.Anything).Return(mockErr)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(mockErr)
|
||||
catalog.EXPECT().DropImportTask(mock.Anything, mock.Anything).Return(mockErr)
|
||||
|
||||
im, err := NewImportMeta(catalog)
|
||||
im, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
im.(*importMeta).catalog = catalog
|
||||
|
||||
|
@ -231,26 +233,26 @@ func TestImportMeta_Task_Failed(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
err = im.AddTask(task)
|
||||
err = im.AddTask(context.TODO(), task)
|
||||
assert.Error(t, err)
|
||||
im.(*importMeta).tasks.add(task)
|
||||
err = im.UpdateTask(task.GetTaskID(), UpdateNodeID(9))
|
||||
err = im.UpdateTask(context.TODO(), task.GetTaskID(), UpdateNodeID(9))
|
||||
assert.Error(t, err)
|
||||
err = im.RemoveTask(task.GetTaskID())
|
||||
err = im.RemoveTask(context.TODO(), task.GetTaskID())
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestTaskStatsJSON(t *testing.T) {
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
im, err := NewImportMeta(catalog)
|
||||
im, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
statsJSON := im.TaskStatsJSON()
|
||||
statsJSON := im.TaskStatsJSON(context.TODO())
|
||||
assert.Equal(t, "[]", statsJSON)
|
||||
|
||||
task1 := &importTask{
|
||||
|
@ -258,7 +260,7 @@ func TestTaskStatsJSON(t *testing.T) {
|
|||
TaskID: 1,
|
||||
},
|
||||
}
|
||||
err = im.AddTask(task1)
|
||||
err = im.AddTask(context.TODO(), task1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
task2 := &importTask{
|
||||
|
@ -266,13 +268,13 @@ func TestTaskStatsJSON(t *testing.T) {
|
|||
TaskID: 2,
|
||||
},
|
||||
}
|
||||
err = im.AddTask(task2)
|
||||
err = im.AddTask(context.TODO(), task2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = im.UpdateTask(1, UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
err = im.UpdateTask(context.TODO(), 1, UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
assert.NoError(t, err)
|
||||
|
||||
statsJSON = im.TaskStatsJSON()
|
||||
statsJSON = im.TaskStatsJSON(context.TODO())
|
||||
var tasks []*metricsinfo.ImportTask
|
||||
err = json.Unmarshal([]byte(statsJSON), &tasks)
|
||||
assert.NoError(t, err)
|
||||
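Because every catalog method now receives a ctx argument, the mockery expectations in these tests gain one extra mock.Anything matcher. The pattern, assuming the mockery-generated DataCoordCatalog mock used throughout this suite (snippet belongs inside a test body with t in scope):

// before: catalog.EXPECT().ListImportJobs().Return(nil, nil)
// after: the new leading context parameter is matched with mock.Anything
catalog := mocks.NewDataCoordCatalog(t)
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)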
|
|
|
@ -17,6 +17,7 @@
|
|||
package datacoord
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -91,13 +92,13 @@ func (s *importScheduler) Close() {
|
|||
}
|
||||
|
||||
func (s *importScheduler) process() {
|
||||
jobs := s.imeta.GetJobBy()
|
||||
jobs := s.imeta.GetJobBy(context.TODO())
|
||||
sort.Slice(jobs, func(i, j int) bool {
|
||||
return jobs[i].GetJobID() < jobs[j].GetJobID()
|
||||
})
|
||||
nodeSlots := s.peekSlots()
|
||||
for _, job := range jobs {
|
||||
tasks := s.imeta.GetTaskBy(WithJob(job.GetJobID()))
|
||||
tasks := s.imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()))
|
||||
for _, task := range tasks {
|
||||
switch task.GetState() {
|
||||
case datapb.ImportTaskStateV2_Pending:
|
||||
|
@ -174,14 +175,14 @@ func (s *importScheduler) processPendingPreImport(task ImportTask, nodeID int64)
|
|||
return
|
||||
}
|
||||
log.Info("processing pending preimport task...", WrapTaskLog(task)...)
|
||||
job := s.imeta.GetJob(task.GetJobID())
|
||||
job := s.imeta.GetJob(context.TODO(), task.GetJobID())
|
||||
req := AssemblePreImportRequest(task, job)
|
||||
err := s.cluster.PreImport(nodeID, req)
|
||||
if err != nil {
|
||||
log.Warn("preimport failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
}
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(),
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(),
|
||||
UpdateState(datapb.ImportTaskStateV2_InProgress),
|
||||
UpdateNodeID(nodeID))
|
||||
if err != nil {
|
||||
|
@ -198,7 +199,7 @@ func (s *importScheduler) processPendingImport(task ImportTask, nodeID int64) {
|
|||
return
|
||||
}
|
||||
log.Info("processing pending import task...", WrapTaskLog(task)...)
|
||||
job := s.imeta.GetJob(task.GetJobID())
|
||||
job := s.imeta.GetJob(context.TODO(), task.GetJobID())
|
||||
req, err := AssembleImportRequest(task, job, s.meta, s.alloc)
|
||||
if err != nil {
|
||||
log.Warn("assemble import request failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
|
@ -209,7 +210,7 @@ func (s *importScheduler) processPendingImport(task ImportTask, nodeID int64) {
|
|||
log.Warn("import failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
}
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(),
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(),
|
||||
UpdateState(datapb.ImportTaskStateV2_InProgress),
|
||||
UpdateNodeID(nodeID))
|
||||
if err != nil {
|
||||
|
@ -228,7 +229,7 @@ func (s *importScheduler) processInProgressPreImport(task ImportTask) {
|
|||
}
|
||||
resp, err := s.cluster.QueryPreImport(task.GetNodeID(), req)
|
||||
if err != nil {
|
||||
updateErr := s.imeta.UpdateTask(task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Pending))
|
||||
updateErr := s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Pending))
|
||||
if updateErr != nil {
|
||||
log.Warn("failed to update preimport task state to pending", WrapTaskLog(task, zap.Error(updateErr))...)
|
||||
}
|
||||
|
@ -236,7 +237,7 @@ func (s *importScheduler) processInProgressPreImport(task ImportTask) {
|
|||
return
|
||||
}
|
||||
if resp.GetState() == datapb.ImportTaskStateV2_Failed {
|
||||
err = s.imeta.UpdateJob(task.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
err = s.imeta.UpdateJob(context.TODO(), task.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
UpdateJobReason(resp.GetReason()))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Failed", zap.Int64("jobID", task.GetJobID()), zap.Error(err))
|
||||
|
@ -248,7 +249,7 @@ func (s *importScheduler) processInProgressPreImport(task ImportTask) {
|
|||
if resp.GetState() == datapb.ImportTaskStateV2_Completed {
|
||||
actions = append(actions, UpdateState(datapb.ImportTaskStateV2_Completed))
|
||||
}
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(), actions...)
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), actions...)
|
||||
if err != nil {
|
||||
log.Warn("update preimport task failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
|
@ -269,7 +270,7 @@ func (s *importScheduler) processInProgressImport(task ImportTask) {
|
|||
}
|
||||
resp, err := s.cluster.QueryImport(task.GetNodeID(), req)
|
||||
if err != nil {
|
||||
updateErr := s.imeta.UpdateTask(task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Pending))
|
||||
updateErr := s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Pending))
|
||||
if updateErr != nil {
|
||||
log.Warn("failed to update import task state to pending", WrapTaskLog(task, zap.Error(updateErr))...)
|
||||
}
|
||||
|
@ -277,7 +278,7 @@ func (s *importScheduler) processInProgressImport(task ImportTask) {
|
|||
return
|
||||
}
|
||||
if resp.GetState() == datapb.ImportTaskStateV2_Failed {
|
||||
err = s.imeta.UpdateJob(task.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
err = s.imeta.UpdateJob(context.TODO(), task.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed),
|
||||
UpdateJobReason(resp.GetReason()))
|
||||
if err != nil {
|
||||
log.Warn("failed to update job state to Failed", zap.Int64("jobID", task.GetJobID()), zap.Error(err))
|
||||
|
@ -293,13 +294,13 @@ func (s *importScheduler) processInProgressImport(task ImportTask) {
|
|||
}
|
||||
|
||||
for _, info := range resp.GetImportSegmentsInfo() {
|
||||
segment := s.meta.GetSegment(info.GetSegmentID())
|
||||
segment := s.meta.GetSegment(context.TODO(), info.GetSegmentID())
|
||||
if info.GetImportedRows() <= segment.GetNumOfRows() {
|
||||
continue // rows not changed, no need to update
|
||||
}
|
||||
diff := info.GetImportedRows() - segment.GetNumOfRows()
|
||||
op := UpdateImportedRows(info.GetSegmentID(), info.GetImportedRows())
|
||||
err = s.meta.UpdateSegmentsInfo(op)
|
||||
err = s.meta.UpdateSegmentsInfo(context.TODO(), op)
|
||||
if err != nil {
|
||||
log.Warn("update import segment rows failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
|
@ -321,14 +322,14 @@ func (s *importScheduler) processInProgressImport(task ImportTask) {
|
|||
}
|
||||
op1 := UpdateBinlogsOperator(info.GetSegmentID(), info.GetBinlogs(), info.GetStatslogs(), info.GetDeltalogs())
|
||||
op2 := UpdateStatusOperator(info.GetSegmentID(), commonpb.SegmentState_Flushed)
|
||||
err = s.meta.UpdateSegmentsInfo(op1, op2)
|
||||
err = s.meta.UpdateSegmentsInfo(context.TODO(), op1, op2)
|
||||
if err != nil {
|
||||
log.Warn("update import segment binlogs failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
}
|
||||
}
|
||||
completeTime := time.Now().Format("2006-01-02T15:04:05Z07:00")
|
||||
err = s.imeta.UpdateTask(task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed), UpdateCompleteTime(completeTime))
|
||||
err = s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed), UpdateCompleteTime(completeTime))
|
||||
if err != nil {
|
||||
log.Warn("update import task failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
return
|
||||
|
@ -355,14 +356,14 @@ func (s *importScheduler) processFailed(task ImportTask) {
|
|||
segments := append(originSegmentIDs, statsSegmentIDs...)
|
||||
for _, segment := range segments {
|
||||
op := UpdateStatusOperator(segment, commonpb.SegmentState_Dropped)
|
||||
err := s.meta.UpdateSegmentsInfo(op)
|
||||
err := s.meta.UpdateSegmentsInfo(context.TODO(), op)
|
||||
if err != nil {
|
||||
log.Warn("drop import segment failed", WrapTaskLog(task, zap.Int64("segment", segment), zap.Error(err))...)
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(segments) > 0 {
|
||||
err := s.imeta.UpdateTask(task.GetTaskID(), UpdateSegmentIDs(nil), UpdateStatsSegmentIDs(nil))
|
||||
err := s.imeta.UpdateTask(context.TODO(), task.GetTaskID(), UpdateSegmentIDs(nil), UpdateStatsSegmentIDs(nil))
|
||||
if err != nil {
|
||||
log.Warn("update import task segments failed", WrapTaskLog(task, zap.Error(err))...)
|
||||
}
|
||||
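Inside the scheduler the new parameters are filled with context.TODO(), Go's explicit placeholder for call sites that have not yet been wired to a real request or lifecycle context. A hedged sketch of what a later refinement could look like; this is an assumption, the commit itself keeps context.TODO():

// hypothetical follow-up: let the loop that drives the scheduler supply the context
func (s *importScheduler) processWithCtx(ctx context.Context) {
	for _, job := range s.imeta.GetJobBy(ctx) {
		for _, task := range s.imeta.GetTaskBy(ctx, WithJob(job.GetJobID())) {
			_ = task // dispatch on task.GetState() exactly as process() does today
		}
	}
}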
|
|
|
@ -53,9 +53,9 @@ func (s *ImportSchedulerSuite) SetupTest() {
|
|||
s.collectionID = 1
|
||||
|
||||
s.catalog = mocks.NewDataCoordCatalog(s.T())
|
||||
s.catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
s.catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
s.catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
s.catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
s.catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
s.catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
s.catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil)
|
||||
s.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil)
|
||||
s.catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil)
|
||||
|
@ -73,14 +73,14 @@ func (s *ImportSchedulerSuite) SetupTest() {
|
|||
ID: s.collectionID,
|
||||
Schema: newTestSchema(),
|
||||
})
|
||||
s.imeta, err = NewImportMeta(s.catalog)
|
||||
s.imeta, err = NewImportMeta(context.TODO(), s.catalog)
|
||||
s.NoError(err)
|
||||
s.scheduler = NewImportScheduler(s.meta, s.cluster, s.alloc, s.imeta).(*importScheduler)
|
||||
}
|
||||
|
||||
func (s *ImportSchedulerSuite) TestProcessPreImport() {
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
var task ImportTask = &preImportTask{
|
||||
PreImportTask: &datapb.PreImportTask{
|
||||
JobID: 0,
|
||||
|
@ -90,7 +90,7 @@ func (s *ImportSchedulerSuite) TestProcessPreImport() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("preimport task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
var job ImportJob = &importJob{
|
||||
ImportJob: &datapb.ImportJob{
|
||||
|
@ -101,7 +101,7 @@ func (s *ImportSchedulerSuite) TestProcessPreImport() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import job"),
|
||||
}
|
||||
err = s.imeta.AddJob(job)
|
||||
err = s.imeta.AddJob(context.TODO(), job)
|
||||
s.NoError(err)
|
||||
|
||||
// pending -> inProgress
|
||||
|
@ -117,7 +117,7 @@ func (s *ImportSchedulerSuite) TestProcessPreImport() {
|
|||
return []*session.Session{sess}
|
||||
})
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(datapb.ImportTaskStateV2_InProgress, task.GetState())
|
||||
s.Equal(int64(nodeID), task.GetNodeID())
|
||||
|
||||
|
@ -126,19 +126,19 @@ func (s *ImportSchedulerSuite) TestProcessPreImport() {
|
|||
State: datapb.ImportTaskStateV2_Completed,
|
||||
}, nil)
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(datapb.ImportTaskStateV2_Completed, task.GetState())
|
||||
|
||||
// drop import task
|
||||
s.cluster.EXPECT().DropImport(mock.Anything, mock.Anything).Return(nil)
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(int64(NullNodeID), task.GetNodeID())
|
||||
}
|
||||
|
||||
func (s *ImportSchedulerSuite) TestProcessImport() {
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
var task ImportTask = &importTask{
|
||||
ImportTaskV2: &datapb.ImportTaskV2{
|
||||
JobID: 0,
|
||||
|
@ -162,7 +162,7 @@ func (s *ImportSchedulerSuite) TestProcessImport() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
var job ImportJob = &importJob{
|
||||
ImportJob: &datapb.ImportJob{
|
||||
|
@ -175,7 +175,7 @@ func (s *ImportSchedulerSuite) TestProcessImport() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import job"),
|
||||
}
|
||||
err = s.imeta.AddJob(job)
|
||||
err = s.imeta.AddJob(context.TODO(), job)
|
||||
s.NoError(err)
|
||||
|
||||
// pending -> inProgress
|
||||
|
@ -193,7 +193,7 @@ func (s *ImportSchedulerSuite) TestProcessImport() {
|
|||
return []*session.Session{sess}
|
||||
})
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(datapb.ImportTaskStateV2_InProgress, task.GetState())
|
||||
s.Equal(int64(nodeID), task.GetNodeID())
|
||||
|
||||
|
@ -205,19 +205,19 @@ func (s *ImportSchedulerSuite) TestProcessImport() {
|
|||
State: datapb.ImportTaskStateV2_Completed,
|
||||
}, nil)
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(datapb.ImportTaskStateV2_Completed, task.GetState())
|
||||
|
||||
// drop import task
|
||||
s.cluster.EXPECT().DropImport(mock.Anything, mock.Anything).Return(nil)
|
||||
s.scheduler.process()
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(int64(NullNodeID), task.GetNodeID())
|
||||
}
|
||||
|
||||
func (s *ImportSchedulerSuite) TestProcessFailed() {
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
var task ImportTask = &importTask{
|
||||
ImportTaskV2: &datapb.ImportTaskV2{
|
||||
JobID: 0,
|
||||
|
@ -230,7 +230,7 @@ func (s *ImportSchedulerSuite) TestProcessFailed() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import task"),
|
||||
}
|
||||
err := s.imeta.AddTask(task)
|
||||
err := s.imeta.AddTask(context.TODO(), task)
|
||||
s.NoError(err)
|
||||
var job ImportJob = &importJob{
|
||||
ImportJob: &datapb.ImportJob{
|
||||
|
@ -243,7 +243,7 @@ func (s *ImportSchedulerSuite) TestProcessFailed() {
|
|||
},
|
||||
tr: timerecord.NewTimeRecorder("import job"),
|
||||
}
|
||||
err = s.imeta.AddJob(job)
|
||||
err = s.imeta.AddJob(context.TODO(), job)
|
||||
s.NoError(err)
|
||||
|
||||
s.catalog.EXPECT().AddSegment(mock.Anything, mock.Anything).Return(nil)
|
||||
|
@ -264,7 +264,7 @@ func (s *ImportSchedulerSuite) TestProcessFailed() {
|
|||
s.NoError(err)
|
||||
}
|
||||
for _, id := range task.(*importTask).GetSegmentIDs() {
|
||||
segment := s.meta.GetSegment(id)
|
||||
segment := s.meta.GetSegment(context.TODO(), id)
|
||||
s.NotNil(segment)
|
||||
}
|
||||
|
||||
|
@ -272,10 +272,10 @@ func (s *ImportSchedulerSuite) TestProcessFailed() {
|
|||
s.catalog.EXPECT().AlterSegments(mock.Anything, mock.Anything).Return(nil)
|
||||
s.scheduler.process()
|
||||
for _, id := range task.(*importTask).GetSegmentIDs() {
|
||||
segment := s.meta.GetSegment(id)
|
||||
segment := s.meta.GetSegment(context.TODO(), id)
|
||||
s.Equal(commonpb.SegmentState_Dropped, segment.GetState())
|
||||
}
|
||||
task = s.imeta.GetTask(task.GetTaskID())
|
||||
task = s.imeta.GetTask(context.TODO(), task.GetTaskID())
|
||||
s.Equal(datapb.ImportTaskStateV2_Failed, task.GetState())
|
||||
s.Equal(0, len(task.(*importTask).GetSegmentIDs()))
|
||||
s.Equal(int64(NullNodeID), task.GetNodeID())
|
||||
|
|
|
@ -250,7 +250,7 @@ func AssemblePreImportRequest(task ImportTask, job ImportJob) *datapb.PreImportR
|
|||
func AssembleImportRequest(task ImportTask, job ImportJob, meta *meta, alloc allocator.Allocator) (*datapb.ImportRequest, error) {
|
||||
requestSegments := make([]*datapb.ImportRequestSegment, 0)
|
||||
for _, segmentID := range task.(*importTask).GetSegmentIDs() {
|
||||
segment := meta.GetSegment(segmentID)
|
||||
segment := meta.GetSegment(context.TODO(), segmentID)
|
||||
if segment == nil {
|
||||
return nil, merr.WrapErrSegmentNotFound(segmentID, "assemble import request failed")
|
||||
}
|
||||
|
@ -359,7 +359,7 @@ func CheckDiskQuota(job ImportJob, meta *meta, imeta ImportMeta) (int64, error)
|
|||
requestedTotal int64
|
||||
requestedCollections = make(map[int64]int64)
|
||||
)
|
||||
for _, j := range imeta.GetJobBy() {
|
||||
for _, j := range imeta.GetJobBy(context.TODO()) {
|
||||
requested := j.GetRequestedDiskSize()
|
||||
requestedTotal += requested
|
||||
requestedCollections[j.GetCollectionID()] += requested
|
||||
|
@ -369,7 +369,7 @@ func CheckDiskQuota(job ImportJob, meta *meta, imeta ImportMeta) (int64, error)
|
|||
quotaInfo := meta.GetQuotaInfo()
|
||||
totalUsage, collectionsUsage := quotaInfo.TotalBinlogSize, quotaInfo.CollectionBinlogSize
|
||||
|
||||
tasks := imeta.GetTaskBy(WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
|
||||
files := make([]*datapb.ImportFileStats, 0)
|
||||
for _, task := range tasks {
|
||||
files = append(files, task.GetFileStats()...)
|
||||
|
@ -403,11 +403,11 @@ func CheckDiskQuota(job ImportJob, meta *meta, imeta ImportMeta) (int64, error)
|
|||
}
|
||||
|
||||
func getPendingProgress(jobID int64, imeta ImportMeta) float32 {
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(PreImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(PreImportTaskType))
|
||||
preImportingFiles := lo.SumBy(tasks, func(task ImportTask) int {
|
||||
return len(task.GetFileStats())
|
||||
})
|
||||
totalFiles := len(imeta.GetJob(jobID).GetFiles())
|
||||
totalFiles := len(imeta.GetJob(context.TODO(), jobID).GetFiles())
|
||||
if totalFiles == 0 {
|
||||
return 1
|
||||
}
|
||||
|
@ -415,7 +415,7 @@ func getPendingProgress(jobID int64, imeta ImportMeta) float32 {
|
|||
}
|
||||
|
||||
func getPreImportingProgress(jobID int64, imeta ImportMeta) float32 {
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(PreImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(PreImportTaskType))
|
||||
completedTasks := lo.Filter(tasks, func(task ImportTask, _ int) bool {
|
||||
return task.GetState() == datapb.ImportTaskStateV2_Completed
|
||||
})
|
||||
|
@ -426,7 +426,7 @@ func getPreImportingProgress(jobID int64, imeta ImportMeta) float32 {
|
|||
}
|
||||
|
||||
func getImportRowsInfo(jobID int64, imeta ImportMeta, meta *meta) (importedRows, totalRows int64) {
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(ImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(ImportTaskType))
|
||||
segmentIDs := make([]int64, 0)
|
||||
for _, task := range tasks {
|
||||
totalRows += lo.SumBy(task.GetFileStats(), func(file *datapb.ImportFileStats) int64 {
|
||||
|
@ -450,7 +450,7 @@ func getStatsProgress(jobID int64, imeta ImportMeta, sjm StatsJobManager) float3
|
|||
if !Params.DataCoordCfg.EnableStatsTask.GetAsBool() {
|
||||
return 1
|
||||
}
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(ImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(ImportTaskType))
|
||||
originSegmentIDs := lo.FlatMap(tasks, func(t ImportTask, _ int) []int64 {
|
||||
return t.(*importTask).GetSegmentIDs()
|
||||
})
|
||||
|
@ -468,11 +468,11 @@ func getStatsProgress(jobID int64, imeta ImportMeta, sjm StatsJobManager) float3
|
|||
}
|
||||
|
||||
func getIndexBuildingProgress(jobID int64, imeta ImportMeta, meta *meta) float32 {
|
||||
job := imeta.GetJob(jobID)
|
||||
job := imeta.GetJob(context.TODO(), jobID)
|
||||
if !Params.DataCoordCfg.WaitForIndex.GetAsBool() {
|
||||
return 1
|
||||
}
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(ImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(ImportTaskType))
|
||||
originSegmentIDs := lo.FlatMap(tasks, func(t ImportTask, _ int) []int64 {
|
||||
return t.(*importTask).GetSegmentIDs()
|
||||
})
|
||||
|
@ -500,7 +500,7 @@ func getIndexBuildingProgress(jobID int64, imeta ImportMeta, meta *meta) float32
|
|||
// TODO: Wrap a function to map status to user status.
|
||||
// TODO: Save these progress to job instead of recalculating.
|
||||
func GetJobProgress(jobID int64, imeta ImportMeta, meta *meta, sjm StatsJobManager) (int64, internalpb.ImportJobState, int64, int64, string) {
|
||||
job := imeta.GetJob(jobID)
|
||||
job := imeta.GetJob(context.TODO(), jobID)
|
||||
if job == nil {
|
||||
return 0, internalpb.ImportJobState_Failed, 0, 0, fmt.Sprintf("import job does not exist, jobID=%d", jobID)
|
||||
}
|
||||
|
@ -539,7 +539,7 @@ func GetJobProgress(jobID int64, imeta ImportMeta, meta *meta, sjm StatsJobManag
|
|||
|
||||
func GetTaskProgresses(jobID int64, imeta ImportMeta, meta *meta) []*internalpb.ImportTaskProgress {
|
||||
progresses := make([]*internalpb.ImportTaskProgress, 0)
|
||||
tasks := imeta.GetTaskBy(WithJob(jobID), WithType(ImportTaskType))
|
||||
tasks := imeta.GetTaskBy(context.TODO(), WithJob(jobID), WithType(ImportTaskType))
|
||||
for _, task := range tasks {
|
||||
totalRows := lo.SumBy(task.GetFileStats(), func(file *datapb.ImportFileStats) int64 {
|
||||
return file.GetTotalRows()
|
||||
|
@ -578,7 +578,7 @@ func DropImportTask(task ImportTask, cluster Cluster, tm ImportMeta) error {
|
|||
return err
|
||||
}
|
||||
log.Info("drop import in datanode done", WrapTaskLog(task)...)
|
||||
return tm.UpdateTask(task.GetTaskID(), UpdateNodeID(NullNodeID))
|
||||
return tm.UpdateTask(context.TODO(), task.GetTaskID(), UpdateNodeID(NullNodeID))
|
||||
}
|
||||
|
||||
func ListBinlogsAndGroupBySegment(ctx context.Context, cm storage.ChunkManager, importFile *internalpb.ImportFile) ([]*internalpb.ImportFile, error) {
|
||||
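For orientation, the progress helpers touched above (getPendingProgress, getPreImportingProgress, getImportRowsInfo, getIndexBuildingProgress) feed GetJobProgress, which the test in the next file exercises with a staged weighting: judging from its assertions, the pending stage contributes 10, preimporting 30, importing 30 scaled by importedRows/totalRows, and a completed job reports 100; the stats and index-building weights are not visible in this excerpt. A rough sketch of that composition, inferred from the test rather than from a documented constant:

// roughImportProgress mirrors the arithmetic asserted in TestImportUtil_GetImportProgress below;
// treat the stage weights as an observation of that test, not an authoritative formula.
func roughImportProgress(importedRows, totalRows int64) int64 {
	p := float32(10) // pending stage done
	p += 30          // preimporting stage done
	if totalRows > 0 {
		p += 30 * float32(importedRows) / float32(totalRows) // importing stage
	}
	return int64(p) // stats, index building and completion take a finished job to 100
}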
|
|
|
@ -237,11 +237,11 @@ func TestImportUtil_RegroupImportFiles(t *testing.T) {
|
|||
|
||||
func TestImportUtil_CheckDiskQuota(t *testing.T) {
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil)
|
||||
|
@ -252,7 +252,7 @@ func TestImportUtil_CheckDiskQuota(t *testing.T) {
|
|||
catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil)
|
||||
|
||||
imeta, err := NewImportMeta(catalog)
|
||||
imeta, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
meta, err := newMeta(context.TODO(), catalog, nil)
|
||||
|
@ -264,7 +264,7 @@ func TestImportUtil_CheckDiskQuota(t *testing.T) {
|
|||
CollectionID: 100,
|
||||
},
|
||||
}
|
||||
err = imeta.AddJob(job)
|
||||
err = imeta.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pit := &preImportTask{
|
||||
|
@ -277,7 +277,7 @@ func TestImportUtil_CheckDiskQuota(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(pit)
|
||||
err = imeta.AddTask(context.TODO(), pit)
|
||||
assert.NoError(t, err)
|
||||
|
||||
Params.Save(Params.QuotaConfig.DiskProtectionEnabled.Key, "false")
|
||||
|
@ -324,12 +324,12 @@ func TestImportUtil_DropImportTask(t *testing.T) {
|
|||
cluster.EXPECT().DropImport(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
imeta, err := NewImportMeta(catalog)
|
||||
imeta, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
task := &importTask{
|
||||
|
@ -338,7 +338,7 @@ func TestImportUtil_DropImportTask(t *testing.T) {
|
|||
TaskID: 1,
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(task)
|
||||
err = imeta.AddTask(context.TODO(), task)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = DropImportTask(task, cluster, imeta)
|
||||
|
@ -421,16 +421,16 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
mockErr := "mock err"
|
||||
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().AddSegment(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().AlterSegments(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().ListAnalyzeTasks(mock.Anything).Return(nil, nil)
|
||||
|
@ -438,7 +438,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil)
|
||||
|
||||
imeta, err := NewImportMeta(catalog)
|
||||
imeta, err := NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
|
||||
meta, err := newMeta(context.TODO(), catalog, nil)
|
||||
|
@ -462,7 +462,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
Files: []*internalpb.ImportFile{file1, file2, file3},
|
||||
},
|
||||
}
|
||||
err = imeta.AddJob(job)
|
||||
err = imeta.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pit1 := &preImportTask{
|
||||
|
@ -481,7 +481,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(pit1)
|
||||
err = imeta.AddTask(context.TODO(), pit1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pit2 := &preImportTask{
|
||||
|
@ -496,7 +496,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(pit2)
|
||||
err = imeta.AddTask(context.TODO(), pit2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
it1 := &importTask{
|
||||
|
@ -517,7 +517,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(it1)
|
||||
err = imeta.AddTask(context.TODO(), it1)
|
||||
assert.NoError(t, err)
|
||||
err = meta.AddSegment(ctx, &SegmentInfo{
|
||||
SegmentInfo: &datapb.SegmentInfo{ID: 10, IsImporting: true, State: commonpb.SegmentState_Flushed}, currRows: 50,
|
||||
|
@ -546,7 +546,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
err = imeta.AddTask(it2)
|
||||
err = imeta.AddTask(context.TODO(), it2)
|
||||
assert.NoError(t, err)
|
||||
err = meta.AddSegment(ctx, &SegmentInfo{
|
||||
SegmentInfo: &datapb.SegmentInfo{ID: 20, IsImporting: true, State: commonpb.SegmentState_Flushed}, currRows: 50,
|
||||
|
@ -562,7 +562,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// failed state
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(mockErr))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(mockErr))
|
||||
assert.NoError(t, err)
|
||||
|
||||
progress, state, _, _, reason := GetJobProgress(job.GetJobID(), imeta, meta, nil)
|
||||
|
@ -577,7 +577,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.NotEqual(t, "", reason)
|
||||
|
||||
// pending state
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Pending))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Pending))
|
||||
assert.NoError(t, err)
|
||||
progress, state, _, _, reason = GetJobProgress(job.GetJobID(), imeta, meta, nil)
|
||||
assert.Equal(t, int64(10), progress)
|
||||
|
@ -585,7 +585,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.Equal(t, "", reason)
|
||||
|
||||
// preImporting state
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_PreImporting))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_PreImporting))
|
||||
assert.NoError(t, err)
|
||||
progress, state, _, _, reason = GetJobProgress(job.GetJobID(), imeta, meta, nil)
|
||||
assert.Equal(t, int64(10+30), progress)
|
||||
|
@ -593,7 +593,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.Equal(t, "", reason)
|
||||
|
||||
// importing state, segmentImportedRows/totalRows = 0.5
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Importing))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Importing))
|
||||
assert.NoError(t, err)
|
||||
progress, state, _, _, reason = GetJobProgress(job.GetJobID(), imeta, meta, nil)
|
||||
assert.Equal(t, int64(10+30+30*0.5), progress)
|
||||
|
@ -601,17 +601,17 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.Equal(t, "", reason)
|
||||
|
||||
// importing state, segmentImportedRows/totalRows = 1
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(10, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(10, 100))
|
||||
assert.NoError(t, err)
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(20, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(20, 100))
|
||||
assert.NoError(t, err)
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(11, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(11, 100))
|
||||
assert.NoError(t, err)
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(12, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(12, 100))
|
||||
assert.NoError(t, err)
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(21, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(21, 100))
|
||||
assert.NoError(t, err)
|
||||
err = meta.UpdateSegmentsInfo(UpdateImportedRows(22, 100))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateImportedRows(22, 100))
|
||||
assert.NoError(t, err)
|
||||
progress, state, _, _, reason = GetJobProgress(job.GetJobID(), imeta, meta, nil)
|
||||
assert.Equal(t, int64(float32(10+30+30)), progress)
|
||||
|
@ -619,7 +619,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.Equal(t, "", reason)
|
||||
|
||||
// stats state, len(statsSegmentIDs) / len(originalSegmentIDs) = 0.5
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Stats))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Stats))
|
||||
assert.NoError(t, err)
|
||||
sjm := NewMockStatsJobManager(t)
|
||||
sjm.EXPECT().GetStatsTaskState(mock.Anything, mock.Anything).RunAndReturn(func(segmentID int64, _ indexpb.StatsSubJob) indexpb.JobState {
|
||||
|
@ -642,7 +642,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) {
|
|||
assert.Equal(t, "", reason)
|
||||
|
||||
// completed state
|
||||
err = imeta.UpdateJob(job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Completed))
|
||||
err = imeta.UpdateJob(context.TODO(), job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Completed))
|
||||
assert.NoError(t, err)
|
||||
progress, state, _, _, reason = GetJobProgress(job.GetJobID(), imeta, meta, sjm)
|
||||
assert.Equal(t, int64(100), progress)
|
||||
|
|
|
@ -374,13 +374,13 @@ func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID)
|
|||
return false, 0
|
||||
}
|
||||
|
||||
func (m *indexMeta) CreateIndex(index *model.Index) error {
|
||||
func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error {
|
||||
log.Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID),
|
||||
zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if err := m.catalog.CreateIndex(m.ctx, index); err != nil {
|
||||
if err := m.catalog.CreateIndex(ctx, index); err != nil {
|
||||
log.Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID),
|
||||
zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID),
|
||||
zap.String("indexName", index.IndexName), zap.Error(err))
|
||||
|
@ -410,7 +410,7 @@ func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) err
|
|||
}
|
||||
|
||||
// AddSegmentIndex adds the index meta corresponding the indexBuildID to meta table.
|
||||
func (m *indexMeta) AddSegmentIndex(segIndex *model.SegmentIndex) error {
|
||||
func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.SegmentIndex) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
|
@ -420,7 +420,7 @@ func (m *indexMeta) AddSegmentIndex(segIndex *model.SegmentIndex) error {
|
|||
zap.Int64("buildID", buildID))
|
||||
|
||||
segIndex.IndexState = commonpb.IndexState_Unissued
|
||||
if err := m.catalog.CreateSegmentIndex(m.ctx, segIndex); err != nil {
|
||||
if err := m.catalog.CreateSegmentIndex(ctx, segIndex); err != nil {
|
||||
log.Warn("meta update: adding segment index failed",
|
||||
zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
|
||||
zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
|
||||
|
@ -561,7 +561,7 @@ func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string)
|
|||
}
|
||||
|
||||
// MarkIndexAsDeleted will mark the corresponding index as deleted, and recycleUnusedIndexFiles will recycle these tasks.
|
||||
func (m *indexMeta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) error {
|
||||
func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, indexIDs []UniqueID) error {
|
||||
log.Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID),
|
||||
zap.Int64s("indexIDs", indexIDs))
|
||||
|
||||
|
@ -585,7 +585,7 @@ func (m *indexMeta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) err
|
|||
if len(indexes) == 0 {
|
||||
return nil
|
||||
}
|
||||
err := m.catalog.AlterIndexes(m.ctx, indexes)
|
||||
err := m.catalog.AlterIndexes(ctx, indexes)
|
||||
if err != nil {
|
||||
log.Error("failed to alter index meta in meta store", zap.Int("indexes num", len(indexes)), zap.Error(err))
|
||||
return err
|
||||
|
@ -890,11 +890,11 @@ func (m *indexMeta) SetStoredIndexFileSizeMetric(collections map[UniqueID]*colle
|
|||
return total
|
||||
}
|
||||
|
||||
func (m *indexMeta) RemoveSegmentIndex(collID, partID, segID, indexID, buildID UniqueID) error {
|
||||
func (m *indexMeta) RemoveSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
err := m.catalog.DropSegmentIndex(m.ctx, collID, partID, segID, buildID)
|
||||
err := m.catalog.DropSegmentIndex(ctx, collID, partID, segID, buildID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -927,11 +927,11 @@ func (m *indexMeta) GetDeletedIndexes() []*model.Index {
|
|||
return deletedIndexes
|
||||
}
|
||||
|
||||
func (m *indexMeta) RemoveIndex(collID, indexID UniqueID) error {
|
||||
func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
log.Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
|
||||
err := m.catalog.DropIndex(m.ctx, collID, indexID)
|
||||
err := m.catalog.DropIndex(ctx, collID, indexID)
|
||||
if err != nil {
|
||||
log.Info("IndexCoord meta table remove index fail", zap.Int64("collectionID", collID),
|
||||
zap.Int64("indexID", indexID), zap.Error(err))
|
||||
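The index meta changes make the same swap: catalog writes that previously used the long-lived m.ctx stored on indexMeta (CreateIndex, AlterIndexes, DropSegmentIndex, DropIndex) now take the caller's ctx, so a cancelled or expired RPC context actually reaches the metastore. A small illustrative caller; the helper name and timeout are assumptions, only the RemoveIndex signature comes from the diff above:

// dropIndexBounded is a hypothetical caller: the deadline set here now propagates
// into m.catalog.DropIndex instead of being ignored in favour of a stored m.ctx.
func dropIndexBounded(ctx context.Context, m *indexMeta, collID, indexID UniqueID) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return m.RemoveIndex(ctx, collID, indexID)
}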
|
|
|
@ -283,7 +283,7 @@ func TestMeta_CanCreateIndex(t *testing.T) {
|
|||
UserIndexParams: userIndexParams,
|
||||
}
|
||||
|
||||
err = m.CreateIndex(index)
|
||||
err = m.CreateIndex(context.TODO(), index)
|
||||
assert.NoError(t, err)
|
||||
|
||||
tmpIndexID, err = m.CanCreateIndex(req)
|
||||
|
@ -481,7 +481,7 @@ func TestMeta_CreateIndex(t *testing.T) {
|
|||
).Return(nil)
|
||||
|
||||
m := newSegmentIndexMeta(sc)
|
||||
err := m.CreateIndex(index)
|
||||
err := m.CreateIndex(context.TODO(), index)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
|
@ -493,7 +493,7 @@ func TestMeta_CreateIndex(t *testing.T) {
|
|||
).Return(errors.New("fail"))
|
||||
|
||||
m := newSegmentIndexMeta(ec)
|
||||
err := m.CreateIndex(index)
|
||||
err := m.CreateIndex(context.TODO(), index)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
@ -534,13 +534,13 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
|
|||
}
|
||||
|
||||
t.Run("save meta fail", func(t *testing.T) {
|
||||
err := m.AddSegmentIndex(segmentIndex)
|
||||
err := m.AddSegmentIndex(context.TODO(), segmentIndex)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
m.catalog = sc
|
||||
err := m.AddSegmentIndex(segmentIndex)
|
||||
err := m.AddSegmentIndex(context.TODO(), segmentIndex)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
@ -848,19 +848,19 @@ func TestMeta_MarkIndexAsDeleted(t *testing.T) {
|
|||
|
||||
t.Run("fail", func(t *testing.T) {
|
||||
m.catalog = ec
|
||||
err := m.MarkIndexAsDeleted(collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
err := m.MarkIndexAsDeleted(context.TODO(), collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
m.catalog = sc
|
||||
err := m.MarkIndexAsDeleted(collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
err := m.MarkIndexAsDeleted(context.TODO(), collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = m.MarkIndexAsDeleted(collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
err = m.MarkIndexAsDeleted(context.TODO(), collID, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = m.MarkIndexAsDeleted(collID+1, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
err = m.MarkIndexAsDeleted(context.TODO(), collID+1, []UniqueID{indexID, indexID + 1, indexID + 2})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
@ -1436,7 +1436,7 @@ func TestRemoveIndex(t *testing.T) {
|
|||
Return(expectedErr)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.RemoveIndex(collID, indexID)
|
||||
err := m.RemoveIndex(context.TODO(), collID, indexID)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error")
|
||||
})
|
||||
|
@ -1456,7 +1456,7 @@ func TestRemoveIndex(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
err := m.RemoveIndex(collID, indexID)
|
||||
err := m.RemoveIndex(context.TODO(), collID, indexID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(m.indexes), 0)
|
||||
})
|
||||
|
@ -1471,7 +1471,7 @@ func TestRemoveSegmentIndex(t *testing.T) {
|
|||
Return(expectedErr)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.RemoveSegmentIndex(0, 0, 0, 0, 0)
|
||||
err := m.RemoveSegmentIndex(context.TODO(), 0, 0, 0, 0, 0)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error")
|
||||
|
@ -1493,7 +1493,7 @@ func TestRemoveSegmentIndex(t *testing.T) {
|
|||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
}
|
||||
|
||||
err := m.RemoveSegmentIndex(collID, partID, segID, indexID, buildID)
|
||||
err := m.RemoveSegmentIndex(context.TODO(), collID, partID, segID, indexID, buildID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, len(m.segmentIndexes), 0)
|
||||
|
|
|
@@ -53,7 +53,7 @@ func (s *Server) startIndexService(ctx context.Context) {
go s.createIndexForSegmentLoop(ctx)
}

func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) error {
func (s *Server) createIndexForSegment(ctx context.Context, segment *SegmentInfo, indexID UniqueID) error {
if !segment.GetIsSorted() && Params.DataCoordCfg.EnableStatsTask.GetAsBool() && !segment.GetIsImporting() && segment.Level != datapb.SegmentLevel_L0 {
log.Info("segment not sorted, skip create index", zap.Int64("segmentID", segment.GetID()))
return nil

@@ -73,14 +73,14 @@ func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) e
CreatedUTCTime: uint64(time.Now().Unix()),
WriteHandoff: false,
}
if err = s.meta.indexMeta.AddSegmentIndex(segIndex); err != nil {
if err = s.meta.indexMeta.AddSegmentIndex(ctx, segIndex); err != nil {
return err
}
s.taskScheduler.enqueue(newIndexBuildTask(buildID))
return nil
}

func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
func (s *Server) createIndexesForSegment(ctx context.Context, segment *SegmentInfo) error {
if Params.DataCoordCfg.EnableStatsTask.GetAsBool() && !segment.GetIsSorted() && !segment.GetIsImporting() {
log.Debug("segment is not sorted by pk, skip create indexes", zap.Int64("segmentID", segment.GetID()))
return nil

@@ -94,7 +94,7 @@ func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
indexIDToSegIndexes := s.meta.indexMeta.GetSegmentIndexes(segment.CollectionID, segment.ID)
for _, index := range indexes {
if _, ok := indexIDToSegIndexes[index.IndexID]; !ok {
if err := s.createIndexForSegment(segment, index.IndexID); err != nil {
if err := s.createIndexForSegment(ctx, segment, index.IndexID); err != nil {
log.Warn("create index for segment fail", zap.Int64("segmentID", segment.ID),
zap.Int64("indexID", index.IndexID))
return err

@@ -104,8 +104,8 @@ func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
return nil
}

func (s *Server) getUnIndexTaskSegments() []*SegmentInfo {
flushedSegments := s.meta.SelectSegments(SegmentFilterFunc(func(seg *SegmentInfo) bool {
func (s *Server) getUnIndexTaskSegments(ctx context.Context) []*SegmentInfo {
flushedSegments := s.meta.SelectSegments(ctx, SegmentFilterFunc(func(seg *SegmentInfo) bool {
return isFlush(seg)
}))

@@ -130,32 +130,32 @@ func (s *Server) createIndexForSegmentLoop(ctx context.Context) {
log.Warn("DataCoord context done, exit...")
return
case <-ticker.C:
segments := s.getUnIndexTaskSegments()
segments := s.getUnIndexTaskSegments(ctx)
for _, segment := range segments {
if err := s.createIndexesForSegment(segment); err != nil {
if err := s.createIndexesForSegment(ctx, segment); err != nil {
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
continue
}
}
case collectionID := <-s.notifyIndexChan:
log.Info("receive create index notify", zap.Int64("collectionID", collectionID))
segments := s.meta.SelectSegments(WithCollection(collectionID), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := s.meta.SelectSegments(ctx, WithCollection(collectionID), SegmentFilterFunc(func(info *SegmentInfo) bool {
return isFlush(info) && (!Params.DataCoordCfg.EnableStatsTask.GetAsBool() || info.GetIsSorted())
}))
for _, segment := range segments {
if err := s.createIndexesForSegment(segment); err != nil {
if err := s.createIndexesForSegment(ctx, segment); err != nil {
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
continue
}
}
case segID := <-getBuildIndexChSingleton():
log.Info("receive new flushed segment", zap.Int64("segmentID", segID))
segment := s.meta.GetSegment(segID)
segment := s.meta.GetSegment(ctx, segID)
if segment == nil {
log.Warn("segment is not exist, no need to build index", zap.Int64("segmentID", segID))
continue
}
if err := s.createIndexesForSegment(segment); err != nil {
if err := s.createIndexesForSegment(ctx, segment); err != nil {
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
continue
}

@@ -259,7 +259,7 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
}

// Get flushed segments and create index
err = s.meta.indexMeta.CreateIndex(index)
err = s.meta.indexMeta.CreateIndex(ctx, index)
if err != nil {
log.Error("CreateIndex fail",
zap.Int64("fieldID", req.GetFieldID()), zap.String("indexName", req.GetIndexName()), zap.Error(err))

@@ -408,7 +408,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe

indexInfo := &indexpb.IndexInfo{}
// The total rows of all indexes should be based on the current perspective
segments := s.selectSegmentIndexesStats(WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := s.selectSegmentIndexesStats(ctx, WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
return info.GetLevel() != datapb.SegmentLevel_L0 && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
}))

@@ -460,10 +460,10 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
return ret, nil
}

func (s *Server) selectSegmentIndexesStats(filters ...SegmentFilter) map[int64]*indexStats {
func (s *Server) selectSegmentIndexesStats(ctx context.Context, filters ...SegmentFilter) map[int64]*indexStats {
ret := make(map[int64]*indexStats)

segments := s.meta.SelectSegments(filters...)
segments := s.meta.SelectSegments(ctx, filters...)
segmentIDs := lo.Map(segments, func(info *SegmentInfo, i int) int64 {
return info.GetID()
})

@@ -659,7 +659,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
}

// The total rows of all indexes should be based on the current perspective
segments := s.selectSegmentIndexesStats(WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := s.selectSegmentIndexesStats(ctx, WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
return info.GetLevel() != datapb.SegmentLevel_L0 && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
}))

@@ -709,7 +709,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
}

// The total rows of all indexes should be based on the current perspective
segments := s.selectSegmentIndexesStats(WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := s.selectSegmentIndexesStats(ctx, WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
return info.GetLevel() != datapb.SegmentLevel_L0 && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
}))

@@ -767,7 +767,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
}

// The total rows of all indexes should be based on the current perspective
segments := s.selectSegmentIndexesStats(WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := s.selectSegmentIndexesStats(ctx, WithCollection(req.GetCollectionID()), SegmentFilterFunc(func(info *SegmentInfo) bool {
return info.GetLevel() != datapb.SegmentLevel_L0 && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
}))

@@ -833,7 +833,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
// from being dropped at the same time when dropping_partition in version 2.1
if len(req.GetPartitionIDs()) == 0 {
// drop collection index
err := s.meta.indexMeta.MarkIndexAsDeleted(req.GetCollectionID(), indexIDs)
err := s.meta.indexMeta.MarkIndexAsDeleted(ctx, req.GetCollectionID(), indexIDs)
if err != nil {
log.Warn("DropIndex fail", zap.String("indexName", req.IndexName), zap.Error(err))
return merr.Status(err), nil
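The hunks above follow one mechanical pattern: the request-scoped ctx received by the gRPC handler (or the loop's lifetime ctx) is threaded through the server helpers into the meta layer instead of being recreated there. A minimal, self-contained sketch of the pattern; segmentMeta, server and fakeMeta are illustrative stand-ins, not the actual datacoord types:

package main

import (
	"context"
	"fmt"
)

// segmentMeta is a stand-in for the refined meta interface: every read or
// write now receives the caller's context explicitly.
type segmentMeta interface {
	GetSegment(ctx context.Context, segID int64) string
}

type server struct{ meta segmentMeta }

// createIndexForSegment mirrors the hunks above: the handler's ctx is the
// first argument and is forwarded, unchanged, to the meta call.
func (s *server) createIndexForSegment(ctx context.Context, segID int64) error {
	if seg := s.meta.GetSegment(ctx, segID); seg == "" {
		return fmt.Errorf("segment %d not found", segID)
	}
	return nil
}

type fakeMeta struct{}

func (fakeMeta) GetSegment(_ context.Context, segID int64) string {
	return fmt.Sprintf("seg-%d", segID)
}

func main() {
	s := &server{meta: fakeMeta{}}
	// A request-scoped context would normally come from the RPC layer.
	fmt.Println(s.createIndexForSegment(context.Background(), 1))
}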
@@ -2381,7 +2381,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
s := &Server{meta: m}

t.Run("normal", func(t *testing.T) {
segments := s.getUnIndexTaskSegments()
segments := s.getUnIndexTaskSegments(context.TODO())
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)

@@ -2404,7 +2404,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
IndexState: commonpb.IndexState_Finished,
})

segments := s.getUnIndexTaskSegments()
segments := s.getUnIndexTaskSegments(context.TODO())
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
})

@@ -2417,7 +2417,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
IndexState: commonpb.IndexState_Finished,
})

segments := s.getUnIndexTaskSegments()
segments := s.getUnIndexTaskSegments(context.TODO())
assert.Equal(t, 0, len(segments))
})
}
@@ -85,7 +85,7 @@ func (jm *statsJobManager) triggerStatsTaskLoop() {

case segID := <-getStatsTaskChSingleton():
log.Info("receive new segment to trigger stats task", zap.Int64("segmentID", segID))
segment := jm.mt.GetSegment(segID)
segment := jm.mt.GetSegment(jm.ctx, segID)
if segment == nil {
log.Warn("segment is not exist, no need to do stats task", zap.Int64("segmentID", segID))
continue

@@ -96,7 +96,7 @@ func (jm *statsJobManager) triggerStatsTaskLoop() {
}

func (jm *statsJobManager) triggerSortStatsTask() {
segments := jm.mt.SelectSegments(SegmentFilterFunc(func(seg *SegmentInfo) bool {
segments := jm.mt.SelectSegments(jm.ctx, SegmentFilterFunc(func(seg *SegmentInfo) bool {
return isFlush(seg) && seg.GetLevel() != datapb.SegmentLevel_L0 && !seg.GetIsSorted() && !seg.GetIsImporting()
}))
for _, segment := range segments {

@@ -156,7 +156,7 @@ func (jm *statsJobManager) triggerTextStatsTask() {
}
needTriggerFieldIDs = append(needTriggerFieldIDs, field.GetFieldID())
}
segments := jm.mt.SelectSegments(WithCollection(collection.ID), SegmentFilterFunc(func(seg *SegmentInfo) bool {
segments := jm.mt.SelectSegments(jm.ctx, WithCollection(collection.ID), SegmentFilterFunc(func(seg *SegmentInfo) bool {
return needDoTextIndex(seg, needTriggerFieldIDs)
}))

@@ -180,7 +180,7 @@ func (jm *statsJobManager) triggerBM25StatsTask() {
needTriggerFieldIDs = append(needTriggerFieldIDs, field.GetFieldID())
}
}
segments := jm.mt.SelectSegments(WithCollection(collection.ID), SegmentFilterFunc(func(seg *SegmentInfo) bool {
segments := jm.mt.SelectSegments(jm.ctx, WithCollection(collection.ID), SegmentFilterFunc(func(seg *SegmentInfo) bool {
return needDoBM25(seg, needTriggerFieldIDs)
}))

@@ -229,7 +229,7 @@ func (jm *statsJobManager) cleanupStatsTasksLoop() {
func (jm *statsJobManager) SubmitStatsTask(originSegmentID, targetSegmentID int64,
subJobType indexpb.StatsSubJob, canRecycle bool,
) error {
originSegment := jm.mt.GetHealthySegment(originSegmentID)
originSegment := jm.mt.GetHealthySegment(jm.ctx, originSegmentID)
if originSegment == nil {
return merr.WrapErrSegmentNotFound(originSegmentID)
}
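Unlike the RPC handlers, the stats job manager's triggers run in background loops with no caller, so these hunks fall back to the component's own long-lived jm.ctx rather than a request context. A minimal sketch of that choice; jobManager here is an illustrative stand-in, not the statsJobManager type itself:

package main

import (
	"context"
	"fmt"
	"time"
)

// jobManager is a stand-in: background triggers have no caller, so the
// component's own lifetime context is what gets passed to the meta layer.
type jobManager struct {
	ctx context.Context
}

func (jm *jobManager) triggerLoop(selectSegments func(context.Context) []int64) {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		select {
		case <-jm.ctx.Done():
			return // shutting down cancels in-flight meta work too
		case <-ticker.C:
			// jm.ctx plays the same role as jm.ctx in the hunks above.
			fmt.Println("selected:", selectSegments(jm.ctx))
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	jm := &jobManager{ctx: ctx}
	jm.triggerLoop(func(context.Context) []int64 { return []int64{1, 2} })
}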
@@ -54,19 +54,19 @@ import (
)

type CompactionMeta interface {
GetSegment(segID UniqueID) *SegmentInfo
SelectSegments(filters ...SegmentFilter) []*SegmentInfo
GetHealthySegment(segID UniqueID) *SegmentInfo
UpdateSegmentsInfo(operators ...UpdateOperator) error
SetSegmentsCompacting(segmentID []int64, compacting bool)
CheckAndSetSegmentsCompacting(segmentIDs []int64) (bool, bool)
CompleteCompactionMutation(t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)
CleanPartitionStatsInfo(info *datapb.PartitionStatsInfo) error
GetSegment(ctx context.Context, segID UniqueID) *SegmentInfo
SelectSegments(ctx context.Context, filters ...SegmentFilter) []*SegmentInfo
GetHealthySegment(ctx context.Context, segID UniqueID) *SegmentInfo
UpdateSegmentsInfo(ctx context.Context, operators ...UpdateOperator) error
SetSegmentsCompacting(ctx context.Context, segmentID []int64, compacting bool)
CheckAndSetSegmentsCompacting(ctx context.Context, segmentIDs []int64) (bool, bool)
CompleteCompactionMutation(ctx context.Context, t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)
CleanPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error

SaveCompactionTask(task *datapb.CompactionTask) error
DropCompactionTask(task *datapb.CompactionTask) error
GetCompactionTasks() map[int64][]*datapb.CompactionTask
GetCompactionTasksByTriggerID(triggerID int64) []*datapb.CompactionTask
SaveCompactionTask(ctx context.Context, task *datapb.CompactionTask) error
DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error
GetCompactionTasks(ctx context.Context) map[int64][]*datapb.CompactionTask
GetCompactionTasksByTriggerID(ctx context.Context, triggerID int64) []*datapb.CompactionTask

GetIndexMeta() *indexMeta
GetAnalyzeMeta() *analyzeMeta
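Every method of the CompactionMeta interface now takes ctx as its first parameter, so implementations, callers, and generated mocks all change together, and the meta layer no longer needs a stored m.ctx to reach the catalog. A minimal sketch of an implementation adapted to this shape; catalog, compactionMeta and memCatalog are illustrative stand-ins, not the real datacoord types:

package main

import (
	"context"
	"fmt"
)

// catalog is a stand-in for the persistence interface that the meta layer
// forwards the caller's ctx to.
type catalog interface {
	SaveTask(ctx context.Context, id int64) error
}

type compactionMeta struct {
	catalog catalog
}

// SaveCompactionTask no longer falls back to a stored context; the caller's
// ctx is forwarded as-is to the catalog operation.
func (m *compactionMeta) SaveCompactionTask(ctx context.Context, id int64) error {
	return m.catalog.SaveTask(ctx, id)
}

type memCatalog struct{}

func (memCatalog) SaveTask(ctx context.Context, id int64) error {
	if err := ctx.Err(); err != nil {
		return err // a cancelled caller now short-circuits persistence
	}
	fmt.Println("saved compaction task", id)
	return nil
}

func main() {
	m := &compactionMeta{catalog: memCatalog{}}
	fmt.Println(m.SaveCompactionTask(context.Background(), 42))
}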
@@ -493,7 +493,7 @@ func (m *meta) AddSegment(ctx context.Context, segment *SegmentInfo) error {
log.Info("meta update: adding segment - Start", zap.Int64("segmentID", segment.GetID()))
m.Lock()
defer m.Unlock()
if err := m.catalog.AddSegment(m.ctx, segment.SegmentInfo); err != nil {
if err := m.catalog.AddSegment(ctx, segment.SegmentInfo); err != nil {
log.Error("meta update: adding segment failed",
zap.Int64("segmentID", segment.GetID()),
zap.Error(err))

@@ -507,7 +507,7 @@ func (m *meta) AddSegment(ctx context.Context, segment *SegmentInfo) error {
}

// DropSegment remove segment with provided id, etcd persistence also removed
func (m *meta) DropSegment(segmentID UniqueID) error {
func (m *meta) DropSegment(ctx context.Context, segmentID UniqueID) error {
log.Debug("meta update: dropping segment", zap.Int64("segmentID", segmentID))
m.Lock()
defer m.Unlock()

@@ -517,7 +517,7 @@ func (m *meta) DropSegment(segmentID UniqueID) error {
zap.Int64("segmentID", segmentID))
return nil
}
if err := m.catalog.DropSegment(m.ctx, segment.SegmentInfo); err != nil {
if err := m.catalog.DropSegment(ctx, segment.SegmentInfo); err != nil {
log.Warn("meta update: dropping segment failed",
zap.Int64("segmentID", segmentID),
zap.Error(err))

@@ -533,7 +533,7 @@ func (m *meta) DropSegment(segmentID UniqueID) error {

// GetHealthySegment returns segment info with provided id
// if not segment is found, nil will be returned
func (m *meta) GetHealthySegment(segID UniqueID) *SegmentInfo {
func (m *meta) GetHealthySegment(ctx context.Context, segID UniqueID) *SegmentInfo {
m.RLock()
defer m.RUnlock()
segment := m.segments.GetSegment(segID)

@@ -560,7 +560,7 @@ func (m *meta) GetSegments(segIDs []UniqueID, filterFunc SegmentInfoSelector) []
// GetSegment returns segment info with provided id
// include the unhealthy segment
// if not segment is found, nil will be returned
func (m *meta) GetSegment(segID UniqueID) *SegmentInfo {
func (m *meta) GetSegment(ctx context.Context, segID UniqueID) *SegmentInfo {
m.RLock()
defer m.RUnlock()
return m.segments.GetSegment(segID)

@@ -603,7 +603,7 @@ func (m *meta) GetSegmentsChannels(segmentIDs []UniqueID) (map[int64]string, err
}

// SetState setting segment with provided ID state
func (m *meta) SetState(segmentID UniqueID, targetState commonpb.SegmentState) error {
func (m *meta) SetState(ctx context.Context, segmentID UniqueID, targetState commonpb.SegmentState) error {
log.Debug("meta update: setting segment state",
zap.Int64("segmentID", segmentID),
zap.Any("target state", targetState))

@@ -628,7 +628,7 @@ func (m *meta) SetState(segmentID UniqueID, targetState commonpb.SegmentState) e
if clonedSegment != nil && isSegmentHealthy(clonedSegment) {
// Update segment state and prepare segment metric update.
updateSegStateAndPrepareMetrics(clonedSegment, targetState, metricMutation)
if err := m.catalog.AlterSegments(m.ctx, []*datapb.SegmentInfo{clonedSegment.SegmentInfo}); err != nil {
if err := m.catalog.AlterSegments(ctx, []*datapb.SegmentInfo{clonedSegment.SegmentInfo}); err != nil {
log.Warn("meta update: setting segment state - failed to alter segments",
zap.Int64("segmentID", segmentID),
zap.String("target state", targetState.String()),

@@ -1030,7 +1030,7 @@ func UpdateAsDroppedIfEmptyWhenFlushing(segmentID int64) UpdateOperator {

// updateSegmentsInfo update segment infos
// will exec all operators, and update all changed segments
func (m *meta) UpdateSegmentsInfo(operators ...UpdateOperator) error {
func (m *meta) UpdateSegmentsInfo(ctx context.Context, operators ...UpdateOperator) error {
m.Lock()
defer m.Unlock()
updatePack := &updateSegmentPack{

@@ -1054,7 +1054,7 @@ func (m *meta) UpdateSegmentsInfo(operators ...UpdateOperator) error {
segments := lo.MapToSlice(updatePack.segments, func(_ int64, segment *SegmentInfo) *datapb.SegmentInfo { return segment.SegmentInfo })
increments := lo.Values(updatePack.increments)

if err := m.catalog.AlterSegments(m.ctx, segments, increments...); err != nil {
if err := m.catalog.AlterSegments(ctx, segments, increments...); err != nil {
log.Error("meta update: update flush segments info - failed to store flush segment info into Etcd",
zap.Error(err))
return err

@@ -1071,7 +1071,7 @@ func (m *meta) UpdateSegmentsInfo(operators ...UpdateOperator) error {

// UpdateDropChannelSegmentInfo updates segment checkpoints and binlogs before drop
// reusing segment info to pass segment id, binlogs, statslog, deltalog, start position and checkpoint
func (m *meta) UpdateDropChannelSegmentInfo(channel string, segments []*SegmentInfo) error {
func (m *meta) UpdateDropChannelSegmentInfo(ctx context.Context, channel string, segments []*SegmentInfo) error {
log.Debug("meta update: update drop channel segment info",
zap.String("channel", channel))
m.Lock()

@@ -1103,7 +1103,7 @@ func (m *meta) UpdateDropChannelSegmentInfo(channel string, segments []*SegmentI
modSegments[seg.ID] = clonedSeg
}
}
err := m.batchSaveDropSegments(channel, modSegments)
err := m.batchSaveDropSegments(ctx, channel, modSegments)
if err != nil {
log.Warn("meta update: update drop channel segment info failed",
zap.String("channel", channel),

@@ -1188,7 +1188,7 @@ func (m *meta) mergeDropSegment(seg2Drop *SegmentInfo) (*SegmentInfo, *segMetric
// 1. when failure occurs between batches, failover mechanism will continue with the earliest checkpoint of this channel
// since the flag is not marked so DataNode can re-consume the drop collection msg
// 2. when failure occurs between save meta and unwatch channel, the removal flag shall be check before let datanode watch this channel
func (m *meta) batchSaveDropSegments(channel string, modSegments map[int64]*SegmentInfo) error {
func (m *meta) batchSaveDropSegments(ctx context.Context, channel string, modSegments map[int64]*SegmentInfo) error {
var modSegIDs []int64
for k := range modSegments {
modSegIDs = append(modSegIDs, k)

@@ -1199,12 +1199,12 @@ func (m *meta) batchSaveDropSegments(channel string, modSegments map[int64]*Segm
for _, seg := range modSegments {
segments = append(segments, seg.SegmentInfo)
}
err := m.catalog.SaveDroppedSegmentsInBatch(m.ctx, segments)
err := m.catalog.SaveDroppedSegmentsInBatch(ctx, segments)
if err != nil {
return err
}

if err = m.catalog.MarkChannelDeleted(m.ctx, channel); err != nil {
if err = m.catalog.MarkChannelDeleted(ctx, channel); err != nil {
return err
}

@@ -1218,17 +1218,17 @@ func (m *meta) batchSaveDropSegments(channel string, modSegments map[int64]*Segm

// GetSegmentsByChannel returns all segment info which insert channel equals provided `dmlCh`
func (m *meta) GetSegmentsByChannel(channel string) []*SegmentInfo {
return m.SelectSegments(SegmentFilterFunc(isSegmentHealthy), WithChannel(channel))
return m.SelectSegments(m.ctx, SegmentFilterFunc(isSegmentHealthy), WithChannel(channel))
}

// GetSegmentsOfCollection get all segments of collection
func (m *meta) GetSegmentsOfCollection(collectionID UniqueID) []*SegmentInfo {
return m.SelectSegments(SegmentFilterFunc(isSegmentHealthy), WithCollection(collectionID))
func (m *meta) GetSegmentsOfCollection(ctx context.Context, collectionID UniqueID) []*SegmentInfo {
return m.SelectSegments(ctx, SegmentFilterFunc(isSegmentHealthy), WithCollection(collectionID))
}

// GetSegmentsIDOfCollection returns all segment ids which collection equals to provided `collectionID`
func (m *meta) GetSegmentsIDOfCollection(collectionID UniqueID) []UniqueID {
segments := m.SelectSegments(SegmentFilterFunc(isSegmentHealthy), WithCollection(collectionID))
func (m *meta) GetSegmentsIDOfCollection(ctx context.Context, collectionID UniqueID) []UniqueID {
segments := m.SelectSegments(ctx, SegmentFilterFunc(isSegmentHealthy), WithCollection(collectionID))

return lo.Map(segments, func(segment *SegmentInfo, _ int) int64 {
return segment.ID

@@ -1236,8 +1236,8 @@ func (m *meta) GetSegmentsIDOfCollection(collectionID UniqueID) []UniqueID {
}

// GetSegmentsIDOfCollectionWithDropped returns all dropped segment ids which collection equals to provided `collectionID`
func (m *meta) GetSegmentsIDOfCollectionWithDropped(collectionID UniqueID) []UniqueID {
segments := m.SelectSegments(WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
func (m *meta) GetSegmentsIDOfCollectionWithDropped(ctx context.Context, collectionID UniqueID) []UniqueID {
segments := m.SelectSegments(ctx, WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
return segment != nil &&
segment.GetState() != commonpb.SegmentState_SegmentStateNone &&
segment.GetState() != commonpb.SegmentState_NotExist

@@ -1249,8 +1249,8 @@ func (m *meta) GetSegmentsIDOfCollectionWithDropped(collectionID UniqueID) []Uni
}

// GetSegmentsIDOfPartition returns all segments ids which collection & partition equals to provided `collectionID`, `partitionID`
func (m *meta) GetSegmentsIDOfPartition(collectionID, partitionID UniqueID) []UniqueID {
segments := m.SelectSegments(WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
func (m *meta) GetSegmentsIDOfPartition(ctx context.Context, collectionID, partitionID UniqueID) []UniqueID {
segments := m.SelectSegments(ctx, WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
return isSegmentHealthy(segment) &&
segment.PartitionID == partitionID
}))

@@ -1261,8 +1261,8 @@ func (m *meta) GetSegmentsIDOfPartition(collectionID, partitionID UniqueID) []Un
}

// GetSegmentsIDOfPartitionWithDropped returns all dropped segments ids which collection & partition equals to provided `collectionID`, `partitionID`
func (m *meta) GetSegmentsIDOfPartitionWithDropped(collectionID, partitionID UniqueID) []UniqueID {
segments := m.SelectSegments(WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
func (m *meta) GetSegmentsIDOfPartitionWithDropped(ctx context.Context, collectionID, partitionID UniqueID) []UniqueID {
segments := m.SelectSegments(ctx, WithCollection(collectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
return segment.GetState() != commonpb.SegmentState_SegmentStateNone &&
segment.GetState() != commonpb.SegmentState_NotExist &&
segment.PartitionID == partitionID

@@ -1274,9 +1274,9 @@ func (m *meta) GetSegmentsIDOfPartitionWithDropped(collectionID, partitionID Uni
}

// GetNumRowsOfPartition returns row count of segments belongs to provided collection & partition
func (m *meta) GetNumRowsOfPartition(collectionID UniqueID, partitionID UniqueID) int64 {
func (m *meta) GetNumRowsOfPartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID) int64 {
var ret int64
segments := m.SelectSegments(WithCollection(collectionID), SegmentFilterFunc(func(si *SegmentInfo) bool {
segments := m.SelectSegments(ctx, WithCollection(collectionID), SegmentFilterFunc(func(si *SegmentInfo) bool {
return isSegmentHealthy(si) && si.GetPartitionID() == partitionID
}))
for _, segment := range segments {

@@ -1287,20 +1287,20 @@ func (m *meta) GetNumRowsOfPartition(collectionID UniqueID, partitionID UniqueID

// GetUnFlushedSegments get all segments which state is not `Flushing` nor `Flushed`
func (m *meta) GetUnFlushedSegments() []*SegmentInfo {
return m.SelectSegments(SegmentFilterFunc(func(segment *SegmentInfo) bool {
return m.SelectSegments(m.ctx, SegmentFilterFunc(func(segment *SegmentInfo) bool {
return segment.GetState() == commonpb.SegmentState_Growing || segment.GetState() == commonpb.SegmentState_Sealed
}))
}

// GetFlushingSegments get all segments which state is `Flushing`
func (m *meta) GetFlushingSegments() []*SegmentInfo {
return m.SelectSegments(SegmentFilterFunc(func(segment *SegmentInfo) bool {
return m.SelectSegments(m.ctx, SegmentFilterFunc(func(segment *SegmentInfo) bool {
return segment.GetState() == commonpb.SegmentState_Flushing
}))
}

// SelectSegments select segments with selector
func (m *meta) SelectSegments(filters ...SegmentFilter) []*SegmentInfo {
func (m *meta) SelectSegments(ctx context.Context, filters ...SegmentFilter) []*SegmentInfo {
m.RLock()
defer m.RUnlock()
return m.segments.GetSegmentsBySelector(filters...)

@@ -1377,7 +1377,7 @@ func (m *meta) SetSegmentCompacting(segmentID UniqueID, compacting bool) {
// CheckAndSetSegmentsCompacting check all segments are not compacting
// if true, set them compacting and return true
// if false, skip setting and
func (m *meta) CheckAndSetSegmentsCompacting(segmentIDs []UniqueID) (exist, canDo bool) {
func (m *meta) CheckAndSetSegmentsCompacting(ctx context.Context, segmentIDs []UniqueID) (exist, canDo bool) {
m.Lock()
defer m.Unlock()
var hasCompacting bool

@@ -1402,7 +1402,7 @@ func (m *meta) CheckAndSetSegmentsCompacting(segmentIDs []UniqueID) (exist, canD
return exist, canDo
}

func (m *meta) SetSegmentsCompacting(segmentIDs []UniqueID, compacting bool) {
func (m *meta) SetSegmentsCompacting(ctx context.Context, segmentIDs []UniqueID, compacting bool) {
m.Lock()
defer m.Unlock()
for _, segmentID := range segmentIDs {

@@ -1617,7 +1617,7 @@ func (m *meta) completeMixCompactionMutation(t *datapb.CompactionTask, result *d
return compactToSegments, metricMutation, nil
}

func (m *meta) CompleteCompactionMutation(t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
func (m *meta) CompleteCompactionMutation(ctx context.Context, t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
m.Lock()
defer m.Unlock()
switch t.GetType() {

@@ -1670,7 +1670,7 @@ func (m *meta) GetCompactionTo(segmentID int64) ([]*SegmentInfo, bool) {
}

// UpdateChannelCheckpoint updates and saves channel checkpoint.
func (m *meta) UpdateChannelCheckpoint(vChannel string, pos *msgpb.MsgPosition) error {
func (m *meta) UpdateChannelCheckpoint(ctx context.Context, vChannel string, pos *msgpb.MsgPosition) error {
if pos == nil || pos.GetMsgID() == nil {
return fmt.Errorf("channelCP is nil, vChannel=%s", vChannel)
}

@@ -1680,7 +1680,7 @@ func (m *meta) UpdateChannelCheckpoint(vChannel string, pos *msgpb.MsgPosition)

oldPosition, ok := m.channelCPs.checkpoints[vChannel]
if !ok || oldPosition.Timestamp < pos.Timestamp {
err := m.catalog.SaveChannelCheckpoint(m.ctx, vChannel, pos)
err := m.catalog.SaveChannelCheckpoint(ctx, vChannel, pos)
if err != nil {
return err
}

@@ -1720,7 +1720,7 @@ func (m *meta) MarkChannelCheckpointDropped(ctx context.Context, channel string)
}

// UpdateChannelCheckpoints updates and saves channel checkpoints.
func (m *meta) UpdateChannelCheckpoints(positions []*msgpb.MsgPosition) error {
func (m *meta) UpdateChannelCheckpoints(ctx context.Context, positions []*msgpb.MsgPosition) error {
m.channelCPs.Lock()
defer m.channelCPs.Unlock()
toUpdates := lo.Filter(positions, func(pos *msgpb.MsgPosition, _ int) bool {

@@ -1732,7 +1732,7 @@ func (m *meta) UpdateChannelCheckpoints(positions []*msgpb.MsgPosition) error {
oldPosition, ok := m.channelCPs.checkpoints[vChannel]
return !ok || oldPosition.Timestamp < pos.Timestamp
})
err := m.catalog.SaveChannelCheckpoints(m.ctx, toUpdates)
err := m.catalog.SaveChannelCheckpoints(ctx, toUpdates)
if err != nil {
return err
}

@@ -1787,7 +1787,7 @@ func (m *meta) GcConfirm(ctx context.Context, collectionID, partitionID UniqueID
}

func (m *meta) GetCompactableSegmentGroupByCollection() map[int64][]*SegmentInfo {
allSegs := m.SelectSegments(SegmentFilterFunc(func(segment *SegmentInfo) bool {
allSegs := m.SelectSegments(m.ctx, SegmentFilterFunc(func(segment *SegmentInfo) bool {
return isSegmentHealthy(segment) &&
isFlush(segment) && // sealed segment
!segment.isCompacting && // not compacting now

@@ -1807,7 +1807,7 @@ func (m *meta) GetCompactableSegmentGroupByCollection() map[int64][]*SegmentInfo
}

func (m *meta) GetEarliestStartPositionOfGrowingSegments(label *CompactionGroupLabel) *msgpb.MsgPosition {
segments := m.SelectSegments(WithCollection(label.CollectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
segments := m.SelectSegments(m.ctx, WithCollection(label.CollectionID), SegmentFilterFunc(func(segment *SegmentInfo) bool {
return segment.GetState() == commonpb.SegmentState_Growing &&
(label.PartitionID == common.AllPartitionsID || segment.GetPartitionID() == label.PartitionID) &&
segment.GetInsertChannel() == label.Channel

@@ -1899,23 +1899,23 @@ func (m *meta) ListCollections() []int64 {
return lo.Keys(m.collections)
}

func (m *meta) DropCompactionTask(task *datapb.CompactionTask) error {
return m.compactionTaskMeta.DropCompactionTask(task)
func (m *meta) DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
return m.compactionTaskMeta.DropCompactionTask(ctx, task)
}

func (m *meta) SaveCompactionTask(task *datapb.CompactionTask) error {
return m.compactionTaskMeta.SaveCompactionTask(task)
func (m *meta) SaveCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
return m.compactionTaskMeta.SaveCompactionTask(ctx, task)
}

func (m *meta) GetCompactionTasks() map[int64][]*datapb.CompactionTask {
func (m *meta) GetCompactionTasks(ctx context.Context) map[int64][]*datapb.CompactionTask {
return m.compactionTaskMeta.GetCompactionTasks()
}

func (m *meta) GetCompactionTasksByTriggerID(triggerID int64) []*datapb.CompactionTask {
func (m *meta) GetCompactionTasksByTriggerID(ctx context.Context, triggerID int64) []*datapb.CompactionTask {
return m.compactionTaskMeta.GetCompactionTasksByTriggerID(triggerID)
}

func (m *meta) CleanPartitionStatsInfo(info *datapb.PartitionStatsInfo) error {
func (m *meta) CleanPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error {
removePaths := make([]string, 0)
partitionStatsPath := path.Join(m.chunkManager.RootPath(), common.PartitionStatsPath,
metautil.JoinIDPath(info.CollectionID, info.PartitionID),

@@ -1952,13 +1952,13 @@ func (m *meta) CleanPartitionStatsInfo(info *datapb.PartitionStatsInfo) error {
}

// first clean analyze task
if err = m.analyzeMeta.DropAnalyzeTask(info.GetAnalyzeTaskID()); err != nil {
if err = m.analyzeMeta.DropAnalyzeTask(ctx, info.GetAnalyzeTaskID()); err != nil {
log.Warn("remove analyze task failed", zap.Int64("analyzeTaskID", info.GetAnalyzeTaskID()), zap.Error(err))
return err
}

// finally, clean up the partition stats info, and make sure the analysis task is cleaned up
err = m.partitionStatsMeta.DropPartitionStatsInfo(info)
err = m.partitionStatsMeta.DropPartitionStatsInfo(ctx, info)
log.Debug("drop partition stats meta",
zap.Int64("collectionID", info.GetCollectionID()),
zap.Int64("partitionID", info.GetPartitionID()),
@ -243,7 +243,7 @@ func (suite *MetaBasicSuite) TestCompleteCompactionMutation() {
|
|||
chunkManager: mockChMgr,
|
||||
}
|
||||
|
||||
infos, mutation, err := m.CompleteCompactionMutation(task, result)
|
||||
infos, mutation, err := m.CompleteCompactionMutation(context.TODO(), task, result)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.Equal(1, len(infos))
|
||||
info := infos[0]
|
||||
|
@ -261,7 +261,7 @@ func (suite *MetaBasicSuite) TestCompleteCompactionMutation() {
|
|||
|
||||
// check compactFrom segments
|
||||
for _, segID := range []int64{1, 2} {
|
||||
seg := m.GetSegment(segID)
|
||||
seg := m.GetSegment(context.TODO(), segID)
|
||||
suite.Equal(commonpb.SegmentState_Dropped, seg.GetState())
|
||||
suite.NotEmpty(seg.GetDroppedAt())
|
||||
|
||||
|
@ -304,7 +304,7 @@ func (suite *MetaBasicSuite) TestCompleteCompactionMutation() {
|
|||
chunkManager: mockChMgr,
|
||||
}
|
||||
|
||||
infos, mutation, err := m.CompleteCompactionMutation(task, result)
|
||||
infos, mutation, err := m.CompleteCompactionMutation(context.TODO(), task, result)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.Equal(1, len(infos))
|
||||
info := infos[0]
|
||||
|
@ -335,7 +335,7 @@ func (suite *MetaBasicSuite) TestCompleteCompactionMutation() {
|
|||
|
||||
// check compactFrom segments
|
||||
for _, segID := range []int64{1, 2} {
|
||||
seg := m.GetSegment(segID)
|
||||
seg := m.GetSegment(context.TODO(), segID)
|
||||
suite.Equal(commonpb.SegmentState_Dropped, seg.GetState())
|
||||
suite.NotEmpty(seg.GetDroppedAt())
|
||||
|
||||
|
@ -498,42 +498,42 @@ func TestMeta_Basic(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// check GetSegment
|
||||
info0_0 := meta.GetHealthySegment(segID0_0)
|
||||
info0_0 := meta.GetHealthySegment(context.TODO(), segID0_0)
|
||||
assert.NotNil(t, info0_0)
|
||||
assert.True(t, proto.Equal(info0_0, segInfo0_0))
|
||||
info1_0 := meta.GetHealthySegment(segID1_0)
|
||||
info1_0 := meta.GetHealthySegment(context.TODO(), segID1_0)
|
||||
assert.NotNil(t, info1_0)
|
||||
assert.True(t, proto.Equal(info1_0, segInfo1_0))
|
||||
|
||||
// check GetSegmentsOfCollection
|
||||
segIDs := meta.GetSegmentsIDOfCollection(collID)
|
||||
segIDs := meta.GetSegmentsIDOfCollection(context.TODO(), collID)
|
||||
assert.EqualValues(t, 3, len(segIDs))
|
||||
assert.Contains(t, segIDs, segID0_0)
|
||||
assert.Contains(t, segIDs, segID1_0)
|
||||
assert.Contains(t, segIDs, segID1_1)
|
||||
|
||||
// check GetSegmentsOfPartition
|
||||
segIDs = meta.GetSegmentsIDOfPartition(collID, partID0)
|
||||
segIDs = meta.GetSegmentsIDOfPartition(context.TODO(), collID, partID0)
|
||||
assert.EqualValues(t, 1, len(segIDs))
|
||||
assert.Contains(t, segIDs, segID0_0)
|
||||
segIDs = meta.GetSegmentsIDOfPartition(collID, partID1)
|
||||
segIDs = meta.GetSegmentsIDOfPartition(context.TODO(), collID, partID1)
|
||||
assert.EqualValues(t, 2, len(segIDs))
|
||||
assert.Contains(t, segIDs, segID1_0)
|
||||
assert.Contains(t, segIDs, segID1_1)
|
||||
|
||||
// check DropSegment
|
||||
err = meta.DropSegment(segID1_0)
|
||||
err = meta.DropSegment(context.TODO(), segID1_0)
|
||||
assert.NoError(t, err)
|
||||
segIDs = meta.GetSegmentsIDOfPartition(collID, partID1)
|
||||
segIDs = meta.GetSegmentsIDOfPartition(context.TODO(), collID, partID1)
|
||||
assert.EqualValues(t, 1, len(segIDs))
|
||||
assert.Contains(t, segIDs, segID1_1)
|
||||
|
||||
err = meta.SetState(segID0_0, commonpb.SegmentState_Sealed)
|
||||
err = meta.SetState(context.TODO(), segID0_0, commonpb.SegmentState_Sealed)
|
||||
assert.NoError(t, err)
|
||||
err = meta.SetState(segID0_0, commonpb.SegmentState_Flushed)
|
||||
err = meta.SetState(context.TODO(), segID0_0, commonpb.SegmentState_Flushed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
info0_0 = meta.GetHealthySegment(segID0_0)
|
||||
info0_0 = meta.GetHealthySegment(context.TODO(), segID0_0)
|
||||
assert.NotNil(t, info0_0)
|
||||
assert.EqualValues(t, commonpb.SegmentState_Flushed, info0_0.State)
|
||||
})
|
||||
|
@ -564,13 +564,13 @@ func TestMeta_Basic(t *testing.T) {
|
|||
meta, err = newMeta(context.TODO(), catalog, nil)
|
||||
assert.NoError(t, err)
|
||||
// nil, since no segment yet
|
||||
err = meta.DropSegment(0)
|
||||
err = meta.DropSegment(context.TODO(), 0)
|
||||
assert.NoError(t, err)
|
||||
// nil, since Save error not injected
|
||||
err = meta.AddSegment(context.TODO(), NewSegmentInfo(&datapb.SegmentInfo{}))
|
||||
assert.NoError(t, err)
|
||||
// error injected
|
||||
err = meta.DropSegment(0)
|
||||
err = meta.DropSegment(context.TODO(), 0)
|
||||
assert.Error(t, err)
|
||||
|
||||
catalog = datacoord.NewCatalog(metakv, "", "")
|
||||
|
@ -602,7 +602,7 @@ func TestMeta_Basic(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// check partition/collection statistics
|
||||
nums = meta.GetNumRowsOfPartition(collID, partID0)
|
||||
nums = meta.GetNumRowsOfPartition(context.TODO(), collID, partID0)
|
||||
assert.EqualValues(t, (rowCount0 + rowCount1), nums)
|
||||
nums = meta.GetNumRowsOfCollection(collID)
|
||||
assert.EqualValues(t, (rowCount0 + rowCount1), nums)
|
||||
|
@ -748,6 +748,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
assert.EqualValues(t, 0, segment1.getDeltaCount())
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateStatusOperator(1, commonpb.SegmentState_Flushing),
|
||||
AddBinlogsOperator(1,
|
||||
[]*datapb.FieldBinlog{getFieldBinlogIDsWithEntry(1, 10, 1)},
|
||||
|
@ -760,7 +761,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
updated := meta.GetHealthySegment(1)
|
||||
updated := meta.GetHealthySegment(context.TODO(), 1)
|
||||
assert.EqualValues(t, -1, updated.deltaRowcount.Load())
|
||||
assert.EqualValues(t, 1, updated.getDeltaCount())
|
||||
|
||||
|
@ -789,6 +790,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
|
||||
// segment not found
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateCompactedOperator(1),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
@ -803,6 +805,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateCompactedOperator(1),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
@ -812,51 +815,60 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateStatusOperator(1, commonpb.SegmentState_Flushing),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
AddBinlogsOperator(1, nil, nil, nil, nil),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateStartPosition([]*datapb.SegmentStartPosition{{SegmentID: 1, StartPosition: &msgpb.MsgPosition{MsgID: []byte{1, 2, 3}}}}),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateCheckPointOperator(1, []*datapb.CheckPoint{{SegmentID: 1, NumOfRows: 10}}),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateBinlogsOperator(1, nil, nil, nil),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateDmlPosition(1, nil),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateDmlPosition(1, &msgpb.MsgPosition{MsgID: []byte{1}}),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateImportedRows(1, 0),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateIsImporting(1, true),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(UpdateAsDroppedIfEmptyWhenFlushing(1))
|
||||
err = meta.UpdateSegmentsInfo(context.TODO(), UpdateAsDroppedIfEmptyWhenFlushing(1))
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
|
@ -865,6 +877,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
meta.AddSegment(context.Background(), &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing}})
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateStatusOperator(1, commonpb.SegmentState_Flushing),
|
||||
UpdateAsDroppedIfEmptyWhenFlushing(1),
|
||||
)
|
||||
|
@ -880,11 +893,12 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateCheckPointOperator(1, []*datapb.CheckPoint{{SegmentID: 2, NumOfRows: 10}}),
|
||||
)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, meta.GetHealthySegment(2))
|
||||
assert.Nil(t, meta.GetHealthySegment(context.TODO(), 2))
|
||||
})
|
||||
|
||||
t.Run("test save etcd failed", func(t *testing.T) {
|
||||
|
@ -907,6 +921,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
meta.segments.SetSegment(1, segmentInfo)
|
||||
|
||||
err = meta.UpdateSegmentsInfo(
|
||||
context.TODO(),
|
||||
UpdateStatusOperator(1, commonpb.SegmentState_Flushing),
|
||||
AddBinlogsOperator(1,
|
||||
[]*datapb.FieldBinlog{getFieldBinlogIDs(1, 2)},
|
||||
|
@ -920,7 +935,7 @@ func TestUpdateSegmentsInfo(t *testing.T) {
|
|||
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "mocked fail", err.Error())
|
||||
segmentInfo = meta.GetHealthySegment(1)
|
||||
segmentInfo = meta.GetHealthySegment(context.TODO(), 1)
|
||||
assert.EqualValues(t, 0, segmentInfo.NumOfRows)
|
||||
assert.Equal(t, commonpb.SegmentState_Growing, segmentInfo.State)
|
||||
assert.Nil(t, segmentInfo.Binlogs)
|
||||
|
@ -971,8 +986,8 @@ func Test_meta_SetSegmentsCompacting(t *testing.T) {
|
|||
catalog: &datacoord.Catalog{MetaKv: tt.fields.client},
|
||||
segments: tt.fields.segments,
|
||||
}
|
||||
m.SetSegmentsCompacting([]UniqueID{tt.args.segmentID}, tt.args.compacting)
|
||||
segment := m.GetHealthySegment(tt.args.segmentID)
|
||||
m.SetSegmentsCompacting(context.TODO(), []UniqueID{tt.args.segmentID}, tt.args.compacting)
|
||||
segment := m.GetHealthySegment(context.TODO(), tt.args.segmentID)
|
||||
assert.Equal(t, tt.args.compacting, segment.isCompacting)
|
||||
})
|
||||
}
|
||||
|
@ -1008,7 +1023,7 @@ func Test_meta_GetSegmentsOfCollection(t *testing.T) {
|
|||
}
|
||||
expectedSeg := map[int64]commonpb.SegmentState{1: commonpb.SegmentState_Flushed, 2: commonpb.SegmentState_Growing}
|
||||
m := &meta{segments: storedSegments}
|
||||
got := m.GetSegmentsOfCollection(1)
|
||||
got := m.GetSegmentsOfCollection(context.TODO(), 1)
|
||||
assert.Equal(t, len(expectedSeg), len(got))
|
||||
for _, gotInfo := range got {
|
||||
expected, ok := expectedSeg[gotInfo.ID]
|
||||
|
@ -1016,10 +1031,10 @@ func Test_meta_GetSegmentsOfCollection(t *testing.T) {
|
|||
assert.Equal(t, expected, gotInfo.GetState())
|
||||
}
|
||||
|
||||
got = m.GetSegmentsOfCollection(-1)
|
||||
got = m.GetSegmentsOfCollection(context.TODO(), -1)
|
||||
assert.Equal(t, 3, len(got))
|
||||
|
||||
got = m.GetSegmentsOfCollection(10)
|
||||
got = m.GetSegmentsOfCollection(context.TODO(), 10)
|
||||
assert.Equal(t, 0, len(got))
|
||||
}
|
||||
|
||||
|
@ -1066,7 +1081,7 @@ func Test_meta_GetSegmentsWithChannel(t *testing.T) {
|
|||
got = m.GetSegmentsByChannel("h3")
|
||||
assert.Equal(t, 0, len(got))
|
||||
|
||||
got = m.SelectSegments(WithCollection(1), WithChannel("h1"), SegmentFilterFunc(func(segment *SegmentInfo) bool {
|
||||
got = m.SelectSegments(context.TODO(), WithCollection(1), WithChannel("h1"), SegmentFilterFunc(func(segment *SegmentInfo) bool {
|
||||
return segment != nil && segment.GetState() == commonpb.SegmentState_Flushed
|
||||
}))
|
||||
assert.Equal(t, 1, len(got))
|
||||
|
@ -1151,10 +1166,10 @@ func TestMeta_GetAllSegments(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
seg1 := m.GetHealthySegment(1)
|
||||
seg1All := m.GetSegment(1)
|
||||
seg2 := m.GetHealthySegment(2)
|
||||
seg2All := m.GetSegment(2)
|
||||
seg1 := m.GetHealthySegment(context.TODO(), 1)
|
||||
seg1All := m.GetSegment(context.TODO(), 1)
|
||||
seg2 := m.GetHealthySegment(context.TODO(), 2)
|
||||
seg2All := m.GetSegment(context.TODO(), 2)
|
||||
assert.NotNil(t, seg1)
|
||||
assert.NotNil(t, seg1All)
|
||||
assert.Nil(t, seg2)
|
||||
|
@ -1190,10 +1205,10 @@ func TestChannelCP(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// nil position
|
||||
err = meta.UpdateChannelCheckpoint(mockVChannel, nil)
|
||||
err = meta.UpdateChannelCheckpoint(context.TODO(), mockVChannel, nil)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = meta.UpdateChannelCheckpoint(mockVChannel, pos)
|
||||
err = meta.UpdateChannelCheckpoint(context.TODO(), mockVChannel, pos)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
|
@ -1202,11 +1217,11 @@ func TestChannelCP(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(meta.channelCPs.checkpoints))
|
||||
|
||||
err = meta.UpdateChannelCheckpoints(nil)
|
||||
err = meta.UpdateChannelCheckpoints(context.TODO(), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(meta.channelCPs.checkpoints))
|
||||
|
||||
err = meta.UpdateChannelCheckpoints([]*msgpb.MsgPosition{pos, {
|
||||
err = meta.UpdateChannelCheckpoints(context.TODO(), []*msgpb.MsgPosition{pos, {
|
||||
ChannelName: "",
|
||||
}})
|
||||
assert.NoError(t, err)
|
||||
|
@ -1220,7 +1235,7 @@ func TestChannelCP(t *testing.T) {
|
|||
position := meta.GetChannelCheckpoint(mockVChannel)
|
||||
assert.Nil(t, position)
|
||||
|
||||
err = meta.UpdateChannelCheckpoint(mockVChannel, pos)
|
||||
err = meta.UpdateChannelCheckpoint(context.TODO(), mockVChannel, pos)
|
||||
assert.NoError(t, err)
|
||||
position = meta.GetChannelCheckpoint(mockVChannel)
|
||||
assert.NotNil(t, position)
|
||||
|
@ -1235,7 +1250,7 @@ func TestChannelCP(t *testing.T) {
|
|||
err = meta.DropChannelCheckpoint(mockVChannel)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = meta.UpdateChannelCheckpoint(mockVChannel, pos)
|
||||
err = meta.UpdateChannelCheckpoint(context.TODO(), mockVChannel, pos)
|
||||
assert.NoError(t, err)
|
||||
err = meta.DropChannelCheckpoint(mockVChannel)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
package datacoord
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
datapb "github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
@ -20,9 +22,9 @@ func (_m *MockCompactionMeta) EXPECT() *MockCompactionMeta_Expecter {
|
|||
return &MockCompactionMeta_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// CheckAndSetSegmentsCompacting provides a mock function with given fields: segmentIDs
|
||||
func (_m *MockCompactionMeta) CheckAndSetSegmentsCompacting(segmentIDs []int64) (bool, bool) {
|
||||
ret := _m.Called(segmentIDs)
|
||||
// CheckAndSetSegmentsCompacting provides a mock function with given fields: ctx, segmentIDs
|
||||
func (_m *MockCompactionMeta) CheckAndSetSegmentsCompacting(ctx context.Context, segmentIDs []int64) (bool, bool) {
|
||||
ret := _m.Called(ctx, segmentIDs)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CheckAndSetSegmentsCompacting")
|
||||
|
@ -30,17 +32,17 @@ func (_m *MockCompactionMeta) CheckAndSetSegmentsCompacting(segmentIDs []int64)
|
|||
|
||||
var r0 bool
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(0).(func([]int64) (bool, bool)); ok {
|
||||
return rf(segmentIDs)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, []int64) (bool, bool)); ok {
|
||||
return rf(ctx, segmentIDs)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func([]int64) bool); ok {
|
||||
r0 = rf(segmentIDs)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, []int64) bool); ok {
|
||||
r0 = rf(ctx, segmentIDs)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func([]int64) bool); ok {
|
||||
r1 = rf(segmentIDs)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, []int64) bool); ok {
|
||||
r1 = rf(ctx, segmentIDs)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
@ -54,14 +56,15 @@ type MockCompactionMeta_CheckAndSetSegmentsCompacting_Call struct {
|
|||
}
|
||||
|
||||
// CheckAndSetSegmentsCompacting is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - segmentIDs []int64
|
||||
func (_e *MockCompactionMeta_Expecter) CheckAndSetSegmentsCompacting(segmentIDs interface{}) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
return &MockCompactionMeta_CheckAndSetSegmentsCompacting_Call{Call: _e.mock.On("CheckAndSetSegmentsCompacting", segmentIDs)}
|
||||
func (_e *MockCompactionMeta_Expecter) CheckAndSetSegmentsCompacting(ctx interface{}, segmentIDs interface{}) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
return &MockCompactionMeta_CheckAndSetSegmentsCompacting_Call{Call: _e.mock.On("CheckAndSetSegmentsCompacting", ctx, segmentIDs)}
|
||||
}
|
||||
|
||||
func (_c *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call) Run(run func(segmentIDs []int64)) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
func (_c *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call) Run(run func(ctx context.Context, segmentIDs []int64)) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].([]int64))
|
||||
run(args[0].(context.Context), args[1].([]int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
@ -71,22 +74,22 @@ func (_c *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call) Return(_a0 bool
|
|||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call) RunAndReturn(run func([]int64) (bool, bool)) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
func (_c *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call) RunAndReturn(run func(context.Context, []int64) (bool, bool)) *MockCompactionMeta_CheckAndSetSegmentsCompacting_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// CleanPartitionStatsInfo provides a mock function with given fields: info
|
||||
func (_m *MockCompactionMeta) CleanPartitionStatsInfo(info *datapb.PartitionStatsInfo) error {
|
||||
ret := _m.Called(info)
|
||||
// CleanPartitionStatsInfo provides a mock function with given fields: ctx, info
|
||||
func (_m *MockCompactionMeta) CleanPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error {
	ret := _m.Called(ctx, info)

	if len(ret) == 0 {
		panic("no return value specified for CleanPartitionStatsInfo")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*datapb.PartitionStatsInfo) error); ok {
		r0 = rf(info)
	if rf, ok := ret.Get(0).(func(context.Context, *datapb.PartitionStatsInfo) error); ok {
		r0 = rf(ctx, info)
	} else {
		r0 = ret.Error(0)
	}

@ -100,14 +103,15 @@ type MockCompactionMeta_CleanPartitionStatsInfo_Call struct {
}

// CleanPartitionStatsInfo is a helper method to define mock.On call
// - ctx context.Context
// - info *datapb.PartitionStatsInfo
func (_e *MockCompactionMeta_Expecter) CleanPartitionStatsInfo(info interface{}) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
	return &MockCompactionMeta_CleanPartitionStatsInfo_Call{Call: _e.mock.On("CleanPartitionStatsInfo", info)}
func (_e *MockCompactionMeta_Expecter) CleanPartitionStatsInfo(ctx interface{}, info interface{}) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
	return &MockCompactionMeta_CleanPartitionStatsInfo_Call{Call: _e.mock.On("CleanPartitionStatsInfo", ctx, info)}
}

func (_c *MockCompactionMeta_CleanPartitionStatsInfo_Call) Run(run func(info *datapb.PartitionStatsInfo)) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
func (_c *MockCompactionMeta_CleanPartitionStatsInfo_Call) Run(run func(ctx context.Context, info *datapb.PartitionStatsInfo)) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*datapb.PartitionStatsInfo))
		run(args[0].(context.Context), args[1].(*datapb.PartitionStatsInfo))
	})
	return _c
}

@ -117,14 +121,14 @@ func (_c *MockCompactionMeta_CleanPartitionStatsInfo_Call) Return(_a0 error) *Mo
	return _c
}

func (_c *MockCompactionMeta_CleanPartitionStatsInfo_Call) RunAndReturn(run func(*datapb.PartitionStatsInfo) error) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
func (_c *MockCompactionMeta_CleanPartitionStatsInfo_Call) RunAndReturn(run func(context.Context, *datapb.PartitionStatsInfo) error) *MockCompactionMeta_CleanPartitionStatsInfo_Call {
	_c.Call.Return(run)
	return _c
}

// CompleteCompactionMutation provides a mock function with given fields: t, result
func (_m *MockCompactionMeta) CompleteCompactionMutation(t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
	ret := _m.Called(t, result)
// CompleteCompactionMutation provides a mock function with given fields: ctx, t, result
func (_m *MockCompactionMeta) CompleteCompactionMutation(ctx context.Context, t *datapb.CompactionTask, result *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error) {
	ret := _m.Called(ctx, t, result)

	if len(ret) == 0 {
		panic("no return value specified for CompleteCompactionMutation")

@ -133,27 +137,27 @@ func (_m *MockCompactionMeta) CompleteCompactionMutation(t *datapb.CompactionTas
	var r0 []*SegmentInfo
	var r1 *segMetricMutation
	var r2 error
	if rf, ok := ret.Get(0).(func(*datapb.CompactionTask, *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)); ok {
		return rf(t, result)
	if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask, *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)); ok {
		return rf(ctx, t, result)
	}
	if rf, ok := ret.Get(0).(func(*datapb.CompactionTask, *datapb.CompactionPlanResult) []*SegmentInfo); ok {
		r0 = rf(t, result)
	if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask, *datapb.CompactionPlanResult) []*SegmentInfo); ok {
		r0 = rf(ctx, t, result)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*SegmentInfo)
		}
	}

	if rf, ok := ret.Get(1).(func(*datapb.CompactionTask, *datapb.CompactionPlanResult) *segMetricMutation); ok {
		r1 = rf(t, result)
	if rf, ok := ret.Get(1).(func(context.Context, *datapb.CompactionTask, *datapb.CompactionPlanResult) *segMetricMutation); ok {
		r1 = rf(ctx, t, result)
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(*segMetricMutation)
		}
	}

	if rf, ok := ret.Get(2).(func(*datapb.CompactionTask, *datapb.CompactionPlanResult) error); ok {
		r2 = rf(t, result)
	if rf, ok := ret.Get(2).(func(context.Context, *datapb.CompactionTask, *datapb.CompactionPlanResult) error); ok {
		r2 = rf(ctx, t, result)
	} else {
		r2 = ret.Error(2)
	}

@ -167,15 +171,16 @@ type MockCompactionMeta_CompleteCompactionMutation_Call struct {
}

// CompleteCompactionMutation is a helper method to define mock.On call
// - ctx context.Context
// - t *datapb.CompactionTask
// - result *datapb.CompactionPlanResult
func (_e *MockCompactionMeta_Expecter) CompleteCompactionMutation(t interface{}, result interface{}) *MockCompactionMeta_CompleteCompactionMutation_Call {
	return &MockCompactionMeta_CompleteCompactionMutation_Call{Call: _e.mock.On("CompleteCompactionMutation", t, result)}
func (_e *MockCompactionMeta_Expecter) CompleteCompactionMutation(ctx interface{}, t interface{}, result interface{}) *MockCompactionMeta_CompleteCompactionMutation_Call {
	return &MockCompactionMeta_CompleteCompactionMutation_Call{Call: _e.mock.On("CompleteCompactionMutation", ctx, t, result)}
}

func (_c *MockCompactionMeta_CompleteCompactionMutation_Call) Run(run func(t *datapb.CompactionTask, result *datapb.CompactionPlanResult)) *MockCompactionMeta_CompleteCompactionMutation_Call {
func (_c *MockCompactionMeta_CompleteCompactionMutation_Call) Run(run func(ctx context.Context, t *datapb.CompactionTask, result *datapb.CompactionPlanResult)) *MockCompactionMeta_CompleteCompactionMutation_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*datapb.CompactionTask), args[1].(*datapb.CompactionPlanResult))
		run(args[0].(context.Context), args[1].(*datapb.CompactionTask), args[2].(*datapb.CompactionPlanResult))
	})
	return _c
}

@ -185,22 +190,22 @@ func (_c *MockCompactionMeta_CompleteCompactionMutation_Call) Return(_a0 []*Segm
	return _c
}

func (_c *MockCompactionMeta_CompleteCompactionMutation_Call) RunAndReturn(run func(*datapb.CompactionTask, *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)) *MockCompactionMeta_CompleteCompactionMutation_Call {
func (_c *MockCompactionMeta_CompleteCompactionMutation_Call) RunAndReturn(run func(context.Context, *datapb.CompactionTask, *datapb.CompactionPlanResult) ([]*SegmentInfo, *segMetricMutation, error)) *MockCompactionMeta_CompleteCompactionMutation_Call {
	_c.Call.Return(run)
	return _c
}

// DropCompactionTask provides a mock function with given fields: task
func (_m *MockCompactionMeta) DropCompactionTask(task *datapb.CompactionTask) error {
	ret := _m.Called(task)
// DropCompactionTask provides a mock function with given fields: ctx, task
func (_m *MockCompactionMeta) DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
	ret := _m.Called(ctx, task)

	if len(ret) == 0 {
		panic("no return value specified for DropCompactionTask")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*datapb.CompactionTask) error); ok {
		r0 = rf(task)
	if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask) error); ok {
		r0 = rf(ctx, task)
	} else {
		r0 = ret.Error(0)
	}

@ -214,14 +219,15 @@ type MockCompactionMeta_DropCompactionTask_Call struct {
}

// DropCompactionTask is a helper method to define mock.On call
// - ctx context.Context
// - task *datapb.CompactionTask
func (_e *MockCompactionMeta_Expecter) DropCompactionTask(task interface{}) *MockCompactionMeta_DropCompactionTask_Call {
	return &MockCompactionMeta_DropCompactionTask_Call{Call: _e.mock.On("DropCompactionTask", task)}
func (_e *MockCompactionMeta_Expecter) DropCompactionTask(ctx interface{}, task interface{}) *MockCompactionMeta_DropCompactionTask_Call {
	return &MockCompactionMeta_DropCompactionTask_Call{Call: _e.mock.On("DropCompactionTask", ctx, task)}
}

func (_c *MockCompactionMeta_DropCompactionTask_Call) Run(run func(task *datapb.CompactionTask)) *MockCompactionMeta_DropCompactionTask_Call {
func (_c *MockCompactionMeta_DropCompactionTask_Call) Run(run func(ctx context.Context, task *datapb.CompactionTask)) *MockCompactionMeta_DropCompactionTask_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*datapb.CompactionTask))
		run(args[0].(context.Context), args[1].(*datapb.CompactionTask))
	})
	return _c
}

@ -231,7 +237,7 @@ func (_c *MockCompactionMeta_DropCompactionTask_Call) Return(_a0 error) *MockCom
	return _c
}

func (_c *MockCompactionMeta_DropCompactionTask_Call) RunAndReturn(run func(*datapb.CompactionTask) error) *MockCompactionMeta_DropCompactionTask_Call {
func (_c *MockCompactionMeta_DropCompactionTask_Call) RunAndReturn(run func(context.Context, *datapb.CompactionTask) error) *MockCompactionMeta_DropCompactionTask_Call {
	_c.Call.Return(run)
	return _c
}

@ -330,17 +336,17 @@ func (_c *MockCompactionMeta_GetCompactionTaskMeta_Call) RunAndReturn(run func()
	return _c
}

// GetCompactionTasks provides a mock function with given fields:
func (_m *MockCompactionMeta) GetCompactionTasks() map[int64][]*datapb.CompactionTask {
	ret := _m.Called()
// GetCompactionTasks provides a mock function with given fields: ctx
func (_m *MockCompactionMeta) GetCompactionTasks(ctx context.Context) map[int64][]*datapb.CompactionTask {
	ret := _m.Called(ctx)

	if len(ret) == 0 {
		panic("no return value specified for GetCompactionTasks")
	}

	var r0 map[int64][]*datapb.CompactionTask
	if rf, ok := ret.Get(0).(func() map[int64][]*datapb.CompactionTask); ok {
		r0 = rf()
	if rf, ok := ret.Get(0).(func(context.Context) map[int64][]*datapb.CompactionTask); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[int64][]*datapb.CompactionTask)

@ -356,13 +362,14 @@ type MockCompactionMeta_GetCompactionTasks_Call struct {
}

// GetCompactionTasks is a helper method to define mock.On call
func (_e *MockCompactionMeta_Expecter) GetCompactionTasks() *MockCompactionMeta_GetCompactionTasks_Call {
	return &MockCompactionMeta_GetCompactionTasks_Call{Call: _e.mock.On("GetCompactionTasks")}
// - ctx context.Context
func (_e *MockCompactionMeta_Expecter) GetCompactionTasks(ctx interface{}) *MockCompactionMeta_GetCompactionTasks_Call {
	return &MockCompactionMeta_GetCompactionTasks_Call{Call: _e.mock.On("GetCompactionTasks", ctx)}
}

func (_c *MockCompactionMeta_GetCompactionTasks_Call) Run(run func()) *MockCompactionMeta_GetCompactionTasks_Call {
func (_c *MockCompactionMeta_GetCompactionTasks_Call) Run(run func(ctx context.Context)) *MockCompactionMeta_GetCompactionTasks_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
		run(args[0].(context.Context))
	})
	return _c
}

@ -372,22 +379,22 @@ func (_c *MockCompactionMeta_GetCompactionTasks_Call) Return(_a0 map[int64][]*da
	return _c
}

func (_c *MockCompactionMeta_GetCompactionTasks_Call) RunAndReturn(run func() map[int64][]*datapb.CompactionTask) *MockCompactionMeta_GetCompactionTasks_Call {
func (_c *MockCompactionMeta_GetCompactionTasks_Call) RunAndReturn(run func(context.Context) map[int64][]*datapb.CompactionTask) *MockCompactionMeta_GetCompactionTasks_Call {
	_c.Call.Return(run)
	return _c
}

// GetCompactionTasksByTriggerID provides a mock function with given fields: triggerID
func (_m *MockCompactionMeta) GetCompactionTasksByTriggerID(triggerID int64) []*datapb.CompactionTask {
	ret := _m.Called(triggerID)
// GetCompactionTasksByTriggerID provides a mock function with given fields: ctx, triggerID
func (_m *MockCompactionMeta) GetCompactionTasksByTriggerID(ctx context.Context, triggerID int64) []*datapb.CompactionTask {
	ret := _m.Called(ctx, triggerID)

	if len(ret) == 0 {
		panic("no return value specified for GetCompactionTasksByTriggerID")
	}

	var r0 []*datapb.CompactionTask
	if rf, ok := ret.Get(0).(func(int64) []*datapb.CompactionTask); ok {
		r0 = rf(triggerID)
	if rf, ok := ret.Get(0).(func(context.Context, int64) []*datapb.CompactionTask); ok {
		r0 = rf(ctx, triggerID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*datapb.CompactionTask)

@ -403,14 +410,15 @@ type MockCompactionMeta_GetCompactionTasksByTriggerID_Call struct {
}

// GetCompactionTasksByTriggerID is a helper method to define mock.On call
// - ctx context.Context
// - triggerID int64
func (_e *MockCompactionMeta_Expecter) GetCompactionTasksByTriggerID(triggerID interface{}) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
	return &MockCompactionMeta_GetCompactionTasksByTriggerID_Call{Call: _e.mock.On("GetCompactionTasksByTriggerID", triggerID)}
func (_e *MockCompactionMeta_Expecter) GetCompactionTasksByTriggerID(ctx interface{}, triggerID interface{}) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
	return &MockCompactionMeta_GetCompactionTasksByTriggerID_Call{Call: _e.mock.On("GetCompactionTasksByTriggerID", ctx, triggerID)}
}

func (_c *MockCompactionMeta_GetCompactionTasksByTriggerID_Call) Run(run func(triggerID int64)) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
func (_c *MockCompactionMeta_GetCompactionTasksByTriggerID_Call) Run(run func(ctx context.Context, triggerID int64)) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
		run(args[0].(context.Context), args[1].(int64))
	})
	return _c
}

@ -420,22 +428,22 @@ func (_c *MockCompactionMeta_GetCompactionTasksByTriggerID_Call) Return(_a0 []*d
	return _c
}

func (_c *MockCompactionMeta_GetCompactionTasksByTriggerID_Call) RunAndReturn(run func(int64) []*datapb.CompactionTask) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
func (_c *MockCompactionMeta_GetCompactionTasksByTriggerID_Call) RunAndReturn(run func(context.Context, int64) []*datapb.CompactionTask) *MockCompactionMeta_GetCompactionTasksByTriggerID_Call {
	_c.Call.Return(run)
	return _c
}

// GetHealthySegment provides a mock function with given fields: segID
func (_m *MockCompactionMeta) GetHealthySegment(segID int64) *SegmentInfo {
	ret := _m.Called(segID)
// GetHealthySegment provides a mock function with given fields: ctx, segID
func (_m *MockCompactionMeta) GetHealthySegment(ctx context.Context, segID int64) *SegmentInfo {
	ret := _m.Called(ctx, segID)

	if len(ret) == 0 {
		panic("no return value specified for GetHealthySegment")
	}

	var r0 *SegmentInfo
	if rf, ok := ret.Get(0).(func(int64) *SegmentInfo); ok {
		r0 = rf(segID)
	if rf, ok := ret.Get(0).(func(context.Context, int64) *SegmentInfo); ok {
		r0 = rf(ctx, segID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*SegmentInfo)

@ -451,14 +459,15 @@ type MockCompactionMeta_GetHealthySegment_Call struct {
}

// GetHealthySegment is a helper method to define mock.On call
// - ctx context.Context
// - segID int64
func (_e *MockCompactionMeta_Expecter) GetHealthySegment(segID interface{}) *MockCompactionMeta_GetHealthySegment_Call {
	return &MockCompactionMeta_GetHealthySegment_Call{Call: _e.mock.On("GetHealthySegment", segID)}
func (_e *MockCompactionMeta_Expecter) GetHealthySegment(ctx interface{}, segID interface{}) *MockCompactionMeta_GetHealthySegment_Call {
	return &MockCompactionMeta_GetHealthySegment_Call{Call: _e.mock.On("GetHealthySegment", ctx, segID)}
}

func (_c *MockCompactionMeta_GetHealthySegment_Call) Run(run func(segID int64)) *MockCompactionMeta_GetHealthySegment_Call {
func (_c *MockCompactionMeta_GetHealthySegment_Call) Run(run func(ctx context.Context, segID int64)) *MockCompactionMeta_GetHealthySegment_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
		run(args[0].(context.Context), args[1].(int64))
	})
	return _c
}

@ -468,7 +477,7 @@ func (_c *MockCompactionMeta_GetHealthySegment_Call) Return(_a0 *SegmentInfo) *M
	return _c
}

func (_c *MockCompactionMeta_GetHealthySegment_Call) RunAndReturn(run func(int64) *SegmentInfo) *MockCompactionMeta_GetHealthySegment_Call {
func (_c *MockCompactionMeta_GetHealthySegment_Call) RunAndReturn(run func(context.Context, int64) *SegmentInfo) *MockCompactionMeta_GetHealthySegment_Call {
	_c.Call.Return(run)
	return _c
}

@ -567,17 +576,17 @@ func (_c *MockCompactionMeta_GetPartitionStatsMeta_Call) RunAndReturn(run func()
	return _c
}

// GetSegment provides a mock function with given fields: segID
func (_m *MockCompactionMeta) GetSegment(segID int64) *SegmentInfo {
	ret := _m.Called(segID)
// GetSegment provides a mock function with given fields: ctx, segID
func (_m *MockCompactionMeta) GetSegment(ctx context.Context, segID int64) *SegmentInfo {
	ret := _m.Called(ctx, segID)

	if len(ret) == 0 {
		panic("no return value specified for GetSegment")
	}

	var r0 *SegmentInfo
	if rf, ok := ret.Get(0).(func(int64) *SegmentInfo); ok {
		r0 = rf(segID)
	if rf, ok := ret.Get(0).(func(context.Context, int64) *SegmentInfo); ok {
		r0 = rf(ctx, segID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*SegmentInfo)

@ -593,14 +602,15 @@ type MockCompactionMeta_GetSegment_Call struct {
}

// GetSegment is a helper method to define mock.On call
// - ctx context.Context
// - segID int64
func (_e *MockCompactionMeta_Expecter) GetSegment(segID interface{}) *MockCompactionMeta_GetSegment_Call {
	return &MockCompactionMeta_GetSegment_Call{Call: _e.mock.On("GetSegment", segID)}
func (_e *MockCompactionMeta_Expecter) GetSegment(ctx interface{}, segID interface{}) *MockCompactionMeta_GetSegment_Call {
	return &MockCompactionMeta_GetSegment_Call{Call: _e.mock.On("GetSegment", ctx, segID)}
}

func (_c *MockCompactionMeta_GetSegment_Call) Run(run func(segID int64)) *MockCompactionMeta_GetSegment_Call {
func (_c *MockCompactionMeta_GetSegment_Call) Run(run func(ctx context.Context, segID int64)) *MockCompactionMeta_GetSegment_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
		run(args[0].(context.Context), args[1].(int64))
	})
	return _c
}

@ -610,22 +620,22 @@ func (_c *MockCompactionMeta_GetSegment_Call) Return(_a0 *SegmentInfo) *MockComp
	return _c
}

func (_c *MockCompactionMeta_GetSegment_Call) RunAndReturn(run func(int64) *SegmentInfo) *MockCompactionMeta_GetSegment_Call {
func (_c *MockCompactionMeta_GetSegment_Call) RunAndReturn(run func(context.Context, int64) *SegmentInfo) *MockCompactionMeta_GetSegment_Call {
	_c.Call.Return(run)
	return _c
}

// SaveCompactionTask provides a mock function with given fields: task
func (_m *MockCompactionMeta) SaveCompactionTask(task *datapb.CompactionTask) error {
	ret := _m.Called(task)
// SaveCompactionTask provides a mock function with given fields: ctx, task
func (_m *MockCompactionMeta) SaveCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
	ret := _m.Called(ctx, task)

	if len(ret) == 0 {
		panic("no return value specified for SaveCompactionTask")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*datapb.CompactionTask) error); ok {
		r0 = rf(task)
	if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask) error); ok {
		r0 = rf(ctx, task)
	} else {
		r0 = ret.Error(0)
	}

@ -639,14 +649,15 @@ type MockCompactionMeta_SaveCompactionTask_Call struct {
}

// SaveCompactionTask is a helper method to define mock.On call
// - ctx context.Context
// - task *datapb.CompactionTask
func (_e *MockCompactionMeta_Expecter) SaveCompactionTask(task interface{}) *MockCompactionMeta_SaveCompactionTask_Call {
	return &MockCompactionMeta_SaveCompactionTask_Call{Call: _e.mock.On("SaveCompactionTask", task)}
func (_e *MockCompactionMeta_Expecter) SaveCompactionTask(ctx interface{}, task interface{}) *MockCompactionMeta_SaveCompactionTask_Call {
	return &MockCompactionMeta_SaveCompactionTask_Call{Call: _e.mock.On("SaveCompactionTask", ctx, task)}
}

func (_c *MockCompactionMeta_SaveCompactionTask_Call) Run(run func(task *datapb.CompactionTask)) *MockCompactionMeta_SaveCompactionTask_Call {
func (_c *MockCompactionMeta_SaveCompactionTask_Call) Run(run func(ctx context.Context, task *datapb.CompactionTask)) *MockCompactionMeta_SaveCompactionTask_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*datapb.CompactionTask))
		run(args[0].(context.Context), args[1].(*datapb.CompactionTask))
	})
	return _c
}

@ -656,18 +667,19 @@ func (_c *MockCompactionMeta_SaveCompactionTask_Call) Return(_a0 error) *MockCom
	return _c
}

func (_c *MockCompactionMeta_SaveCompactionTask_Call) RunAndReturn(run func(*datapb.CompactionTask) error) *MockCompactionMeta_SaveCompactionTask_Call {
func (_c *MockCompactionMeta_SaveCompactionTask_Call) RunAndReturn(run func(context.Context, *datapb.CompactionTask) error) *MockCompactionMeta_SaveCompactionTask_Call {
	_c.Call.Return(run)
	return _c
}

// SelectSegments provides a mock function with given fields: filters
func (_m *MockCompactionMeta) SelectSegments(filters ...SegmentFilter) []*SegmentInfo {
// SelectSegments provides a mock function with given fields: ctx, filters
func (_m *MockCompactionMeta) SelectSegments(ctx context.Context, filters ...SegmentFilter) []*SegmentInfo {
	_va := make([]interface{}, len(filters))
	for _i := range filters {
		_va[_i] = filters[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

@ -676,8 +688,8 @@ func (_m *MockCompactionMeta) SelectSegments(filters ...SegmentFilter) []*Segmen
	}

	var r0 []*SegmentInfo
	if rf, ok := ret.Get(0).(func(...SegmentFilter) []*SegmentInfo); ok {
		r0 = rf(filters...)
	if rf, ok := ret.Get(0).(func(context.Context, ...SegmentFilter) []*SegmentInfo); ok {
		r0 = rf(ctx, filters...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*SegmentInfo)

@ -693,21 +705,22 @@ type MockCompactionMeta_SelectSegments_Call struct {
}

// SelectSegments is a helper method to define mock.On call
// - ctx context.Context
// - filters ...SegmentFilter
func (_e *MockCompactionMeta_Expecter) SelectSegments(filters ...interface{}) *MockCompactionMeta_SelectSegments_Call {
func (_e *MockCompactionMeta_Expecter) SelectSegments(ctx interface{}, filters ...interface{}) *MockCompactionMeta_SelectSegments_Call {
	return &MockCompactionMeta_SelectSegments_Call{Call: _e.mock.On("SelectSegments",
		append([]interface{}{}, filters...)...)}
		append([]interface{}{ctx}, filters...)...)}
}

func (_c *MockCompactionMeta_SelectSegments_Call) Run(run func(filters ...SegmentFilter)) *MockCompactionMeta_SelectSegments_Call {
func (_c *MockCompactionMeta_SelectSegments_Call) Run(run func(ctx context.Context, filters ...SegmentFilter)) *MockCompactionMeta_SelectSegments_Call {
	_c.Call.Run(func(args mock.Arguments) {
		variadicArgs := make([]SegmentFilter, len(args)-0)
		for i, a := range args[0:] {
		variadicArgs := make([]SegmentFilter, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(SegmentFilter)
			}
		}
		run(variadicArgs...)
		run(args[0].(context.Context), variadicArgs...)
	})
	return _c
}

@ -717,14 +730,14 @@ func (_c *MockCompactionMeta_SelectSegments_Call) Return(_a0 []*SegmentInfo) *Mo
	return _c
}

func (_c *MockCompactionMeta_SelectSegments_Call) RunAndReturn(run func(...SegmentFilter) []*SegmentInfo) *MockCompactionMeta_SelectSegments_Call {
func (_c *MockCompactionMeta_SelectSegments_Call) RunAndReturn(run func(context.Context, ...SegmentFilter) []*SegmentInfo) *MockCompactionMeta_SelectSegments_Call {
	_c.Call.Return(run)
	return _c
}

// SetSegmentsCompacting provides a mock function with given fields: segmentID, compacting
func (_m *MockCompactionMeta) SetSegmentsCompacting(segmentID []int64, compacting bool) {
	_m.Called(segmentID, compacting)
// SetSegmentsCompacting provides a mock function with given fields: ctx, segmentID, compacting
func (_m *MockCompactionMeta) SetSegmentsCompacting(ctx context.Context, segmentID []int64, compacting bool) {
	_m.Called(ctx, segmentID, compacting)
}

// MockCompactionMeta_SetSegmentsCompacting_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetSegmentsCompacting'

@ -733,15 +746,16 @@ type MockCompactionMeta_SetSegmentsCompacting_Call struct {
}

// SetSegmentsCompacting is a helper method to define mock.On call
// - ctx context.Context
// - segmentID []int64
// - compacting bool
func (_e *MockCompactionMeta_Expecter) SetSegmentsCompacting(segmentID interface{}, compacting interface{}) *MockCompactionMeta_SetSegmentsCompacting_Call {
	return &MockCompactionMeta_SetSegmentsCompacting_Call{Call: _e.mock.On("SetSegmentsCompacting", segmentID, compacting)}
func (_e *MockCompactionMeta_Expecter) SetSegmentsCompacting(ctx interface{}, segmentID interface{}, compacting interface{}) *MockCompactionMeta_SetSegmentsCompacting_Call {
	return &MockCompactionMeta_SetSegmentsCompacting_Call{Call: _e.mock.On("SetSegmentsCompacting", ctx, segmentID, compacting)}
}

func (_c *MockCompactionMeta_SetSegmentsCompacting_Call) Run(run func(segmentID []int64, compacting bool)) *MockCompactionMeta_SetSegmentsCompacting_Call {
func (_c *MockCompactionMeta_SetSegmentsCompacting_Call) Run(run func(ctx context.Context, segmentID []int64, compacting bool)) *MockCompactionMeta_SetSegmentsCompacting_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].([]int64), args[1].(bool))
		run(args[0].(context.Context), args[1].([]int64), args[2].(bool))
	})
	return _c
}

@ -751,18 +765,19 @@ func (_c *MockCompactionMeta_SetSegmentsCompacting_Call) Return() *MockCompactio
	return _c
}

func (_c *MockCompactionMeta_SetSegmentsCompacting_Call) RunAndReturn(run func([]int64, bool)) *MockCompactionMeta_SetSegmentsCompacting_Call {
func (_c *MockCompactionMeta_SetSegmentsCompacting_Call) RunAndReturn(run func(context.Context, []int64, bool)) *MockCompactionMeta_SetSegmentsCompacting_Call {
	_c.Call.Return(run)
	return _c
}

// UpdateSegmentsInfo provides a mock function with given fields: operators
func (_m *MockCompactionMeta) UpdateSegmentsInfo(operators ...UpdateOperator) error {
// UpdateSegmentsInfo provides a mock function with given fields: ctx, operators
func (_m *MockCompactionMeta) UpdateSegmentsInfo(ctx context.Context, operators ...UpdateOperator) error {
	_va := make([]interface{}, len(operators))
	for _i := range operators {
		_va[_i] = operators[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

@ -771,8 +786,8 @@ func (_m *MockCompactionMeta) UpdateSegmentsInfo(operators ...UpdateOperator) er
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(...UpdateOperator) error); ok {
		r0 = rf(operators...)
	if rf, ok := ret.Get(0).(func(context.Context, ...UpdateOperator) error); ok {
		r0 = rf(ctx, operators...)
	} else {
		r0 = ret.Error(0)
	}

@ -786,21 +801,22 @@ type MockCompactionMeta_UpdateSegmentsInfo_Call struct {
}

// UpdateSegmentsInfo is a helper method to define mock.On call
// - ctx context.Context
// - operators ...UpdateOperator
func (_e *MockCompactionMeta_Expecter) UpdateSegmentsInfo(operators ...interface{}) *MockCompactionMeta_UpdateSegmentsInfo_Call {
func (_e *MockCompactionMeta_Expecter) UpdateSegmentsInfo(ctx interface{}, operators ...interface{}) *MockCompactionMeta_UpdateSegmentsInfo_Call {
	return &MockCompactionMeta_UpdateSegmentsInfo_Call{Call: _e.mock.On("UpdateSegmentsInfo",
		append([]interface{}{}, operators...)...)}
		append([]interface{}{ctx}, operators...)...)}
}

func (_c *MockCompactionMeta_UpdateSegmentsInfo_Call) Run(run func(operators ...UpdateOperator)) *MockCompactionMeta_UpdateSegmentsInfo_Call {
func (_c *MockCompactionMeta_UpdateSegmentsInfo_Call) Run(run func(ctx context.Context, operators ...UpdateOperator)) *MockCompactionMeta_UpdateSegmentsInfo_Call {
	_c.Call.Run(func(args mock.Arguments) {
		variadicArgs := make([]UpdateOperator, len(args)-0)
		for i, a := range args[0:] {
		variadicArgs := make([]UpdateOperator, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(UpdateOperator)
			}
		}
		run(variadicArgs...)
		run(args[0].(context.Context), variadicArgs...)
	})
	return _c
}

@ -810,7 +826,7 @@ func (_c *MockCompactionMeta_UpdateSegmentsInfo_Call) Return(_a0 error) *MockCom
	return _c
}

func (_c *MockCompactionMeta_UpdateSegmentsInfo_Call) RunAndReturn(run func(...UpdateOperator) error) *MockCompactionMeta_UpdateSegmentsInfo_Call {
func (_c *MockCompactionMeta_UpdateSegmentsInfo_Call) RunAndReturn(run func(context.Context, ...UpdateOperator) error) *MockCompactionMeta_UpdateSegmentsInfo_Call {
	_c.Call.Return(run)
	return _c
}
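With the regenerated mock, every expectation now needs one extra matcher for the leading context argument. A minimal sketch of how a datacoord test might drive the updated MockCompactionMeta (the returned task values are illustrative, not taken from this commit):

	// inside package datacoord, in a _test.go file
	mockMeta := NewMockCompactionMeta(t)
	// first mock.Anything covers the new ctx parameter
	mockMeta.EXPECT().GetCompactionTasksByTriggerID(mock.Anything, mock.Anything).
		Return([]*datapb.CompactionTask{{State: datapb.CompactionTaskState_executing}})
	mockMeta.EXPECT().SaveCompactionTask(mock.Anything, mock.Anything).Return(nil)

	tasks := mockMeta.GetCompactionTasksByTriggerID(context.TODO(), 1)
	_ = tasks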
@ -3,6 +3,8 @@
package datacoord

import (
	context "context"

	datapb "github.com/milvus-io/milvus/internal/proto/datapb"
	mock "github.com/stretchr/testify/mock"
)

@ -66,17 +68,17 @@ func (_c *MockCompactionPlanContext_enqueueCompaction_Call) RunAndReturn(run fun
	return _c
}

// getCompactionInfo provides a mock function with given fields: signalID
func (_m *MockCompactionPlanContext) getCompactionInfo(signalID int64) *compactionInfo {
	ret := _m.Called(signalID)
// getCompactionInfo provides a mock function with given fields: ctx, signalID
func (_m *MockCompactionPlanContext) getCompactionInfo(ctx context.Context, signalID int64) *compactionInfo {
	ret := _m.Called(ctx, signalID)

	if len(ret) == 0 {
		panic("no return value specified for getCompactionInfo")
	}

	var r0 *compactionInfo
	if rf, ok := ret.Get(0).(func(int64) *compactionInfo); ok {
		r0 = rf(signalID)
	if rf, ok := ret.Get(0).(func(context.Context, int64) *compactionInfo); ok {
		r0 = rf(ctx, signalID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*compactionInfo)

@ -92,14 +94,15 @@ type MockCompactionPlanContext_getCompactionInfo_Call struct {
}

// getCompactionInfo is a helper method to define mock.On call
// - ctx context.Context
// - signalID int64
func (_e *MockCompactionPlanContext_Expecter) getCompactionInfo(signalID interface{}) *MockCompactionPlanContext_getCompactionInfo_Call {
	return &MockCompactionPlanContext_getCompactionInfo_Call{Call: _e.mock.On("getCompactionInfo", signalID)}
func (_e *MockCompactionPlanContext_Expecter) getCompactionInfo(ctx interface{}, signalID interface{}) *MockCompactionPlanContext_getCompactionInfo_Call {
	return &MockCompactionPlanContext_getCompactionInfo_Call{Call: _e.mock.On("getCompactionInfo", ctx, signalID)}
}

func (_c *MockCompactionPlanContext_getCompactionInfo_Call) Run(run func(signalID int64)) *MockCompactionPlanContext_getCompactionInfo_Call {
func (_c *MockCompactionPlanContext_getCompactionInfo_Call) Run(run func(ctx context.Context, signalID int64)) *MockCompactionPlanContext_getCompactionInfo_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
		run(args[0].(context.Context), args[1].(int64))
	})
	return _c
}

@ -109,7 +112,7 @@ func (_c *MockCompactionPlanContext_getCompactionInfo_Call) Return(_a0 *compacti
	return _c
}

func (_c *MockCompactionPlanContext_getCompactionInfo_Call) RunAndReturn(run func(int64) *compactionInfo) *MockCompactionPlanContext_getCompactionInfo_Call {
func (_c *MockCompactionPlanContext_getCompactionInfo_Call) RunAndReturn(run func(context.Context, int64) *compactionInfo) *MockCompactionPlanContext_getCompactionInfo_Call {
	_c.Call.Return(run)
	return _c
}
@ -124,10 +124,10 @@ func (psm *partitionStatsMeta) SavePartitionStatsInfo(info *datapb.PartitionStat
	return nil
}

func (psm *partitionStatsMeta) DropPartitionStatsInfo(info *datapb.PartitionStatsInfo) error {
func (psm *partitionStatsMeta) DropPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error {
	psm.Lock()
	defer psm.Unlock()
	if err := psm.catalog.DropPartitionStatsInfo(psm.ctx, info); err != nil {
	if err := psm.catalog.DropPartitionStatsInfo(ctx, info); err != nil {
		log.Error("meta update: drop PartitionStatsInfo info fail",
			zap.Int64("collectionID", info.GetCollectionID()),
			zap.Int64("partitionID", info.GetPartitionID()),
@ -87,7 +87,7 @@ type Manager interface {
	// GetFlushableSegments returns flushable segment ids
	GetFlushableSegments(ctx context.Context, channel string, ts Timestamp) ([]UniqueID, error)
	// ExpireAllocations notifies segment status to expire old allocations
	ExpireAllocations(channel string, ts Timestamp) error
	ExpireAllocations(ctx context.Context, channel string, ts Timestamp) error
	// DropSegmentsOfChannel drops all segments in a channel
	DropSegmentsOfChannel(ctx context.Context, channel string)
}

@ -261,7 +261,7 @@ func (s *SegmentManager) maybeResetLastExpireForSegments() error {
		return errors.New("global max expire ts is unavailable for segment manager")
	}
	for _, sID := range s.segments {
		if segment := s.meta.GetSegment(sID); segment != nil && segment.GetState() == commonpb.SegmentState_Growing {
		if segment := s.meta.GetSegment(context.TODO(), sID); segment != nil && segment.GetState() == commonpb.SegmentState_Growing {
			s.meta.SetLastExpire(sID, latestTs)
		}
	}

@ -288,7 +288,7 @@ func (s *SegmentManager) AllocSegment(ctx context.Context, collectionID UniqueID
	invalidSegments := make(map[UniqueID]struct{})
	segments := make([]*SegmentInfo, 0)
	for _, segmentID := range s.segments {
		segment := s.meta.GetHealthySegment(segmentID)
		segment := s.meta.GetHealthySegment(context.TODO(), segmentID)
		if segment == nil {
			invalidSegments[segmentID] = struct{}{}
			continue

@ -435,7 +435,7 @@ func (s *SegmentManager) DropSegment(ctx context.Context, segmentID UniqueID) {
			break
		}
	}
	segment := s.meta.GetHealthySegment(segmentID)
	segment := s.meta.GetHealthySegment(context.TODO(), segmentID)
	if segment == nil {
		log.Warn("Failed to get segment", zap.Int64("id", segmentID))
		return

@ -468,7 +468,7 @@ func (s *SegmentManager) SealAllSegments(ctx context.Context, collectionID Uniqu
	ret = append(ret, sealedSegments...)

	for _, id := range growingSegments {
		if err := s.meta.SetState(id, commonpb.SegmentState_Sealed); err != nil {
		if err := s.meta.SetState(ctx, id, commonpb.SegmentState_Sealed); err != nil {
			return nil, err
		}
		ret = append(ret, id)

@ -483,15 +483,15 @@ func (s *SegmentManager) GetFlushableSegments(ctx context.Context, channel strin
	s.mu.Lock()
	defer s.mu.Unlock()
	// TODO:move tryToSealSegment and dropEmptySealedSegment outside
	if err := s.tryToSealSegment(t, channel); err != nil {
	if err := s.tryToSealSegment(ctx, t, channel); err != nil {
		return nil, err
	}

	s.cleanupSealedSegment(t, channel)
	s.cleanupSealedSegment(ctx, t, channel)

	ret := make([]UniqueID, 0, len(s.segments))
	for _, id := range s.segments {
		info := s.meta.GetHealthySegment(id)
		info := s.meta.GetHealthySegment(ctx, id)
		if info == nil || info.InsertChannel != channel {
			continue
		}

@ -504,11 +504,11 @@ func (s *SegmentManager) GetFlushableSegments(ctx context.Context, channel strin
}

// ExpireAllocations notify segment status to expire old allocations
func (s *SegmentManager) ExpireAllocations(channel string, ts Timestamp) error {
func (s *SegmentManager) ExpireAllocations(ctx context.Context, channel string, ts Timestamp) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, id := range s.segments {
		segment := s.meta.GetHealthySegment(id)
		segment := s.meta.GetHealthySegment(ctx, id)
		if segment == nil || segment.InsertChannel != channel {
			continue
		}

@ -526,10 +526,10 @@ func (s *SegmentManager) ExpireAllocations(channel string, ts Timestamp) error {
	return nil
}

func (s *SegmentManager) cleanupSealedSegment(ts Timestamp, channel string) {
func (s *SegmentManager) cleanupSealedSegment(ctx context.Context, ts Timestamp, channel string) {
	valids := make([]int64, 0, len(s.segments))
	for _, id := range s.segments {
		segment := s.meta.GetHealthySegment(id)
		segment := s.meta.GetHealthySegment(ctx, id)
		if segment == nil || segment.InsertChannel != channel {
			valids = append(valids, id)
			continue

@ -537,7 +537,7 @@ func (s *SegmentManager) cleanupSealedSegment(ts Timestamp, channel string) {

		if isEmptySealedSegment(segment, ts) {
			log.Info("remove empty sealed segment", zap.Int64("collection", segment.CollectionID), zap.Int64("segment", id))
			s.meta.SetState(id, commonpb.SegmentState_Dropped)
			s.meta.SetState(ctx, id, commonpb.SegmentState_Dropped)
			continue
		}

@ -551,11 +551,11 @@ func isEmptySealedSegment(segment *SegmentInfo, ts Timestamp) bool {
}

// tryToSealSegment applies segment & channel seal policies
func (s *SegmentManager) tryToSealSegment(ts Timestamp, channel string) error {
func (s *SegmentManager) tryToSealSegment(ctx context.Context, ts Timestamp, channel string) error {
	channelInfo := make(map[string][]*SegmentInfo)
	sealedSegments := make(map[int64]struct{})
	for _, id := range s.segments {
		info := s.meta.GetHealthySegment(id)
		info := s.meta.GetHealthySegment(ctx, id)
		if info == nil || info.InsertChannel != channel {
			continue
		}

@ -567,7 +567,7 @@ func (s *SegmentManager) tryToSealSegment(ts Timestamp, channel string) error {
		for _, policy := range s.segmentSealPolicies {
			if shouldSeal, reason := policy.ShouldSeal(info, ts); shouldSeal {
				log.Info("Seal Segment for policy matched", zap.Int64("segmentID", info.GetID()), zap.String("reason", reason))
				if err := s.meta.SetState(id, commonpb.SegmentState_Sealed); err != nil {
				if err := s.meta.SetState(ctx, id, commonpb.SegmentState_Sealed); err != nil {
					return err
				}
				sealedSegments[id] = struct{}{}

@ -585,7 +585,7 @@ func (s *SegmentManager) tryToSealSegment(ts Timestamp, channel string) error {
			if info.State != commonpb.SegmentState_Growing {
				continue
			}
			if err := s.meta.SetState(info.GetID(), commonpb.SegmentState_Sealed); err != nil {
			if err := s.meta.SetState(ctx, info.GetID(), commonpb.SegmentState_Sealed); err != nil {
				return err
			}
			log.Info("seal segment for channel seal policy matched",

@ -604,7 +604,7 @@ func (s *SegmentManager) DropSegmentsOfChannel(ctx context.Context, channel stri

	validSegments := make([]int64, 0, len(s.segments))
	for _, sid := range s.segments {
		segment := s.meta.GetHealthySegment(sid)
		segment := s.meta.GetHealthySegment(ctx, sid)
		if segment == nil {
			continue
		}
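After this change the SegmentManager helpers no longer rely on a context stored on the meta object; callers pass one in, falling back to context.TODO() where no request context is available yet, as the call sites above do. A minimal usage sketch (the channel name is illustrative only):

	// ctx usually comes from the RPC or tick that triggered the expiry
	if err := segmentManager.ExpireAllocations(ctx, "by-dev-rootcoord-dml_0", ts); err != nil {
		log.Warn("expire allocations failed", zap.Error(err))
	}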
@ -150,7 +150,7 @@ func TestAllocSegment(t *testing.T) {
	assert.EqualValues(t, 1, len(allocations1))
	assert.EqualValues(t, 1, len(segmentManager.segments))

	err = meta.SetState(allocations1[0].SegmentID, commonpb.SegmentState_Dropped)
	err = meta.SetState(context.TODO(), allocations1[0].SegmentID, commonpb.SegmentState_Dropped)
	assert.NoError(t, err)

	allocations2, err := segmentManager.AllocSegment(ctx, collID, 100, "c1", 100)

@ -220,10 +220,10 @@ func TestLastExpireReset(t *testing.T) {
	meta.SetCurrentRows(segmentID1, bigRows)
	meta.SetCurrentRows(segmentID2, bigRows)
	meta.SetCurrentRows(segmentID3, smallRows)
	segmentManager.tryToSealSegment(expire1, channelName)
	assert.Equal(t, commonpb.SegmentState_Sealed, meta.GetSegment(segmentID1).GetState())
	assert.Equal(t, commonpb.SegmentState_Sealed, meta.GetSegment(segmentID2).GetState())
	assert.Equal(t, commonpb.SegmentState_Growing, meta.GetSegment(segmentID3).GetState())
	segmentManager.tryToSealSegment(context.TODO(), expire1, channelName)
	assert.Equal(t, commonpb.SegmentState_Sealed, meta.GetSegment(context.TODO(), segmentID1).GetState())
	assert.Equal(t, commonpb.SegmentState_Sealed, meta.GetSegment(context.TODO(), segmentID2).GetState())
	assert.Equal(t, commonpb.SegmentState_Growing, meta.GetSegment(context.TODO(), segmentID3).GetState())

	// pretend that dataCoord break down
	metaKV.Close()

@ -246,7 +246,7 @@ func TestLastExpireReset(t *testing.T) {
	restartedMeta.SetCurrentRows(segmentID3, smallRows)

	// verify lastExpire of growing and sealed segments
	segment1, segment2, segment3 := restartedMeta.GetSegment(segmentID1), restartedMeta.GetSegment(segmentID2), restartedMeta.GetSegment(segmentID3)
	segment1, segment2, segment3 := restartedMeta.GetSegment(context.TODO(), segmentID1), restartedMeta.GetSegment(context.TODO(), segmentID2), restartedMeta.GetSegment(context.TODO(), segmentID3)
	// segmentState should not be altered but growing segment's lastExpire has been reset to the latest
	assert.Equal(t, commonpb.SegmentState_Sealed, segment1.GetState())
	assert.Equal(t, commonpb.SegmentState_Sealed, segment2.GetState())

@ -328,7 +328,7 @@ func TestSaveSegmentsToMeta(t *testing.T) {
	assert.EqualValues(t, 1, len(allocations))
	_, err = segmentManager.SealAllSegments(context.Background(), collID, nil)
	assert.NoError(t, err)
	segment := meta.GetHealthySegment(allocations[0].SegmentID)
	segment := meta.GetHealthySegment(context.TODO(), allocations[0].SegmentID)
	assert.NotNil(t, segment)
	assert.EqualValues(t, segment.LastExpireTime, allocations[0].ExpireTime)
	assert.EqualValues(t, commonpb.SegmentState_Sealed, segment.State)

@ -350,7 +350,7 @@ func TestSaveSegmentsToMetaWithSpecificSegments(t *testing.T) {
	assert.EqualValues(t, 1, len(allocations))
	_, err = segmentManager.SealAllSegments(context.Background(), collID, []int64{allocations[0].SegmentID})
	assert.NoError(t, err)
	segment := meta.GetHealthySegment(allocations[0].SegmentID)
	segment := meta.GetHealthySegment(context.TODO(), allocations[0].SegmentID)
	assert.NotNil(t, segment)
	assert.EqualValues(t, segment.LastExpireTime, allocations[0].ExpireTime)
	assert.EqualValues(t, commonpb.SegmentState_Sealed, segment.State)

@ -371,11 +371,11 @@ func TestDropSegment(t *testing.T) {
	assert.NoError(t, err)
	assert.EqualValues(t, 1, len(allocations))
	segID := allocations[0].SegmentID
	segment := meta.GetHealthySegment(segID)
	segment := meta.GetHealthySegment(context.TODO(), segID)
	assert.NotNil(t, segment)

	segmentManager.DropSegment(context.Background(), segID)
	segment = meta.GetHealthySegment(segID)
	segment = meta.GetHealthySegment(context.TODO(), segID)
	assert.NotNil(t, segment)
}

@ -433,12 +433,12 @@ func TestExpireAllocation(t *testing.T) {
		}
	}

	segment := meta.GetHealthySegment(id)
	segment := meta.GetHealthySegment(context.TODO(), id)
	assert.NotNil(t, segment)
	assert.EqualValues(t, 100, len(segment.allocations))
	err = segmentManager.ExpireAllocations("ch1", maxts)
	err = segmentManager.ExpireAllocations(context.TODO(), "ch1", maxts)
	assert.NoError(t, err)
	segment = meta.GetHealthySegment(id)
	segment = meta.GetHealthySegment(context.TODO(), id)
	assert.NotNil(t, segment)
	assert.EqualValues(t, 0, len(segment.allocations))
}

@ -485,7 +485,7 @@ func TestGetFlushableSegments(t *testing.T) {
		ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocations[0].ExpireTime)
		assert.NoError(t, err)
		assert.Empty(t, ids)
		assert.Nil(t, meta.GetHealthySegment(allocations[0].SegmentID))
		assert.Nil(t, meta.GetHealthySegment(context.TODO(), allocations[0].SegmentID))
	})
}

@ -507,7 +507,7 @@ func TestTryToSealSegment(t *testing.T) {

	ts, err := segmentManager.allocator.AllocTimestamp(context.Background())
	assert.NoError(t, err)
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.NoError(t, err)

	for _, seg := range segmentManager.meta.segments.segments {

@ -532,7 +532,7 @@ func TestTryToSealSegment(t *testing.T) {

	ts, err := segmentManager.allocator.AllocTimestamp(context.Background())
	assert.NoError(t, err)
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.NoError(t, err)

	for _, seg := range segmentManager.meta.segments.segments {

@ -559,7 +559,7 @@ func TestTryToSealSegment(t *testing.T) {

	ts, err := segmentManager.allocator.AllocTimestamp(context.Background())
	assert.NoError(t, err)
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.NoError(t, err)

	for _, seg := range segmentManager.meta.segments.segments {

@ -587,7 +587,7 @@ func TestTryToSealSegment(t *testing.T) {

	// No seal polices
	{
		err = segmentManager.tryToSealSegment(ts, "c1")
		err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
		assert.NoError(t, err)
		segments := segmentManager.meta.segments.segments
		assert.Equal(t, 1, len(segments))

@ -613,7 +613,7 @@ func TestTryToSealSegment(t *testing.T) {
			},
		},
	}
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.NoError(t, err)
	seg = segmentManager.meta.segments.segments[seg.ID]
	assert.Equal(t, commonpb.SegmentState_Growing, seg.GetState())

@ -641,7 +641,7 @@ func TestTryToSealSegment(t *testing.T) {
			},
		},
	}
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.NoError(t, err)
	seg = segmentManager.meta.segments.segments[seg.ID]
	assert.Equal(t, commonpb.SegmentState_Sealed, seg.GetState())

@ -674,7 +674,7 @@ func TestTryToSealSegment(t *testing.T) {

	ts, err := segmentManager.allocator.AllocTimestamp(context.Background())
	assert.NoError(t, err)
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.Error(t, err)
	})

@ -703,7 +703,7 @@ func TestTryToSealSegment(t *testing.T) {

	ts, err := segmentManager.allocator.AllocTimestamp(context.Background())
	assert.NoError(t, err)
	err = segmentManager.tryToSealSegment(ts, "c1")
	err = segmentManager.tryToSealSegment(context.TODO(), ts, "c1")
	assert.Error(t, err)
	})
}
@ -408,7 +408,7 @@ func (s *Server) initDataCoord() error {

	s.initGarbageCollection(storageCli)

	s.importMeta, err = NewImportMeta(s.meta.catalog)
	s.importMeta, err = NewImportMeta(s.ctx, s.meta.catalog)
	if err != nil {
		return err
	}

@ -765,9 +765,9 @@ func (s *Server) startTaskScheduler() {
	s.startIndexService(s.serverLoopCtx)
}

func (s *Server) updateSegmentStatistics(stats []*commonpb.SegmentStats) {
func (s *Server) updateSegmentStatistics(ctx context.Context, stats []*commonpb.SegmentStats) {
	for _, stat := range stats {
		segment := s.meta.GetSegment(stat.GetSegmentID())
		segment := s.meta.GetSegment(ctx, stat.GetSegmentID())
		if segment == nil {
			log.Warn("skip updating row number for not exist segment",
				zap.Int64("segmentID", stat.GetSegmentID()),

@ -786,7 +786,7 @@ func (s *Server) updateSegmentStatistics(stats []*commonpb.SegmentStats) {
		if segment.currRows < stat.GetNumRows() {
			log.Debug("Updating segment number of rows",
				zap.Int64("segmentID", stat.GetSegmentID()),
				zap.Int64("old value", s.meta.GetSegment(stat.GetSegmentID()).GetNumOfRows()),
				zap.Int64("old value", s.meta.GetSegment(ctx, stat.GetSegmentID()).GetNumOfRows()),
				zap.Int64("new value", stat.GetNumRows()),
			)
			s.meta.SetCurrentRows(stat.GetSegmentID(), stat.GetNumRows())

@ -794,10 +794,10 @@ func (s *Server) updateSegmentStatistics(stats []*commonpb.SegmentStats) {
	}
}

func (s *Server) getFlushableSegmentsInfo(flushableIDs []int64) []*SegmentInfo {
func (s *Server) getFlushableSegmentsInfo(ctx context.Context, flushableIDs []int64) []*SegmentInfo {
	res := make([]*SegmentInfo, 0, len(flushableIDs))
	for _, id := range flushableIDs {
		sinfo := s.meta.GetHealthySegment(id)
		sinfo := s.meta.GetHealthySegment(ctx, id)
		if sinfo == nil {
			log.Error("get segment from meta error", zap.Int64("id", id))
			continue

@ -1006,7 +1006,7 @@ func (s *Server) startFlushLoop(ctx context.Context) {
// 3. change segment state to `Flushed` in meta
func (s *Server) postFlush(ctx context.Context, segmentID UniqueID) error {
	log := log.Ctx(ctx)
	segment := s.meta.GetHealthySegment(segmentID)
	segment := s.meta.GetHealthySegment(ctx, segmentID)
	if segment == nil {
		return merr.WrapErrSegmentNotFound(segmentID, "segment not found, might be a faked segment, ignore post flush")
	}

@ -1014,7 +1014,7 @@ func (s *Server) postFlush(ctx context.Context, segmentID UniqueID) error {
	var operators []UpdateOperator
	operators = append(operators, SetSegmentIsInvisible(segmentID, true))
	operators = append(operators, UpdateStatusOperator(segmentID, commonpb.SegmentState_Flushed))
	err := s.meta.UpdateSegmentsInfo(operators...)
	err := s.meta.UpdateSegmentsInfo(ctx, operators...)
	if err != nil {
		log.Warn("flush segment complete failed", zap.Error(err))
		return err

@ -1152,7 +1152,7 @@ func (s *Server) registerMetricsRequest() {

	s.metricsRequest.RegisterMetricsRequest(metricsinfo.ImportTaskKey,
		func(ctx context.Context, req *milvuspb.GetMetricsRequest, jsonReq gjson.Result) (string, error) {
			return s.importMeta.TaskStatsJSON(), nil
			return s.importMeta.TaskStatsJSON(ctx), nil
		})

	s.metricsRequest.RegisterMetricsRequest(metricsinfo.CompactionTaskKey,
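The server-side helpers now thread the caller's context straight into the meta lookups instead of letting meta fall back to its own stored context. A hedged sketch of what a call site looks like after the change (the wrapper function name here is assumed, not shown in these hunks):

	// e.g. inside a handler that receives segment statistics from a datanode
	func (s *Server) handleSegmentStats(ctx context.Context, stats []*commonpb.SegmentStats) {
		s.updateSegmentStatistics(ctx, stats) // ctx now reaches s.meta.GetSegment
	}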
@ -431,7 +431,7 @@ func TestGetSegmentInfo(t *testing.T) {
	assert.Equal(t, 0, len(resp.GetChannelCheckpoint()))

	// with nil insert channel of segment
	err = svr.meta.UpdateChannelCheckpoint(mockVChannel, pos)
	err = svr.meta.UpdateChannelCheckpoint(context.TODO(), mockVChannel, pos)
	assert.NoError(t, err)
	resp, err = svr.GetSegmentInfo(svr.ctx, req)
	assert.NoError(t, err)

@ -856,7 +856,7 @@ func (s *spySegmentManager) GetFlushableSegments(ctx context.Context, channel st
}

// ExpireAllocations notifies segment status to expire old allocations
func (s *spySegmentManager) ExpireAllocations(channel string, ts Timestamp) error {
func (s *spySegmentManager) ExpireAllocations(ctx context.Context, channel string, ts Timestamp) error {
	return nil
}

@ -1117,7 +1117,7 @@ func TestGetChannelSeekPosition(t *testing.T) {
			assert.NoError(t, err)
		}
		if test.channelCP != nil {
			err := svr.meta.UpdateChannelCheckpoint(test.channelCP.ChannelName, test.channelCP)
			err := svr.meta.UpdateChannelCheckpoint(context.TODO(), test.channelCP.ChannelName, test.channelCP)
			assert.NoError(t, err)
		}

@ -1197,14 +1197,14 @@ func TestGetRecoveryInfo(t *testing.T) {
		Schema: newTestSchema(),
	})

	err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
	err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
		ChannelName: "vchan1",
		Timestamp:   10,
		MsgID:       []byte{0, 0, 0, 0, 0, 0, 0, 0},
	})
	assert.NoError(t, err)

	err = svr.meta.indexMeta.CreateIndex(&model.Index{
	err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
		TenantID:     "",
		CollectionID: 0,
		FieldID:      2,

@ -1253,7 +1253,7 @@ func TestGetRecoveryInfo(t *testing.T) {
	assert.NoError(t, err)
	err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg2))
	assert.NoError(t, err)
	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
	err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
		SegmentID: seg1.ID,
		BuildID:   seg1.ID,
	})

@ -1263,7 +1263,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		State:   commonpb.IndexState_Finished,
	})
	assert.NoError(t, err)
	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
	err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
		SegmentID: seg2.ID,
		BuildID:   seg2.ID,
	})

@ -1307,7 +1307,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		Schema: newTestSchema(),
	})

	err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
	err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
		ChannelName: "vchan1",
		Timestamp:   0,
		MsgID:       []byte{0, 0, 0, 0, 0, 0, 0, 0},

@ -1427,7 +1427,7 @@ func TestGetRecoveryInfo(t *testing.T) {
	err := svr.meta.AddSegment(context.TODO(), NewSegmentInfo(segment))
	assert.NoError(t, err)

	err = svr.meta.indexMeta.CreateIndex(&model.Index{
	err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
		TenantID:     "",
		CollectionID: 0,
		FieldID:      2,

@ -1435,7 +1435,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		IndexName:    "",
	})
	assert.NoError(t, err)
	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
	err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
		SegmentID: segment.ID,
		BuildID:   segment.ID,
	})

@ -1481,7 +1481,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		Schema: newTestSchema(),
	})

	err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
	err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
		ChannelName: "vchan1",
		Timestamp:   0,
		MsgID:       []byte{0, 0, 0, 0, 0, 0, 0, 0},

@ -1527,7 +1527,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		Schema: newTestSchema(),
	})

	err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
	err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
		ChannelName: "vchan1",
		Timestamp:   0,
		MsgID:       []byte{0, 0, 0, 0, 0, 0, 0, 0},

@ -1568,7 +1568,7 @@ func TestGetRecoveryInfo(t *testing.T) {
		Schema: newTestSchema(),
	})

	err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
	err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
		ChannelName: "vchan1",
		Timestamp:   0,
		MsgID:       []byte{0, 0, 0, 0, 0, 0, 0, 0},

@ -1592,7 +1592,7 @@ func TestGetRecoveryInfo(t *testing.T) {
	assert.NoError(t, err)
	err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg5))
	assert.NoError(t, err)
	err = svr.meta.indexMeta.CreateIndex(&model.Index{
	err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
		TenantID:     "",
		CollectionID: 0,
		FieldID:      2,

@ -1654,7 +1654,7 @@ func TestGetCompactionState(t *testing.T) {
	svr.stateCode.Store(commonpb.StateCode_Healthy)

	mockHandler := NewMockCompactionPlanContext(t)
	mockHandler.EXPECT().getCompactionInfo(mock.Anything).Return(&compactionInfo{
	mockHandler.EXPECT().getCompactionInfo(mock.Anything, mock.Anything).Return(&compactionInfo{
		state: commonpb.CompactionState_Completed,
	})
	svr.compactionHandler = mockHandler

@ -1667,7 +1667,7 @@ func TestGetCompactionState(t *testing.T) {
	svr := &Server{}
	svr.stateCode.Store(commonpb.StateCode_Healthy)
	mockMeta := NewMockCompactionMeta(t)
	mockMeta.EXPECT().GetCompactionTasksByTriggerID(mock.Anything).Return(
	mockMeta.EXPECT().GetCompactionTasksByTriggerID(mock.Anything, mock.Anything).Return(
		[]*datapb.CompactionTask{
			{State: datapb.CompactionTaskState_executing},
			{State: datapb.CompactionTaskState_executing},

@ -1771,7 +1771,7 @@ func TestGetCompactionStateWithPlans(t *testing.T) {
	svr.stateCode.Store(commonpb.StateCode_Healthy)

	mockHandler := NewMockCompactionPlanContext(t)
	mockHandler.EXPECT().getCompactionInfo(mock.Anything).Return(&compactionInfo{
	mockHandler.EXPECT().getCompactionInfo(mock.Anything, mock.Anything).Return(&compactionInfo{
		state:        commonpb.CompactionState_Executing,
		executingCnt: 1,
	})

@ -2271,7 +2271,7 @@ func TestDataCoord_SegmentStatistics(t *testing.T) {
	})
	assert.NoError(t, err)

	assert.Equal(t, svr.meta.GetHealthySegment(100).currRows, int64(1))
	assert.Equal(t, svr.meta.GetHealthySegment(context.TODO(), 100).currRows, int64(1))
	assert.Equal(t, commonpb.ErrorCode_Success, status.GetErrorCode())
	closeTestServer(t, svr)
	})

@ -2298,7 +2298,7 @@ func TestDataCoord_SegmentStatistics(t *testing.T) {
	})
	assert.NoError(t, err)

	assert.Equal(t, svr.meta.GetHealthySegment(100).currRows, int64(0))
	assert.Equal(t, svr.meta.GetHealthySegment(context.TODO(), 100).currRows, int64(0))
	assert.Equal(t, commonpb.ErrorCode_Success, status.GetErrorCode())
	closeTestServer(t, svr)
	})
@ -128,7 +128,7 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
sealedSegmentsIDDict[sealedSegmentID] = true
}

segments := s.meta.GetSegmentsOfCollection(req.GetCollectionID())
segments := s.meta.GetSegmentsOfCollection(ctx, req.GetCollectionID())
flushSegmentIDs := make([]UniqueID, 0, len(segments))
for _, segment := range segments {
if segment != nil &&

@ -295,7 +295,7 @@ func (s *Server) GetSegmentStates(ctx context.Context, req *datapb.GetSegmentSta
state := &datapb.SegmentStateInfo{
SegmentID: segmentID,
}
segmentInfo := s.meta.GetHealthySegment(segmentID)
segmentInfo := s.meta.GetHealthySegment(ctx, segmentID)
if segmentInfo == nil {
state.State = commonpb.SegmentState_NotExist
} else {

@ -316,7 +316,7 @@ func (s *Server) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsert
}, nil
}

segment := s.meta.GetHealthySegment(req.GetSegmentID())
segment := s.meta.GetHealthySegment(ctx, req.GetSegmentID())
if segment == nil {
return &datapb.GetInsertBinlogPathsResponse{
Status: merr.Status(merr.WrapErrSegmentNotFound(req.GetSegmentID())),

@ -394,7 +394,7 @@ func (s *Server) GetPartitionStatistics(ctx context.Context, req *datapb.GetPart
nums = s.meta.GetNumRowsOfCollection(req.CollectionID)
}
for _, partID := range req.GetPartitionIDs() {
num := s.meta.GetNumRowsOfPartition(req.CollectionID, partID)
num := s.meta.GetNumRowsOfPartition(ctx, req.CollectionID, partID)
nums += num
}
resp.Stats = append(resp.Stats, &commonpb.KeyValuePair{Key: "row_count", Value: strconv.FormatInt(nums, 10)})

@ -427,7 +427,7 @@ func (s *Server) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoR
for _, id := range req.SegmentIDs {
var info *SegmentInfo
if req.IncludeUnHealthy {
info = s.meta.GetSegment(id)
info = s.meta.GetSegment(ctx, id)
// TODO: GetCompactionTo should be removed and add into GetSegment method and protected by lock.
// Too much modification need to be applied to SegmentInfo, a refactor is needed.
children, ok := s.meta.GetCompactionTo(id)

@ -451,7 +451,7 @@ func (s *Server) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoR
segmentutil.ReCalcRowCount(info.SegmentInfo, clonedInfo.SegmentInfo)
infos = append(infos, clonedInfo.SegmentInfo)
} else {
info = s.meta.GetHealthySegment(id)
info = s.meta.GetHealthySegment(ctx, id)
if info == nil {
err := merr.WrapErrSegmentNotFound(id)
resp.Status = merr.Status(err)

@ -518,7 +518,7 @@ func (s *Server) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
if req.GetSegLevel() == datapb.SegmentLevel_L0 {
operators = append(operators, CreateL0Operator(req.GetCollectionID(), req.GetPartitionID(), req.GetSegmentID(), req.GetChannel()))
} else {
segment := s.meta.GetSegment(req.GetSegmentID())
segment := s.meta.GetSegment(ctx, req.GetSegmentID())
// validate level one segment
if segment == nil {
err := merr.WrapErrSegmentNotFound(req.GetSegmentID())

@ -558,7 +558,7 @@ func (s *Server) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
)

// Update segment info in memory and meta.
if err := s.meta.UpdateSegmentsInfo(operators...); err != nil {
if err := s.meta.UpdateSegmentsInfo(ctx, operators...); err != nil {
log.Error("save binlog and checkpoints failed", zap.Error(err))
return merr.Status(err), nil
}

@ -632,7 +632,7 @@ func (s *Server) DropVirtualChannel(ctx context.Context, req *datapb.DropVirtual
segments = append(segments, segment)
}

err := s.meta.UpdateDropChannelSegmentInfo(channel, segments)
err := s.meta.UpdateDropChannelSegmentInfo(ctx, channel, segments)
if err != nil {
log.Error("Update Drop Channel segment info failed", zap.String("channel", channel), zap.Error(err))
resp.Status = merr.Status(err)

@ -665,7 +665,7 @@ func (s *Server) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStat
Status: merr.Status(err),
}, nil
}
err := s.meta.SetState(req.GetSegmentId(), req.GetNewState())
err := s.meta.SetState(ctx, req.GetSegmentId(), req.GetNewState())
if err != nil {
log.Error("failed to updated segment state in dataCoord meta",
zap.Int64("segmentID", req.SegmentId),

@ -758,7 +758,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
segmentsNumOfRows := make(map[UniqueID]int64)
segment2TextStatsLogs := make(map[UniqueID]map[UniqueID]*datapb.TextIndexStats)
for id := range flushedIDs {
segment := s.meta.GetSegment(id)
segment := s.meta.GetSegment(ctx, id)
if segment == nil {
err := merr.WrapErrSegmentNotFound(id)
log.Warn("failed to get segment", zap.Int64("segmentID", id))

@ -882,7 +882,7 @@ func (s *Server) GetRecoveryInfoV2(ctx context.Context, req *datapb.GetRecoveryI

segmentInfos := make([]*datapb.SegmentInfo, 0)
for id := range flushedIDs {
segment := s.meta.GetSegment(id)
segment := s.meta.GetSegment(ctx, id)
if segment == nil {
err := merr.WrapErrSegmentNotFound(id)
log.Warn("failed to get segment", zap.Int64("segmentID", id))

@ -993,13 +993,13 @@ func (s *Server) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedS
}
var segmentIDs []UniqueID
if partitionID < 0 {
segmentIDs = s.meta.GetSegmentsIDOfCollectionWithDropped(collectionID)
segmentIDs = s.meta.GetSegmentsIDOfCollectionWithDropped(ctx, collectionID)
} else {
segmentIDs = s.meta.GetSegmentsIDOfPartitionWithDropped(collectionID, partitionID)
segmentIDs = s.meta.GetSegmentsIDOfPartitionWithDropped(ctx, collectionID, partitionID)
}
ret := make([]UniqueID, 0, len(segmentIDs))
for _, id := range segmentIDs {
segment := s.meta.GetSegment(id)
segment := s.meta.GetSegment(ctx, id)
// if this segment == nil, we assume this segment has been gc
if segment == nil ||
(segment.GetState() != commonpb.SegmentState_Dropped &&

@ -1038,9 +1038,9 @@ func (s *Server) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegment
}
var segmentIDs []UniqueID
if partitionID < 0 {
segmentIDs = s.meta.GetSegmentsIDOfCollection(collectionID)
segmentIDs = s.meta.GetSegmentsIDOfCollection(ctx, collectionID)
} else {
segmentIDs = s.meta.GetSegmentsIDOfPartition(collectionID, partitionID)
segmentIDs = s.meta.GetSegmentsIDOfPartition(ctx, collectionID, partitionID)
}
ret := make([]UniqueID, 0, len(segmentIDs))

@ -1049,7 +1049,7 @@ func (s *Server) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegment
statesDict[state] = true
}
for _, id := range segmentIDs {
segment := s.meta.GetHealthySegment(id)
segment := s.meta.GetHealthySegment(ctx, id)
if segment != nil && statesDict[segment.GetState()] {
ret = append(ret, id)
}

@ -1179,7 +1179,7 @@ func (s *Server) GetCompactionState(ctx context.Context, req *milvuspb.GetCompac
return resp, nil
}

info := s.compactionHandler.getCompactionInfo(req.GetCompactionID())
info := s.compactionHandler.getCompactionInfo(ctx, req.GetCompactionID())

resp.State = info.state
resp.ExecutingPlanNo = int64(info.executingCnt)

@ -1213,7 +1213,7 @@ func (s *Server) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.
return resp, nil
}

info := s.compactionHandler.getCompactionInfo(req.GetCompactionID())
info := s.compactionHandler.getCompactionInfo(ctx, req.GetCompactionID())
resp.State = info.state
resp.MergeInfos = lo.MapToSlice[int64, *milvuspb.CompactionMergeInfo](info.mergeInfos, func(_ int64, merge *milvuspb.CompactionMergeInfo) *milvuspb.CompactionMergeInfo {
return merge

@ -1260,7 +1260,7 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
startPos := toMsgPosition(channelName, req.GetStartPositions())
if startPos != nil {
startPos.Timestamp = req.GetCreateTimestamp()
if err := s.meta.UpdateChannelCheckpoint(channelName, startPos); err != nil {
if err := s.meta.UpdateChannelCheckpoint(ctx, channelName, startPos); err != nil {
log.Warn("failed to init channel checkpoint, meta update error", zap.String("channel", channelName), zap.Error(err))
}
} else {

@ -1287,7 +1287,7 @@ func (s *Server) GetFlushState(ctx context.Context, req *datapb.GetFlushStateReq
if len(req.GetSegmentIDs()) > 0 {
var unflushed []UniqueID
for _, sid := range req.GetSegmentIDs() {
segment := s.meta.GetHealthySegment(sid)
segment := s.meta.GetHealthySegment(ctx, sid)
// segment is nil if it was compacted, or it's an empty segment and is set to dropped
if segment == nil || isFlushState(segment.GetState()) {
continue

@ -1390,7 +1390,7 @@ func (s *Server) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
return merr.Status(err), nil
}
s.updateSegmentStatistics(req.GetStats())
s.updateSegmentStatistics(ctx, req.GetStats())
return merr.Success(), nil
}

@ -1409,7 +1409,7 @@ func (s *Server) UpdateChannelCheckpoint(ctx context.Context, req *datapb.Update
log.Warn("node is not matched with channel", zap.String("channel", channel), zap.Int64("nodeID", nodeID))
return merr.Status(merr.WrapErrChannelNotFound(channel, fmt.Sprintf("from node %d", nodeID))), nil
}
err := s.meta.UpdateChannelCheckpoint(req.GetVChannel(), req.GetPosition())
err := s.meta.UpdateChannelCheckpoint(ctx, req.GetVChannel(), req.GetPosition())
if err != nil {
log.Warn("failed to UpdateChannelCheckpoint", zap.String("vChannel", req.GetVChannel()), zap.Error(err))
return merr.Status(err), nil

@ -1426,7 +1426,7 @@ func (s *Server) UpdateChannelCheckpoint(ctx context.Context, req *datapb.Update
return matched
})

err := s.meta.UpdateChannelCheckpoints(checkpoints)
err := s.meta.UpdateChannelCheckpoints(ctx, checkpoints)
if err != nil {
log.Warn("failed to update channel checkpoint", zap.Error(err))
return merr.Status(err), nil

@ -1490,9 +1490,9 @@ func (s *Server) handleDataNodeTtMsg(ctx context.Context, ttMsg *msgpb.DataNodeT
WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), pChannelName).
Set(float64(sub))

s.updateSegmentStatistics(segmentStats)
s.updateSegmentStatistics(ctx, segmentStats)

if err := s.segmentManager.ExpireAllocations(channel, ts); err != nil {
if err := s.segmentManager.ExpireAllocations(ctx, channel, ts); err != nil {
log.Warn("failed to expire allocations", zap.Error(err))
return err
}

@ -1502,7 +1502,7 @@ func (s *Server) handleDataNodeTtMsg(ctx context.Context, ttMsg *msgpb.DataNodeT
log.Warn("failed to get flushable segments", zap.Error(err))
return err
}
flushableSegments := s.getFlushableSegmentsInfo(flushableIDs)
flushableSegments := s.getFlushableSegmentsInfo(ctx, flushableIDs)
if len(flushableSegments) == 0 {
return nil
}

@ -1531,7 +1531,7 @@ func (s *Server) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmen
log.Info("marking segments dropped", zap.Int64s("segments", req.GetSegmentIds()))
var err error
for _, segID := range req.GetSegmentIds() {
if err = s.meta.SetState(segID, commonpb.SegmentState_Dropped); err != nil {
if err = s.meta.SetState(ctx, segID, commonpb.SegmentState_Dropped); err != nil {
// Fail-open.
log.Error("failed to set segment state as dropped", zap.Int64("segmentID", segID))
break

@ -1699,7 +1699,7 @@ func (s *Server) ImportV2(ctx context.Context, in *internalpb.ImportRequestInter

// Check if the number of jobs exceeds the limit.
maxNum := paramtable.Get().DataCoordCfg.MaxImportJobNum.GetAsInt()
executingNum := s.importMeta.CountJobBy(WithoutJobStates(internalpb.ImportJobState_Completed, internalpb.ImportJobState_Failed))
executingNum := s.importMeta.CountJobBy(ctx, WithoutJobStates(internalpb.ImportJobState_Completed, internalpb.ImportJobState_Failed))
if executingNum >= maxNum {
resp.Status = merr.Status(merr.WrapErrImportFailed(
fmt.Sprintf("The number of jobs has reached the limit, please try again later. " +

@ -1737,7 +1737,7 @@ func (s *Server) ImportV2(ctx context.Context, in *internalpb.ImportRequestInter
},
tr: timerecord.NewTimeRecorder("import job"),
}
err = s.importMeta.AddJob(job)
err = s.importMeta.AddJob(ctx, job)
if err != nil {
resp.Status = merr.Status(merr.WrapErrImportFailed(fmt.Sprint("add import job failed, err=%w", err)))
return resp, nil

@ -1764,7 +1764,7 @@ func (s *Server) GetImportProgress(ctx context.Context, in *internalpb.GetImport
resp.Status = merr.Status(merr.WrapErrImportFailed(fmt.Sprint("parse job id failed, err=%w", err)))
return resp, nil
}
job := s.importMeta.GetJob(jobID)
job := s.importMeta.GetJob(ctx, jobID)
if job == nil {
resp.Status = merr.Status(merr.WrapErrImportFailed(fmt.Sprintf("import job does not exist, jobID=%d", jobID)))
return resp, nil

@ -1800,9 +1800,9 @@ func (s *Server) ListImports(ctx context.Context, req *internalpb.ListImportsReq

var jobs []ImportJob
if req.GetCollectionID() != 0 {
jobs = s.importMeta.GetJobBy(WithCollectionID(req.GetCollectionID()))
jobs = s.importMeta.GetJobBy(ctx, WithCollectionID(req.GetCollectionID()))
} else {
jobs = s.importMeta.GetJobBy()
jobs = s.importMeta.GetJobBy(ctx)
}

for _, job := range jobs {
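The services.go hunks above are all the same mechanical change: each gRPC handler already owns a request ctx, and the calls into meta, compactionHandler and importMeta now forward that ctx instead of relying on a context stored inside those objects. Below is a minimal, self-contained sketch of the pattern; the segment and metaStore types are illustrative stand-ins, not the real datacoord types.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// segment and metaStore are illustrative stand-ins for datacoord's SegmentInfo
// and meta; they only exist to show the ctx-forwarding pattern.
type segment struct{ ID int64 }

type metaStore struct{ segments map[int64]*segment }

// GetHealthySegment now takes the caller's ctx so cancellation, deadlines and
// tracing propagate down toward the catalog instead of using a stored context.
func (m *metaStore) GetHealthySegment(ctx context.Context, id int64) *segment {
	if ctx.Err() != nil { // respect cancellation coming from the RPC handler
		return nil
	}
	return m.segments[id]
}

// getSegmentState mimics a handler such as GetSegmentStates: the request ctx
// flows straight into the meta accessor.
func getSegmentState(ctx context.Context, m *metaStore, id int64) string {
	if seg := m.GetHealthySegment(ctx, id); seg != nil {
		return "Flushed"
	}
	return "NotExist"
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	m := &metaStore{segments: map[int64]*segment{100: {ID: 100}}}
	fmt.Println(getSegmentState(ctx, m, 100)) // Flushed
	fmt.Println(getSegmentState(ctx, m, 101)) // NotExist
}
```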
|
|
|
@ -107,7 +107,7 @@ func (s *ServerSuite) TestGetFlushState_ByFlushTs() {
|
|||
{"channel cp < flush ts", 13, false},
|
||||
}
|
||||
|
||||
err := s.testServer.meta.UpdateChannelCheckpoint("ch1", &msgpb.MsgPosition{
|
||||
err := s.testServer.meta.UpdateChannelCheckpoint(context.TODO(), "ch1", &msgpb.MsgPosition{
|
||||
MsgID: []byte{1},
|
||||
Timestamp: 12,
|
||||
})
|
||||
|
@ -158,7 +158,7 @@ func (s *ServerSuite) TestGetFlushState_BySegment() {
|
|||
})
|
||||
|
||||
s.Require().NoError(err)
|
||||
err = s.testServer.meta.UpdateChannelCheckpoint("ch1", &msgpb.MsgPosition{
|
||||
err = s.testServer.meta.UpdateChannelCheckpoint(context.TODO(), "ch1", &msgpb.MsgPosition{
|
||||
MsgID: []byte{1},
|
||||
Timestamp: 12,
|
||||
})
|
||||
|
@ -296,7 +296,7 @@ func (s *ServerSuite) TestSaveBinlogPath_SaveDroppedSegment() {
|
|||
s.NoError(err)
|
||||
s.EqualValues(resp.ErrorCode, commonpb.ErrorCode_Success)
|
||||
|
||||
segment := s.testServer.meta.GetSegment(test.inSegID)
|
||||
segment := s.testServer.meta.GetSegment(context.TODO(), test.inSegID)
|
||||
s.NotNil(segment)
|
||||
s.EqualValues(0, len(segment.GetBinlogs()))
|
||||
s.EqualValues(segment.NumOfRows, test.numOfRows)
|
||||
|
@ -315,7 +315,7 @@ func (s *ServerSuite) TestSaveBinlogPath_L0Segment() {
|
|||
s.mockChMgr.EXPECT().Match(int64(0), "ch1").Return(true)
|
||||
s.testServer.meta.AddCollection(&collectionInfo{ID: 0})
|
||||
|
||||
segment := s.testServer.meta.GetHealthySegment(1)
|
||||
segment := s.testServer.meta.GetHealthySegment(context.TODO(), 1)
|
||||
s.Require().Nil(segment)
|
||||
ctx := context.Background()
|
||||
resp, err := s.testServer.SaveBinlogPaths(ctx, &datapb.SaveBinlogPathsRequest{
|
||||
|
@ -359,7 +359,7 @@ func (s *ServerSuite) TestSaveBinlogPath_L0Segment() {
|
|||
s.NoError(err)
|
||||
s.EqualValues(resp.ErrorCode, commonpb.ErrorCode_Success)
|
||||
|
||||
segment = s.testServer.meta.GetHealthySegment(1)
|
||||
segment = s.testServer.meta.GetHealthySegment(context.TODO(), 1)
|
||||
s.NotNil(segment)
|
||||
s.EqualValues(datapb.SegmentLevel_L0, segment.GetLevel())
|
||||
}
|
||||
|
@ -440,7 +440,7 @@ func (s *ServerSuite) TestSaveBinlogPath_NormalCase() {
|
|||
s.NoError(err)
|
||||
s.EqualValues(resp.ErrorCode, commonpb.ErrorCode_Success)
|
||||
|
||||
segment := s.testServer.meta.GetHealthySegment(1)
|
||||
segment := s.testServer.meta.GetHealthySegment(context.TODO(), 1)
|
||||
s.NotNil(segment)
|
||||
binlogs := segment.GetBinlogs()
|
||||
s.EqualValues(1, len(binlogs))
|
||||
|
@ -471,7 +471,7 @@ func (s *ServerSuite) TestSaveBinlogPath_NormalCase() {
|
|||
})
|
||||
s.NoError(err)
|
||||
s.EqualValues(resp.ErrorCode, commonpb.ErrorCode_Success)
|
||||
segment = s.testServer.meta.GetSegment(2)
|
||||
segment = s.testServer.meta.GetSegment(context.TODO(), 2)
|
||||
s.NotNil(segment)
|
||||
s.Equal(commonpb.SegmentState_Dropped, segment.GetState())
|
||||
}
|
||||
|
@ -845,14 +845,14 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
Schema: newTestSchema(),
|
||||
})
|
||||
|
||||
err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
|
||||
err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
|
||||
ChannelName: "vchan1",
|
||||
Timestamp: 10,
|
||||
MsgID: []byte{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -901,7 +901,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg2))
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: seg1.ID,
|
||||
BuildID: seg1.ID,
|
||||
})
|
||||
|
@ -911,7 +911,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
State: commonpb.IndexState_Finished,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: seg2.ID,
|
||||
BuildID: seg2.ID,
|
||||
})
|
||||
|
@ -954,7 +954,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
Schema: newTestSchema(),
|
||||
})
|
||||
|
||||
err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
|
||||
err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
|
||||
ChannelName: "vchan1",
|
||||
Timestamp: 0,
|
||||
MsgID: []byte{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
|
@ -1077,7 +1077,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
err := svr.meta.AddSegment(context.TODO(), NewSegmentInfo(segment))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -1085,7 +1085,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
IndexName: "",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
|
||||
err = svr.meta.indexMeta.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: segment.ID,
|
||||
BuildID: segment.ID,
|
||||
})
|
||||
|
@ -1130,7 +1130,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
Schema: newTestSchema(),
|
||||
})
|
||||
|
||||
err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
|
||||
err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
|
||||
ChannelName: "vchan1",
|
||||
Timestamp: 0,
|
||||
MsgID: []byte{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
|
@ -1175,7 +1175,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
Schema: newTestSchema(),
|
||||
})
|
||||
|
||||
err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
|
||||
err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
|
||||
ChannelName: "vchan1",
|
||||
Timestamp: 0,
|
||||
MsgID: []byte{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
|
@ -1219,7 +1219,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
Schema: newTestSchema(),
|
||||
})
|
||||
|
||||
err := svr.meta.UpdateChannelCheckpoint("vchan1", &msgpb.MsgPosition{
|
||||
err := svr.meta.UpdateChannelCheckpoint(context.TODO(), "vchan1", &msgpb.MsgPosition{
|
||||
ChannelName: "vchan1",
|
||||
Timestamp: 0,
|
||||
MsgID: []byte{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
|
@ -1243,7 +1243,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg5))
|
||||
assert.NoError(t, err)
|
||||
err = svr.meta.indexMeta.CreateIndex(&model.Index{
|
||||
err = svr.meta.indexMeta.CreateIndex(context.TODO(), &model.Index{
|
||||
TenantID: "",
|
||||
CollectionID: 0,
|
||||
FieldID: 2,
|
||||
|
@ -1349,10 +1349,10 @@ func TestImportV2(t *testing.T) {
|
|||
|
||||
// alloc failed
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
s.importMeta, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
s.importMeta, err = NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
alloc := allocator.NewMockAllocator(t)
|
||||
alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, mockErr)
|
||||
|
@ -1366,11 +1366,11 @@ func TestImportV2(t *testing.T) {
|
|||
|
||||
// add job failed
|
||||
catalog = mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(mockErr)
|
||||
s.importMeta, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(mockErr)
|
||||
s.importMeta, err = NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
resp, err = s.ImportV2(ctx, &internalpb.ImportRequestInternal{
|
||||
Files: []*internalpb.ImportFile{
|
||||
|
@ -1382,12 +1382,12 @@ func TestImportV2(t *testing.T) {
|
|||
})
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, errors.Is(merr.Error(resp.GetStatus()), merr.ErrImportFailed))
|
||||
jobs := s.importMeta.GetJobBy()
|
||||
jobs := s.importMeta.GetJobBy(context.TODO())
|
||||
assert.Equal(t, 0, len(jobs))
|
||||
catalog.ExpectedCalls = lo.Filter(catalog.ExpectedCalls, func(call *mock.Call, _ int) bool {
|
||||
return call.Method != "SaveImportJob"
|
||||
})
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
// normal case
|
||||
resp, err = s.ImportV2(ctx, &internalpb.ImportRequestInternal{
|
||||
|
@ -1400,7 +1400,7 @@ func TestImportV2(t *testing.T) {
|
|||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int32(0), resp.GetStatus().GetCode())
|
||||
jobs = s.importMeta.GetJobBy()
|
||||
jobs = s.importMeta.GetJobBy(context.TODO())
|
||||
assert.Equal(t, 1, len(jobs))
|
||||
|
||||
// number of jobs reached the limit
|
||||
|
@ -1429,11 +1429,11 @@ func TestImportV2(t *testing.T) {
|
|||
|
||||
// job does not exist
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
s.importMeta, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
s.importMeta, err = NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
resp, err = s.GetImportProgress(ctx, &internalpb.GetImportProgressRequest{
|
||||
JobID: "-1",
|
||||
|
@ -1449,7 +1449,7 @@ func TestImportV2(t *testing.T) {
|
|||
State: internalpb.ImportJobState_Failed,
|
||||
},
|
||||
}
|
||||
err = s.importMeta.AddJob(job)
|
||||
err = s.importMeta.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
resp, err = s.GetImportProgress(ctx, &internalpb.GetImportProgressRequest{
|
||||
JobID: "0",
|
||||
|
@ -1471,12 +1471,12 @@ func TestImportV2(t *testing.T) {
|
|||
|
||||
// normal case
|
||||
catalog := mocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().ListImportJobs().Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks().Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything).Return(nil)
|
||||
s.importMeta, err = NewImportMeta(catalog)
|
||||
catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil)
|
||||
catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil)
|
||||
catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil)
|
||||
s.importMeta, err = NewImportMeta(context.TODO(), catalog)
|
||||
assert.NoError(t, err)
|
||||
var job ImportJob = &importJob{
|
||||
ImportJob: &datapb.ImportJob{
|
||||
|
@ -1485,7 +1485,7 @@ func TestImportV2(t *testing.T) {
|
|||
Schema: &schemapb.CollectionSchema{},
|
||||
},
|
||||
}
|
||||
err = s.importMeta.AddJob(job)
|
||||
err = s.importMeta.AddJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
var task ImportTask = &preImportTask{
|
||||
PreImportTask: &datapb.PreImportTask{
|
||||
|
@ -1494,7 +1494,7 @@ func TestImportV2(t *testing.T) {
|
|||
State: datapb.ImportTaskStateV2_Failed,
|
||||
},
|
||||
}
|
||||
err = s.importMeta.AddTask(task)
|
||||
err = s.importMeta.AddTask(context.TODO(), task)
|
||||
assert.NoError(t, err)
|
||||
resp, err = s.ListImports(ctx, &internalpb.ListImportsRequestInternal{
|
||||
CollectionID: 1,
|
||||
|
|
|
@ -125,7 +125,7 @@ func (sss *SyncSegmentsScheduler) SyncSegments(ctx context.Context, collectionID
// sync all healthy segments, but only check flushed segments on datanode. Because L0 growing segments may not in datacoord's meta.
// upon receiving the SyncSegments request, the datanode's segment state may have already transitioned from Growing/Flushing
// to Flushed, so the view must include this segment.
segments := sss.meta.SelectSegments(WithChannel(channelName), SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := sss.meta.SelectSegments(ctx, WithChannel(channelName), SegmentFilterFunc(func(info *SegmentInfo) bool {
return info.GetPartitionID() == partitionID && info.GetLevel() != datapb.SegmentLevel_L0 && isSegmentHealthy(info)
}))
req := &datapb.SyncSegmentsRequest{
|
|
|
@ -156,7 +156,7 @@ func (at *analyzeTask) PreCheck(ctx context.Context, dependency *taskScheduler)
}

// When data analyze occurs, segments must not be discarded. Such as compaction, GC, etc.
segments := dependency.meta.SelectSegments(SegmentFilterFunc(func(info *SegmentInfo) bool {
segments := dependency.meta.SelectSegments(ctx, SegmentFilterFunc(func(info *SegmentInfo) bool {
return isSegmentHealthy(info) && slices.Contains(t.SegmentIDs, info.ID)
}))
segmentsMap := lo.SliceToMap(segments, func(t *SegmentInfo) (int64, *SegmentInfo) {
|
|
|
@ -140,7 +140,7 @@ func (it *indexBuildTask) PreCheck(ctx context.Context, dependency *taskSchedule
return false
}

segment := dependency.meta.GetSegment(segIndex.SegmentID)
segment := dependency.meta.GetSegment(ctx, segIndex.SegmentID)
if !isSegmentHealthy(segment) || !dependency.meta.indexMeta.IsIndexExist(segIndex.CollectionID, segIndex.IndexID) {
log.Ctx(ctx).Info("task is no need to build index, remove it", zap.Int64("taskID", it.taskID))
it.SetState(indexpb.JobState_JobStateNone, "task is no need to build index")
|
|
|
@ -1062,9 +1062,9 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
scheduler := newTaskScheduler(ctx, mt, workerManager, nil, nil, handler, nil)

// remove task in meta
err := scheduler.meta.analyzeMeta.DropAnalyzeTask(1)
err := scheduler.meta.analyzeMeta.DropAnalyzeTask(context.TODO(), 1)
s.NoError(err)
err = scheduler.meta.analyzeMeta.DropAnalyzeTask(2)
err = scheduler.meta.analyzeMeta.DropAnalyzeTask(context.TODO(), 2)
s.NoError(err)

mt.segments.DropSegment(1000)
|
|
|
@ -78,7 +78,7 @@ func (st *statsTask) ResetTask(mt *meta) {
st.nodeID = 0
// reset isCompacting

mt.SetSegmentsCompacting([]UniqueID{st.segmentID}, false)
mt.SetSegmentsCompacting(context.TODO(), []UniqueID{st.segmentID}, false)
}

func (st *statsTask) SetQueueTime(t time.Time) {

@ -110,7 +110,7 @@ func (st *statsTask) GetTaskType() string {
}

func (st *statsTask) CheckTaskHealthy(mt *meta) bool {
seg := mt.GetHealthySegment(st.segmentID)
seg := mt.GetHealthySegment(context.TODO(), st.segmentID)
return seg != nil
}

@ -129,7 +129,7 @@ func (st *statsTask) GetFailReason() string {

func (st *statsTask) UpdateVersion(ctx context.Context, nodeID int64, meta *meta) error {
// mark compacting
if exist, canDo := meta.CheckAndSetSegmentsCompacting([]UniqueID{st.segmentID}); !exist || !canDo {
if exist, canDo := meta.CheckAndSetSegmentsCompacting(ctx, []UniqueID{st.segmentID}); !exist || !canDo {
log.Warn("segment is not exist or is compacting, skip stats",
zap.Bool("exist", exist), zap.Bool("canDo", canDo))
st.SetState(indexpb.JobState_JobStateNone, "segment is not healthy")

@ -150,7 +150,7 @@ func (st *statsTask) UpdateMetaBuildingState(meta *meta) error {
func (st *statsTask) PreCheck(ctx context.Context, dependency *taskScheduler) bool {
// set segment compacting
log := log.Ctx(ctx).With(zap.Int64("taskID", st.taskID), zap.Int64("segmentID", st.segmentID))
segment := dependency.meta.GetHealthySegment(st.segmentID)
segment := dependency.meta.GetHealthySegment(ctx, st.segmentID)
if segment == nil {
log.Warn("segment is node healthy, skip stats")
st.SetState(indexpb.JobState_JobStateNone, "segment is not healthy")
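Two of the hunks above (ResetTask and CheckTaskHealthy) have no request context on their call path yet, so the refactor plugs the gap with context.TODO(); UpdateVersion and PreCheck already receive a ctx and forward it. A small, self-contained illustration of that convention, using placeholder types:

```go
package main

import (
	"context"
	"fmt"
)

// segMeta is a placeholder for datacoord's meta in this illustration.
type segMeta struct{}

// GetHealthySegment stands in for meta.GetHealthySegment after the refactor.
func (m *segMeta) GetHealthySegment(ctx context.Context, id int64) bool {
	return ctx.Err() == nil && id > 0
}

// checkTaskHealthy has no request context on its call path yet, so it uses
// context.TODO() as an explicit marker that a real ctx should be threaded in later.
func checkTaskHealthy(m *segMeta, segID int64) bool {
	return m.GetHealthySegment(context.TODO(), segID)
}

// preCheck already receives ctx from the scheduler, so it simply forwards it.
func preCheck(ctx context.Context, m *segMeta, segID int64) bool {
	return m.GetHealthySegment(ctx, segID)
}

func main() {
	m := &segMeta{}
	fmt.Println(checkTaskHealthy(m, 7), preCheck(context.Background(), m, 7)) // true true
}
```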
|
|
|
@ -550,14 +550,14 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
s.mt.catalog = catalog
s.mt.statsTaskMeta.catalog = catalog
updateStateOp := UpdateStatusOperator(s.segID, commonpb.SegmentState_Flushed)
err := s.mt.UpdateSegmentsInfo(updateStateOp)
err := s.mt.UpdateSegmentsInfo(context.TODO(), updateStateOp)
s.NoError(err)
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil)

s.NoError(st.SetJobInfo(s.mt))
s.NotNil(s.mt.GetHealthySegment(s.targetID))
s.NotNil(s.mt.GetHealthySegment(context.TODO(), s.targetID))
s.Equal(indexpb.JobState_JobStateFinished, s.mt.statsTaskMeta.tasks[s.taskID].GetState())
s.Equal(datapb.SegmentLevel_L2, s.mt.GetHealthySegment(s.targetID).GetLevel())
s.Equal(datapb.SegmentLevel_L2, s.mt.GetHealthySegment(context.TODO(), s.targetID).GetLevel())
})
})
}
|
|
|
@ -148,15 +148,15 @@ type DataCoordCatalog interface {
AlterSegmentIndexes(ctx context.Context, newSegIdxes []*model.SegmentIndex) error
DropSegmentIndex(ctx context.Context, collID, partID, segID, buildID typeutil.UniqueID) error

SaveImportJob(job *datapb.ImportJob) error
ListImportJobs() ([]*datapb.ImportJob, error)
DropImportJob(jobID int64) error
SavePreImportTask(task *datapb.PreImportTask) error
ListPreImportTasks() ([]*datapb.PreImportTask, error)
DropPreImportTask(taskID int64) error
SaveImportTask(task *datapb.ImportTaskV2) error
ListImportTasks() ([]*datapb.ImportTaskV2, error)
DropImportTask(taskID int64) error
SaveImportJob(ctx context.Context, job *datapb.ImportJob) error
ListImportJobs(ctx context.Context) ([]*datapb.ImportJob, error)
DropImportJob(ctx context.Context, jobID int64) error
SavePreImportTask(ctx context.Context, task *datapb.PreImportTask) error
ListPreImportTasks(ctx context.Context) ([]*datapb.PreImportTask, error)
DropPreImportTask(ctx context.Context, taskID int64) error
SaveImportTask(ctx context.Context, task *datapb.ImportTaskV2) error
ListImportTasks(ctx context.Context) ([]*datapb.ImportTaskV2, error)
DropImportTask(ctx context.Context, taskID int64) error

GcConfirm(ctx context.Context, collectionID, partitionID typeutil.UniqueID) bool
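The interface change above is the contract the rest of this diff implements: every import-related catalog method now takes the caller's context as its first argument. The sketch below shows, with simplified stand-in types rather than the real datapb and DataCoordCatalog definitions, how a caller can bound such catalog writes with its own deadline once the signature carries a ctx.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// importJob and importCatalog are simplified stand-ins for datapb.ImportJob and
// the ctx-taking import methods of DataCoordCatalog shown above.
type importJob struct{ JobID int64 }

type importCatalog interface {
	SaveImportJob(ctx context.Context, job *importJob) error
	DropImportJob(ctx context.Context, jobID int64) error
}

// memCatalog is a toy in-memory implementation used only for this sketch.
type memCatalog struct{ jobs map[int64]*importJob }

func (c *memCatalog) SaveImportJob(ctx context.Context, job *importJob) error {
	if err := ctx.Err(); err != nil {
		return err // a cancelled or timed-out request never reaches the store
	}
	c.jobs[job.JobID] = job
	return nil
}

func (c *memCatalog) DropImportJob(ctx context.Context, jobID int64) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	if _, ok := c.jobs[jobID]; !ok {
		return errors.New("import job not found")
	}
	delete(c.jobs, jobID)
	return nil
}

func main() {
	// Callers can now bound catalog writes with the request's own deadline.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	var catalog importCatalog = &memCatalog{jobs: map[int64]*importJob{}}
	fmt.Println(catalog.SaveImportJob(ctx, &importJob{JobID: 1})) // <nil>
	fmt.Println(catalog.DropImportJob(ctx, 1))                    // <nil>
}
```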
|
|
|
@ -698,7 +698,7 @@ func (kc *Catalog) DropSegmentIndex(ctx context.Context, collID, partID, segID,
return nil
}

func (kc *Catalog) SaveImportJob(job *datapb.ImportJob) error {
func (kc *Catalog) SaveImportJob(ctx context.Context, job *datapb.ImportJob) error {
key := buildImportJobKey(job.GetJobID())
value, err := proto.Marshal(job)
if err != nil {

@ -707,7 +707,7 @@ func (kc *Catalog) SaveImportJob(job *datapb.ImportJob) error {
return kc.MetaKv.Save(key, string(value))
}

func (kc *Catalog) ListImportJobs() ([]*datapb.ImportJob, error) {
func (kc *Catalog) ListImportJobs(ctx context.Context) ([]*datapb.ImportJob, error) {
jobs := make([]*datapb.ImportJob, 0)
_, values, err := kc.MetaKv.LoadWithPrefix(ImportJobPrefix)
if err != nil {

@ -724,12 +724,12 @@ func (kc *Catalog) ListImportJobs() ([]*datapb.ImportJob, error) {
return jobs, nil
}

func (kc *Catalog) DropImportJob(jobID int64) error {
func (kc *Catalog) DropImportJob(ctx context.Context, jobID int64) error {
key := buildImportJobKey(jobID)
return kc.MetaKv.Remove(key)
}

func (kc *Catalog) SavePreImportTask(task *datapb.PreImportTask) error {
func (kc *Catalog) SavePreImportTask(ctx context.Context, task *datapb.PreImportTask) error {
key := buildPreImportTaskKey(task.GetTaskID())
value, err := proto.Marshal(task)
if err != nil {

@ -738,7 +738,7 @@ func (kc *Catalog) SavePreImportTask(task *datapb.PreImportTask) error {
return kc.MetaKv.Save(key, string(value))
}

func (kc *Catalog) ListPreImportTasks() ([]*datapb.PreImportTask, error) {
func (kc *Catalog) ListPreImportTasks(ctx context.Context) ([]*datapb.PreImportTask, error) {
tasks := make([]*datapb.PreImportTask, 0)

_, values, err := kc.MetaKv.LoadWithPrefix(PreImportTaskPrefix)

@ -757,12 +757,12 @@ func (kc *Catalog) ListPreImportTasks() ([]*datapb.PreImportTask, error) {
return tasks, nil
}

func (kc *Catalog) DropPreImportTask(taskID int64) error {
func (kc *Catalog) DropPreImportTask(ctx context.Context, taskID int64) error {
key := buildPreImportTaskKey(taskID)
return kc.MetaKv.Remove(key)
}

func (kc *Catalog) SaveImportTask(task *datapb.ImportTaskV2) error {
func (kc *Catalog) SaveImportTask(ctx context.Context, task *datapb.ImportTaskV2) error {
key := buildImportTaskKey(task.GetTaskID())
value, err := proto.Marshal(task)
if err != nil {

@ -771,7 +771,7 @@ func (kc *Catalog) SaveImportTask(task *datapb.ImportTaskV2) error {
return kc.MetaKv.Save(key, string(value))
}

func (kc *Catalog) ListImportTasks() ([]*datapb.ImportTaskV2, error) {
func (kc *Catalog) ListImportTasks(ctx context.Context) ([]*datapb.ImportTaskV2, error) {
tasks := make([]*datapb.ImportTaskV2, 0)

_, values, err := kc.MetaKv.LoadWithPrefix(ImportTaskPrefix)

@ -789,7 +789,7 @@ func (kc *Catalog) ListImportTasks() ([]*datapb.ImportTaskV2, error) {
return tasks, nil
}

func (kc *Catalog) DropImportTask(taskID int64) error {
func (kc *Catalog) DropImportTask(ctx context.Context, taskID int64) error {
key := buildImportTaskKey(taskID)
return kc.MetaKv.Remove(key)
}
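Note that in the Catalog methods above the new ctx parameter is accepted but not yet consumed: the bodies still call kc.MetaKv.Save, Remove and LoadWithPrefix without it. The sketch below shows the shape this refactor enables once the KV layer becomes context-aware; the kvStore interface and the key layout are assumptions for illustration, not the real MetaKv API.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// kvStore is a hypothetical context-aware KV abstraction. Milvus' MetaKv does
// not take a ctx at this point, which is why the Catalog methods above accept
// ctx without forwarding it yet.
type kvStore interface {
	Save(ctx context.Context, key, value string) error
}

type memKV struct{ data map[string]string }

func (m *memKV) Save(ctx context.Context, key, value string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // the write is abandoned if the caller gave up
	default:
		m.data[key] = value
		return nil
	}
}

// saveImportJob mirrors the shape of Catalog.SaveImportJob but actually
// forwards ctx; the key layout here is illustrative, not the real one.
func saveImportJob(ctx context.Context, kv kvStore, jobID int64, payload string) error {
	key := fmt.Sprintf("import-job/%d", jobID)
	return kv.Save(ctx, key, payload)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	kv := &memKV{data: map[string]string{}}
	fmt.Println(saveImportJob(ctx, kv, 1, "{}")) // <nil>
}
```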
|
|
|
@ -1359,16 +1359,16 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.SaveImportJob(job)
|
||||
err := kc.SaveImportJob(context.TODO(), job)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = kc.SaveImportJob(nil)
|
||||
err = kc.SaveImportJob(context.TODO(), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.SaveImportJob(job)
|
||||
err = kc.SaveImportJob(context.TODO(), job)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1378,20 +1378,20 @@ func TestCatalog_Import(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{string(value)}, nil)
|
||||
kc.MetaKv = txn
|
||||
jobs, err := kc.ListImportJobs()
|
||||
jobs, err := kc.ListImportJobs(context.TODO())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(jobs))
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{"@#%#^#"}, nil)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListImportJobs()
|
||||
_, err = kc.ListImportJobs(context.TODO())
|
||||
assert.Error(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, mockErr)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListImportJobs()
|
||||
_, err = kc.ListImportJobs(context.TODO())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1399,13 +1399,13 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.DropImportJob(job.GetJobID())
|
||||
err := kc.DropImportJob(context.TODO(), job.GetJobID())
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.DropImportJob(job.GetJobID())
|
||||
err = kc.DropImportJob(context.TODO(), job.GetJobID())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1413,16 +1413,16 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.SavePreImportTask(pit)
|
||||
err := kc.SavePreImportTask(context.TODO(), pit)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = kc.SavePreImportTask(nil)
|
||||
err = kc.SavePreImportTask(context.TODO(), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.SavePreImportTask(pit)
|
||||
err = kc.SavePreImportTask(context.TODO(), pit)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1432,20 +1432,20 @@ func TestCatalog_Import(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{string(value)}, nil)
|
||||
kc.MetaKv = txn
|
||||
tasks, err := kc.ListPreImportTasks()
|
||||
tasks, err := kc.ListPreImportTasks(context.TODO())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{"@#%#^#"}, nil)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListPreImportTasks()
|
||||
_, err = kc.ListPreImportTasks(context.TODO())
|
||||
assert.Error(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, mockErr)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListPreImportTasks()
|
||||
_, err = kc.ListPreImportTasks(context.TODO())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1453,13 +1453,13 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.DropPreImportTask(pit.GetTaskID())
|
||||
err := kc.DropPreImportTask(context.TODO(), pit.GetTaskID())
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.DropPreImportTask(pit.GetTaskID())
|
||||
err = kc.DropPreImportTask(context.TODO(), pit.GetTaskID())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1467,16 +1467,16 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.SaveImportTask(it)
|
||||
err := kc.SaveImportTask(context.TODO(), it)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = kc.SaveImportTask(nil)
|
||||
err = kc.SaveImportTask(context.TODO(), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Save(mock.Anything, mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.SaveImportTask(it)
|
||||
err = kc.SaveImportTask(context.TODO(), it)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1486,20 +1486,20 @@ func TestCatalog_Import(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{string(value)}, nil)
|
||||
kc.MetaKv = txn
|
||||
tasks, err := kc.ListImportTasks()
|
||||
tasks, err := kc.ListImportTasks(context.TODO())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(tasks))
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, []string{"@#%#^#"}, nil)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListImportTasks()
|
||||
_, err = kc.ListImportTasks(context.TODO())
|
||||
assert.Error(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, mockErr)
|
||||
kc.MetaKv = txn
|
||||
_, err = kc.ListImportTasks()
|
||||
_, err = kc.ListImportTasks(context.TODO())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -1507,13 +1507,13 @@ func TestCatalog_Import(t *testing.T) {
|
|||
txn := mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(nil)
|
||||
kc.MetaKv = txn
|
||||
err := kc.DropImportTask(it.GetTaskID())
|
||||
err := kc.DropImportTask(context.TODO(), it.GetTaskID())
|
||||
assert.NoError(t, err)
|
||||
|
||||
txn = mocks.NewMetaKv(t)
|
||||
txn.EXPECT().Remove(mock.Anything).Return(mockErr)
|
||||
kc.MetaKv = txn
|
||||
err = kc.DropImportTask(it.GetTaskID())
|
||||
err = kc.DropImportTask(context.TODO(), it.GetTaskID())
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
// Code generated by mockery v2.32.4. DO NOT EDIT.
// Code generated by mockery v2.46.0. DO NOT EDIT.

package mocks

|
@ -34,6 +34,10 @@ func (_m *DataCoordCatalog) EXPECT() *DataCoordCatalog_Expecter {
|
|||
func (_m *DataCoordCatalog) AddSegment(ctx context.Context, segment *datapb.SegmentInfo) error {
|
||||
ret := _m.Called(ctx, segment)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddSegment")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *datapb.SegmentInfo) error); ok {
|
||||
r0 = rf(ctx, segment)
|
||||
|
@ -77,6 +81,10 @@ func (_c *DataCoordCatalog_AddSegment_Call) RunAndReturn(run func(context.Contex
|
|||
func (_m *DataCoordCatalog) AlterIndexes(ctx context.Context, newIndexes []*model.Index) error {
|
||||
ret := _m.Called(ctx, newIndexes)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AlterIndexes")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, []*model.Index) error); ok {
|
||||
r0 = rf(ctx, newIndexes)
|
||||
|
@ -120,6 +128,10 @@ func (_c *DataCoordCatalog_AlterIndexes_Call) RunAndReturn(run func(context.Cont
|
|||
func (_m *DataCoordCatalog) AlterSegmentIndexes(ctx context.Context, newSegIdxes []*model.SegmentIndex) error {
|
||||
ret := _m.Called(ctx, newSegIdxes)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AlterSegmentIndexes")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, []*model.SegmentIndex) error); ok {
|
||||
r0 = rf(ctx, newSegIdxes)
|
||||
|
@ -170,6 +182,10 @@ func (_m *DataCoordCatalog) AlterSegments(ctx context.Context, newSegments []*da
|
|||
_ca = append(_ca, _va...)
|
||||
ret := _m.Called(_ca...)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AlterSegments")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, []*datapb.SegmentInfo, ...metastore.BinlogsIncrement) error); ok {
|
||||
r0 = rf(ctx, newSegments, binlogs...)
|
||||
|
@ -221,6 +237,10 @@ func (_c *DataCoordCatalog_AlterSegments_Call) RunAndReturn(run func(context.Con
|
|||
func (_m *DataCoordCatalog) ChannelExists(ctx context.Context, channel string) bool {
|
||||
ret := _m.Called(ctx, channel)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for ChannelExists")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok {
|
||||
r0 = rf(ctx, channel)
|
||||
|
@ -264,6 +284,10 @@ func (_c *DataCoordCatalog_ChannelExists_Call) RunAndReturn(run func(context.Con
|
|||
func (_m *DataCoordCatalog) CreateIndex(ctx context.Context, index *model.Index) error {
|
||||
ret := _m.Called(ctx, index)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CreateIndex")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *model.Index) error); ok {
|
||||
r0 = rf(ctx, index)
|
||||
|
@ -307,6 +331,10 @@ func (_c *DataCoordCatalog_CreateIndex_Call) RunAndReturn(run func(context.Conte
|
|||
func (_m *DataCoordCatalog) CreateSegmentIndex(ctx context.Context, segIdx *model.SegmentIndex) error {
|
||||
ret := _m.Called(ctx, segIdx)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CreateSegmentIndex")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *model.SegmentIndex) error); ok {
|
||||
r0 = rf(ctx, segIdx)
|
||||
|
@ -350,6 +378,10 @@ func (_c *DataCoordCatalog_CreateSegmentIndex_Call) RunAndReturn(run func(contex
|
|||
func (_m *DataCoordCatalog) DropAnalyzeTask(ctx context.Context, taskID int64) error {
|
||||
ret := _m.Called(ctx, taskID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropAnalyzeTask")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = rf(ctx, taskID)
|
||||
|
@ -393,6 +425,10 @@ func (_c *DataCoordCatalog_DropAnalyzeTask_Call) RunAndReturn(run func(context.C
|
|||
func (_m *DataCoordCatalog) DropChannel(ctx context.Context, channel string) error {
|
||||
ret := _m.Called(ctx, channel)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropChannel")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
|
||||
r0 = rf(ctx, channel)
|
||||
|
@ -436,6 +472,10 @@ func (_c *DataCoordCatalog_DropChannel_Call) RunAndReturn(run func(context.Conte
|
|||
func (_m *DataCoordCatalog) DropChannelCheckpoint(ctx context.Context, vChannel string) error {
|
||||
ret := _m.Called(ctx, vChannel)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropChannelCheckpoint")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
|
||||
r0 = rf(ctx, vChannel)
|
||||
|
@ -479,6 +519,10 @@ func (_c *DataCoordCatalog_DropChannelCheckpoint_Call) RunAndReturn(run func(con
|
|||
func (_m *DataCoordCatalog) DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
|
||||
ret := _m.Called(ctx, task)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropCompactionTask")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask) error); ok {
|
||||
r0 = rf(ctx, task)
|
||||
|
@ -522,6 +566,10 @@ func (_c *DataCoordCatalog_DropCompactionTask_Call) RunAndReturn(run func(contex
|
|||
func (_m *DataCoordCatalog) DropCurrentPartitionStatsVersion(ctx context.Context, collID int64, partID int64, vChannel string) error {
|
||||
ret := _m.Called(ctx, collID, partID, vChannel)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropCurrentPartitionStatsVersion")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64, int64, string) error); ok {
|
||||
r0 = rf(ctx, collID, partID, vChannel)
|
||||
|
@ -563,13 +611,17 @@ func (_c *DataCoordCatalog_DropCurrentPartitionStatsVersion_Call) RunAndReturn(r
|
|||
return _c
|
||||
}
|
||||
|
||||
// DropImportJob provides a mock function with given fields: jobID
|
||||
func (_m *DataCoordCatalog) DropImportJob(jobID int64) error {
|
||||
ret := _m.Called(jobID)
|
||||
// DropImportJob provides a mock function with given fields: ctx, jobID
|
||||
func (_m *DataCoordCatalog) DropImportJob(ctx context.Context, jobID int64) error {
|
||||
ret := _m.Called(ctx, jobID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DropImportJob")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(int64) error); ok {
|
||||
r0 = rf(jobID)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = rf(ctx, jobID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
@ -583,14 +635,15 @@ type DataCoordCatalog_DropImportJob_Call struct {
|
|||
}
|
||||
|
||||
// DropImportJob is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - jobID int64
|
||||
func (_e *DataCoordCatalog_Expecter) DropImportJob(jobID interface{}) *DataCoordCatalog_DropImportJob_Call {
|
||||
return &DataCoordCatalog_DropImportJob_Call{Call: _e.mock.On("DropImportJob", jobID)}
|
||||
func (_e *DataCoordCatalog_Expecter) DropImportJob(ctx interface{}, jobID interface{}) *DataCoordCatalog_DropImportJob_Call {
|
||||
return &DataCoordCatalog_DropImportJob_Call{Call: _e.mock.On("DropImportJob", ctx, jobID)}
|
||||
}
|
||||
|
||||
func (_c *DataCoordCatalog_DropImportJob_Call) Run(run func(jobID int64)) *DataCoordCatalog_DropImportJob_Call {
|
||||
func (_c *DataCoordCatalog_DropImportJob_Call) Run(run func(ctx context.Context, jobID int64)) *DataCoordCatalog_DropImportJob_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(int64))
|
||||
run(args[0].(context.Context), args[1].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
@ -600,18 +653,22 @@ func (_c *DataCoordCatalog_DropImportJob_Call) Return(_a0 error) *DataCoordCatal
|
|||
return _c
|
||||
}
|
||||
|
||||
func (_c *DataCoordCatalog_DropImportJob_Call) RunAndReturn(run func(int64) error) *DataCoordCatalog_DropImportJob_Call {
|
||||
func (_c *DataCoordCatalog_DropImportJob_Call) RunAndReturn(run func(context.Context, int64) error) *DataCoordCatalog_DropImportJob_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}

-// DropImportTask provides a mock function with given fields: taskID
-func (_m *DataCoordCatalog) DropImportTask(taskID int64) error {
-    ret := _m.Called(taskID)
+// DropImportTask provides a mock function with given fields: ctx, taskID
+func (_m *DataCoordCatalog) DropImportTask(ctx context.Context, taskID int64) error {
+    ret := _m.Called(ctx, taskID)

+    if len(ret) == 0 {
+        panic("no return value specified for DropImportTask")
+    }
+
    var r0 error
-    if rf, ok := ret.Get(0).(func(int64) error); ok {
-        r0 = rf(taskID)
+    if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+        r0 = rf(ctx, taskID)
    } else {
        r0 = ret.Error(0)
    }
@@ -625,14 +682,15 @@ type DataCoordCatalog_DropImportTask_Call struct {
}

// DropImportTask is a helper method to define mock.On call
+// - ctx context.Context
// - taskID int64
-func (_e *DataCoordCatalog_Expecter) DropImportTask(taskID interface{}) *DataCoordCatalog_DropImportTask_Call {
-    return &DataCoordCatalog_DropImportTask_Call{Call: _e.mock.On("DropImportTask", taskID)}
+func (_e *DataCoordCatalog_Expecter) DropImportTask(ctx interface{}, taskID interface{}) *DataCoordCatalog_DropImportTask_Call {
+    return &DataCoordCatalog_DropImportTask_Call{Call: _e.mock.On("DropImportTask", ctx, taskID)}
}

-func (_c *DataCoordCatalog_DropImportTask_Call) Run(run func(taskID int64)) *DataCoordCatalog_DropImportTask_Call {
+func (_c *DataCoordCatalog_DropImportTask_Call) Run(run func(ctx context.Context, taskID int64)) *DataCoordCatalog_DropImportTask_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(int64))
+        run(args[0].(context.Context), args[1].(int64))
    })
    return _c
}
@@ -642,7 +700,7 @@ func (_c *DataCoordCatalog_DropImportTask_Call) Return(_a0 error) *DataCoordCata
    return _c
}

-func (_c *DataCoordCatalog_DropImportTask_Call) RunAndReturn(run func(int64) error) *DataCoordCatalog_DropImportTask_Call {
+func (_c *DataCoordCatalog_DropImportTask_Call) RunAndReturn(run func(context.Context, int64) error) *DataCoordCatalog_DropImportTask_Call {
    _c.Call.Return(run)
    return _c
}
@@ -651,6 +709,10 @@ func (_c *DataCoordCatalog_DropImportTask_Call) RunAndReturn(run func(int64) err
func (_m *DataCoordCatalog) DropIndex(ctx context.Context, collID int64, dropIdxID int64) error {
    ret := _m.Called(ctx, collID, dropIdxID)

+    if len(ret) == 0 {
+        panic("no return value specified for DropIndex")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, int64, int64) error); ok {
        r0 = rf(ctx, collID, dropIdxID)
@@ -695,6 +757,10 @@ func (_c *DataCoordCatalog_DropIndex_Call) RunAndReturn(run func(context.Context
func (_m *DataCoordCatalog) DropPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error {
    ret := _m.Called(ctx, info)

+    if len(ret) == 0 {
+        panic("no return value specified for DropPartitionStatsInfo")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *datapb.PartitionStatsInfo) error); ok {
        r0 = rf(ctx, info)
@@ -734,13 +800,17 @@ func (_c *DataCoordCatalog_DropPartitionStatsInfo_Call) RunAndReturn(run func(co
    return _c
}

-// DropPreImportTask provides a mock function with given fields: taskID
-func (_m *DataCoordCatalog) DropPreImportTask(taskID int64) error {
-    ret := _m.Called(taskID)
+// DropPreImportTask provides a mock function with given fields: ctx, taskID
+func (_m *DataCoordCatalog) DropPreImportTask(ctx context.Context, taskID int64) error {
+    ret := _m.Called(ctx, taskID)

+    if len(ret) == 0 {
+        panic("no return value specified for DropPreImportTask")
+    }
+
    var r0 error
-    if rf, ok := ret.Get(0).(func(int64) error); ok {
-        r0 = rf(taskID)
+    if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+        r0 = rf(ctx, taskID)
    } else {
        r0 = ret.Error(0)
    }
@@ -754,14 +824,15 @@ type DataCoordCatalog_DropPreImportTask_Call struct {
}

// DropPreImportTask is a helper method to define mock.On call
+// - ctx context.Context
// - taskID int64
-func (_e *DataCoordCatalog_Expecter) DropPreImportTask(taskID interface{}) *DataCoordCatalog_DropPreImportTask_Call {
-    return &DataCoordCatalog_DropPreImportTask_Call{Call: _e.mock.On("DropPreImportTask", taskID)}
+func (_e *DataCoordCatalog_Expecter) DropPreImportTask(ctx interface{}, taskID interface{}) *DataCoordCatalog_DropPreImportTask_Call {
+    return &DataCoordCatalog_DropPreImportTask_Call{Call: _e.mock.On("DropPreImportTask", ctx, taskID)}
}

-func (_c *DataCoordCatalog_DropPreImportTask_Call) Run(run func(taskID int64)) *DataCoordCatalog_DropPreImportTask_Call {
+func (_c *DataCoordCatalog_DropPreImportTask_Call) Run(run func(ctx context.Context, taskID int64)) *DataCoordCatalog_DropPreImportTask_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(int64))
+        run(args[0].(context.Context), args[1].(int64))
    })
    return _c
}
@@ -771,7 +842,7 @@ func (_c *DataCoordCatalog_DropPreImportTask_Call) Return(_a0 error) *DataCoordC
    return _c
}

-func (_c *DataCoordCatalog_DropPreImportTask_Call) RunAndReturn(run func(int64) error) *DataCoordCatalog_DropPreImportTask_Call {
+func (_c *DataCoordCatalog_DropPreImportTask_Call) RunAndReturn(run func(context.Context, int64) error) *DataCoordCatalog_DropPreImportTask_Call {
    _c.Call.Return(run)
    return _c
}
@@ -780,6 +851,10 @@ func (_c *DataCoordCatalog_DropPreImportTask_Call) RunAndReturn(run func(int64)
func (_m *DataCoordCatalog) DropSegment(ctx context.Context, segment *datapb.SegmentInfo) error {
    ret := _m.Called(ctx, segment)

+    if len(ret) == 0 {
+        panic("no return value specified for DropSegment")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *datapb.SegmentInfo) error); ok {
        r0 = rf(ctx, segment)
@@ -823,6 +898,10 @@ func (_c *DataCoordCatalog_DropSegment_Call) RunAndReturn(run func(context.Conte
func (_m *DataCoordCatalog) DropSegmentIndex(ctx context.Context, collID int64, partID int64, segID int64, buildID int64) error {
    ret := _m.Called(ctx, collID, partID, segID, buildID)

+    if len(ret) == 0 {
+        panic("no return value specified for DropSegmentIndex")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, int64, int64, int64, int64) error); ok {
        r0 = rf(ctx, collID, partID, segID, buildID)
@@ -869,6 +948,10 @@ func (_c *DataCoordCatalog_DropSegmentIndex_Call) RunAndReturn(run func(context.
func (_m *DataCoordCatalog) DropStatsTask(ctx context.Context, taskID int64) error {
    ret := _m.Called(ctx, taskID)

+    if len(ret) == 0 {
+        panic("no return value specified for DropStatsTask")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
        r0 = rf(ctx, taskID)
@@ -912,6 +995,10 @@ func (_c *DataCoordCatalog_DropStatsTask_Call) RunAndReturn(run func(context.Con
func (_m *DataCoordCatalog) GcConfirm(ctx context.Context, collectionID int64, partitionID int64) bool {
    ret := _m.Called(ctx, collectionID, partitionID)

+    if len(ret) == 0 {
+        panic("no return value specified for GcConfirm")
+    }
+
    var r0 bool
    if rf, ok := ret.Get(0).(func(context.Context, int64, int64) bool); ok {
        r0 = rf(ctx, collectionID, partitionID)
@@ -956,6 +1043,10 @@ func (_c *DataCoordCatalog_GcConfirm_Call) RunAndReturn(run func(context.Context
func (_m *DataCoordCatalog) GetCurrentPartitionStatsVersion(ctx context.Context, collID int64, partID int64, vChannel string) (int64, error) {
    ret := _m.Called(ctx, collID, partID, vChannel)

+    if len(ret) == 0 {
+        panic("no return value specified for GetCurrentPartitionStatsVersion")
+    }
+
    var r0 int64
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context, int64, int64, string) (int64, error)); ok {
@@ -1011,6 +1102,10 @@ func (_c *DataCoordCatalog_GetCurrentPartitionStatsVersion_Call) RunAndReturn(ru
func (_m *DataCoordCatalog) ListAnalyzeTasks(ctx context.Context) ([]*indexpb.AnalyzeTask, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListAnalyzeTasks")
+    }
+
    var r0 []*indexpb.AnalyzeTask
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*indexpb.AnalyzeTask, error)); ok {
@@ -1065,6 +1160,10 @@ func (_c *DataCoordCatalog_ListAnalyzeTasks_Call) RunAndReturn(run func(context.
func (_m *DataCoordCatalog) ListChannelCheckpoint(ctx context.Context) (map[string]*msgpb.MsgPosition, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListChannelCheckpoint")
+    }
+
    var r0 map[string]*msgpb.MsgPosition
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) (map[string]*msgpb.MsgPosition, error)); ok {
@@ -1119,6 +1218,10 @@ func (_c *DataCoordCatalog_ListChannelCheckpoint_Call) RunAndReturn(run func(con
func (_m *DataCoordCatalog) ListCompactionTask(ctx context.Context) ([]*datapb.CompactionTask, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListCompactionTask")
+    }
+
    var r0 []*datapb.CompactionTask
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.CompactionTask, error)); ok {
@@ -1169,25 +1272,29 @@ func (_c *DataCoordCatalog_ListCompactionTask_Call) RunAndReturn(run func(contex
    return _c
}

-// ListImportJobs provides a mock function with given fields:
-func (_m *DataCoordCatalog) ListImportJobs() ([]*datapb.ImportJob, error) {
-    ret := _m.Called()
+// ListImportJobs provides a mock function with given fields: ctx
+func (_m *DataCoordCatalog) ListImportJobs(ctx context.Context) ([]*datapb.ImportJob, error) {
+    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListImportJobs")
+    }
+
    var r0 []*datapb.ImportJob
    var r1 error
-    if rf, ok := ret.Get(0).(func() ([]*datapb.ImportJob, error)); ok {
-        return rf()
+    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.ImportJob, error)); ok {
+        return rf(ctx)
    }
-    if rf, ok := ret.Get(0).(func() []*datapb.ImportJob); ok {
-        r0 = rf()
+    if rf, ok := ret.Get(0).(func(context.Context) []*datapb.ImportJob); ok {
+        r0 = rf(ctx)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).([]*datapb.ImportJob)
        }
    }

-    if rf, ok := ret.Get(1).(func() error); ok {
-        r1 = rf()
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
    } else {
        r1 = ret.Error(1)
    }
@@ -1201,13 +1308,14 @@ type DataCoordCatalog_ListImportJobs_Call struct {
}

// ListImportJobs is a helper method to define mock.On call
-func (_e *DataCoordCatalog_Expecter) ListImportJobs() *DataCoordCatalog_ListImportJobs_Call {
-    return &DataCoordCatalog_ListImportJobs_Call{Call: _e.mock.On("ListImportJobs")}
+// - ctx context.Context
+func (_e *DataCoordCatalog_Expecter) ListImportJobs(ctx interface{}) *DataCoordCatalog_ListImportJobs_Call {
+    return &DataCoordCatalog_ListImportJobs_Call{Call: _e.mock.On("ListImportJobs", ctx)}
}

-func (_c *DataCoordCatalog_ListImportJobs_Call) Run(run func()) *DataCoordCatalog_ListImportJobs_Call {
+func (_c *DataCoordCatalog_ListImportJobs_Call) Run(run func(ctx context.Context)) *DataCoordCatalog_ListImportJobs_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run()
+        run(args[0].(context.Context))
    })
    return _c
}
@@ -1217,30 +1325,34 @@ func (_c *DataCoordCatalog_ListImportJobs_Call) Return(_a0 []*datapb.ImportJob,
    return _c
}

-func (_c *DataCoordCatalog_ListImportJobs_Call) RunAndReturn(run func() ([]*datapb.ImportJob, error)) *DataCoordCatalog_ListImportJobs_Call {
+func (_c *DataCoordCatalog_ListImportJobs_Call) RunAndReturn(run func(context.Context) ([]*datapb.ImportJob, error)) *DataCoordCatalog_ListImportJobs_Call {
    _c.Call.Return(run)
    return _c
}
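Note: for the list-style methods, RunAndReturn now takes a context-aware function. A minimal sketch under the same assumptions as the earlier snippet (generated constructor and EXPECT() helper, testify mock/assert); the stub and its return value are hypothetical, not part of this diff:

func TestListImportJobsMockUsage(t *testing.T) {
    catalog := mocks.NewDataCoordCatalog(t)
    catalog.EXPECT().ListImportJobs(mock.Anything).RunAndReturn(
        func(ctx context.Context) ([]*datapb.ImportJob, error) {
            // context-aware stub; a real test could also inspect ctx here
            return []*datapb.ImportJob{}, nil
        })
    jobs, err := catalog.ListImportJobs(context.TODO())
    assert.NoError(t, err)
    assert.Empty(t, jobs)
}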

-// ListImportTasks provides a mock function with given fields:
-func (_m *DataCoordCatalog) ListImportTasks() ([]*datapb.ImportTaskV2, error) {
-    ret := _m.Called()
+// ListImportTasks provides a mock function with given fields: ctx
+func (_m *DataCoordCatalog) ListImportTasks(ctx context.Context) ([]*datapb.ImportTaskV2, error) {
+    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListImportTasks")
+    }
+
    var r0 []*datapb.ImportTaskV2
    var r1 error
-    if rf, ok := ret.Get(0).(func() ([]*datapb.ImportTaskV2, error)); ok {
-        return rf()
+    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.ImportTaskV2, error)); ok {
+        return rf(ctx)
    }
-    if rf, ok := ret.Get(0).(func() []*datapb.ImportTaskV2); ok {
-        r0 = rf()
+    if rf, ok := ret.Get(0).(func(context.Context) []*datapb.ImportTaskV2); ok {
+        r0 = rf(ctx)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).([]*datapb.ImportTaskV2)
        }
    }

-    if rf, ok := ret.Get(1).(func() error); ok {
-        r1 = rf()
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
    } else {
        r1 = ret.Error(1)
    }
@@ -1254,13 +1366,14 @@ type DataCoordCatalog_ListImportTasks_Call struct {
}

// ListImportTasks is a helper method to define mock.On call
-func (_e *DataCoordCatalog_Expecter) ListImportTasks() *DataCoordCatalog_ListImportTasks_Call {
-    return &DataCoordCatalog_ListImportTasks_Call{Call: _e.mock.On("ListImportTasks")}
+// - ctx context.Context
+func (_e *DataCoordCatalog_Expecter) ListImportTasks(ctx interface{}) *DataCoordCatalog_ListImportTasks_Call {
+    return &DataCoordCatalog_ListImportTasks_Call{Call: _e.mock.On("ListImportTasks", ctx)}
}

-func (_c *DataCoordCatalog_ListImportTasks_Call) Run(run func()) *DataCoordCatalog_ListImportTasks_Call {
+func (_c *DataCoordCatalog_ListImportTasks_Call) Run(run func(ctx context.Context)) *DataCoordCatalog_ListImportTasks_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run()
+        run(args[0].(context.Context))
    })
    return _c
}
@@ -1270,7 +1383,7 @@ func (_c *DataCoordCatalog_ListImportTasks_Call) Return(_a0 []*datapb.ImportTask
    return _c
}

-func (_c *DataCoordCatalog_ListImportTasks_Call) RunAndReturn(run func() ([]*datapb.ImportTaskV2, error)) *DataCoordCatalog_ListImportTasks_Call {
+func (_c *DataCoordCatalog_ListImportTasks_Call) RunAndReturn(run func(context.Context) ([]*datapb.ImportTaskV2, error)) *DataCoordCatalog_ListImportTasks_Call {
    _c.Call.Return(run)
    return _c
}
@@ -1279,6 +1392,10 @@ func (_c *DataCoordCatalog_ListImportTasks_Call) RunAndReturn(run func() ([]*dat
func (_m *DataCoordCatalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListIndexes")
+    }
+
    var r0 []*model.Index
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*model.Index, error)); ok {
@@ -1333,6 +1450,10 @@ func (_c *DataCoordCatalog_ListIndexes_Call) RunAndReturn(run func(context.Conte
func (_m *DataCoordCatalog) ListPartitionStatsInfos(ctx context.Context) ([]*datapb.PartitionStatsInfo, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListPartitionStatsInfos")
+    }
+
    var r0 []*datapb.PartitionStatsInfo
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.PartitionStatsInfo, error)); ok {
@@ -1383,25 +1504,29 @@ func (_c *DataCoordCatalog_ListPartitionStatsInfos_Call) RunAndReturn(run func(c
    return _c
}

-// ListPreImportTasks provides a mock function with given fields:
-func (_m *DataCoordCatalog) ListPreImportTasks() ([]*datapb.PreImportTask, error) {
-    ret := _m.Called()
+// ListPreImportTasks provides a mock function with given fields: ctx
+func (_m *DataCoordCatalog) ListPreImportTasks(ctx context.Context) ([]*datapb.PreImportTask, error) {
+    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListPreImportTasks")
+    }
+
    var r0 []*datapb.PreImportTask
    var r1 error
-    if rf, ok := ret.Get(0).(func() ([]*datapb.PreImportTask, error)); ok {
-        return rf()
+    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.PreImportTask, error)); ok {
+        return rf(ctx)
    }
-    if rf, ok := ret.Get(0).(func() []*datapb.PreImportTask); ok {
-        r0 = rf()
+    if rf, ok := ret.Get(0).(func(context.Context) []*datapb.PreImportTask); ok {
+        r0 = rf(ctx)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).([]*datapb.PreImportTask)
        }
    }

-    if rf, ok := ret.Get(1).(func() error); ok {
-        r1 = rf()
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
    } else {
        r1 = ret.Error(1)
    }
@@ -1415,13 +1540,14 @@ type DataCoordCatalog_ListPreImportTasks_Call struct {
}

// ListPreImportTasks is a helper method to define mock.On call
-func (_e *DataCoordCatalog_Expecter) ListPreImportTasks() *DataCoordCatalog_ListPreImportTasks_Call {
-    return &DataCoordCatalog_ListPreImportTasks_Call{Call: _e.mock.On("ListPreImportTasks")}
+// - ctx context.Context
+func (_e *DataCoordCatalog_Expecter) ListPreImportTasks(ctx interface{}) *DataCoordCatalog_ListPreImportTasks_Call {
+    return &DataCoordCatalog_ListPreImportTasks_Call{Call: _e.mock.On("ListPreImportTasks", ctx)}
}

-func (_c *DataCoordCatalog_ListPreImportTasks_Call) Run(run func()) *DataCoordCatalog_ListPreImportTasks_Call {
+func (_c *DataCoordCatalog_ListPreImportTasks_Call) Run(run func(ctx context.Context)) *DataCoordCatalog_ListPreImportTasks_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run()
+        run(args[0].(context.Context))
    })
    return _c
}
@@ -1431,7 +1557,7 @@ func (_c *DataCoordCatalog_ListPreImportTasks_Call) Return(_a0 []*datapb.PreImpo
    return _c
}

-func (_c *DataCoordCatalog_ListPreImportTasks_Call) RunAndReturn(run func() ([]*datapb.PreImportTask, error)) *DataCoordCatalog_ListPreImportTasks_Call {
+func (_c *DataCoordCatalog_ListPreImportTasks_Call) RunAndReturn(run func(context.Context) ([]*datapb.PreImportTask, error)) *DataCoordCatalog_ListPreImportTasks_Call {
    _c.Call.Return(run)
    return _c
}
@@ -1440,6 +1566,10 @@ func (_c *DataCoordCatalog_ListPreImportTasks_Call) RunAndReturn(run func() ([]*
func (_m *DataCoordCatalog) ListSegmentIndexes(ctx context.Context) ([]*model.SegmentIndex, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListSegmentIndexes")
+    }
+
    var r0 []*model.SegmentIndex
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*model.SegmentIndex, error)); ok {
@@ -1494,6 +1624,10 @@ func (_c *DataCoordCatalog_ListSegmentIndexes_Call) RunAndReturn(run func(contex
func (_m *DataCoordCatalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListSegments")
+    }
+
    var r0 []*datapb.SegmentInfo
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.SegmentInfo, error)); ok {
@@ -1548,6 +1682,10 @@ func (_c *DataCoordCatalog_ListSegments_Call) RunAndReturn(run func(context.Cont
func (_m *DataCoordCatalog) ListStatsTasks(ctx context.Context) ([]*indexpb.StatsTask, error) {
    ret := _m.Called(ctx)

+    if len(ret) == 0 {
+        panic("no return value specified for ListStatsTasks")
+    }
+
    var r0 []*indexpb.StatsTask
    var r1 error
    if rf, ok := ret.Get(0).(func(context.Context) ([]*indexpb.StatsTask, error)); ok {
@@ -1602,6 +1740,10 @@ func (_c *DataCoordCatalog_ListStatsTasks_Call) RunAndReturn(run func(context.Co
func (_m *DataCoordCatalog) MarkChannelAdded(ctx context.Context, channel string) error {
    ret := _m.Called(ctx, channel)

+    if len(ret) == 0 {
+        panic("no return value specified for MarkChannelAdded")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
        r0 = rf(ctx, channel)
@@ -1645,6 +1787,10 @@ func (_c *DataCoordCatalog_MarkChannelAdded_Call) RunAndReturn(run func(context.
func (_m *DataCoordCatalog) MarkChannelDeleted(ctx context.Context, channel string) error {
    ret := _m.Called(ctx, channel)

+    if len(ret) == 0 {
+        panic("no return value specified for MarkChannelDeleted")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
        r0 = rf(ctx, channel)
@@ -1688,6 +1834,10 @@ func (_c *DataCoordCatalog_MarkChannelDeleted_Call) RunAndReturn(run func(contex
func (_m *DataCoordCatalog) SaveAnalyzeTask(ctx context.Context, task *indexpb.AnalyzeTask) error {
    ret := _m.Called(ctx, task)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveAnalyzeTask")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *indexpb.AnalyzeTask) error); ok {
        r0 = rf(ctx, task)
@@ -1731,6 +1881,10 @@ func (_c *DataCoordCatalog_SaveAnalyzeTask_Call) RunAndReturn(run func(context.C
func (_m *DataCoordCatalog) SaveChannelCheckpoint(ctx context.Context, vChannel string, pos *msgpb.MsgPosition) error {
    ret := _m.Called(ctx, vChannel, pos)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveChannelCheckpoint")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, string, *msgpb.MsgPosition) error); ok {
        r0 = rf(ctx, vChannel, pos)
@@ -1775,6 +1929,10 @@ func (_c *DataCoordCatalog_SaveChannelCheckpoint_Call) RunAndReturn(run func(con
func (_m *DataCoordCatalog) SaveChannelCheckpoints(ctx context.Context, positions []*msgpb.MsgPosition) error {
    ret := _m.Called(ctx, positions)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveChannelCheckpoints")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, []*msgpb.MsgPosition) error); ok {
        r0 = rf(ctx, positions)
@@ -1818,6 +1976,10 @@ func (_c *DataCoordCatalog_SaveChannelCheckpoints_Call) RunAndReturn(run func(co
func (_m *DataCoordCatalog) SaveCompactionTask(ctx context.Context, task *datapb.CompactionTask) error {
    ret := _m.Called(ctx, task)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveCompactionTask")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *datapb.CompactionTask) error); ok {
        r0 = rf(ctx, task)
@@ -1861,6 +2023,10 @@ func (_c *DataCoordCatalog_SaveCompactionTask_Call) RunAndReturn(run func(contex
func (_m *DataCoordCatalog) SaveCurrentPartitionStatsVersion(ctx context.Context, collID int64, partID int64, vChannel string, currentVersion int64) error {
    ret := _m.Called(ctx, collID, partID, vChannel, currentVersion)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveCurrentPartitionStatsVersion")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, int64, int64, string, int64) error); ok {
        r0 = rf(ctx, collID, partID, vChannel, currentVersion)
@@ -1907,6 +2073,10 @@ func (_c *DataCoordCatalog_SaveCurrentPartitionStatsVersion_Call) RunAndReturn(r
func (_m *DataCoordCatalog) SaveDroppedSegmentsInBatch(ctx context.Context, segments []*datapb.SegmentInfo) error {
    ret := _m.Called(ctx, segments)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveDroppedSegmentsInBatch")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, []*datapb.SegmentInfo) error); ok {
        r0 = rf(ctx, segments)
@@ -1946,13 +2116,17 @@ func (_c *DataCoordCatalog_SaveDroppedSegmentsInBatch_Call) RunAndReturn(run fun
    return _c
}

-// SaveImportJob provides a mock function with given fields: job
-func (_m *DataCoordCatalog) SaveImportJob(job *datapb.ImportJob) error {
-    ret := _m.Called(job)
+// SaveImportJob provides a mock function with given fields: ctx, job
+func (_m *DataCoordCatalog) SaveImportJob(ctx context.Context, job *datapb.ImportJob) error {
+    ret := _m.Called(ctx, job)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveImportJob")
+    }
+
    var r0 error
-    if rf, ok := ret.Get(0).(func(*datapb.ImportJob) error); ok {
-        r0 = rf(job)
+    if rf, ok := ret.Get(0).(func(context.Context, *datapb.ImportJob) error); ok {
+        r0 = rf(ctx, job)
    } else {
        r0 = ret.Error(0)
    }
@@ -1966,14 +2140,15 @@ type DataCoordCatalog_SaveImportJob_Call struct {
}

// SaveImportJob is a helper method to define mock.On call
+// - ctx context.Context
// - job *datapb.ImportJob
-func (_e *DataCoordCatalog_Expecter) SaveImportJob(job interface{}) *DataCoordCatalog_SaveImportJob_Call {
-    return &DataCoordCatalog_SaveImportJob_Call{Call: _e.mock.On("SaveImportJob", job)}
+func (_e *DataCoordCatalog_Expecter) SaveImportJob(ctx interface{}, job interface{}) *DataCoordCatalog_SaveImportJob_Call {
+    return &DataCoordCatalog_SaveImportJob_Call{Call: _e.mock.On("SaveImportJob", ctx, job)}
}

-func (_c *DataCoordCatalog_SaveImportJob_Call) Run(run func(job *datapb.ImportJob)) *DataCoordCatalog_SaveImportJob_Call {
+func (_c *DataCoordCatalog_SaveImportJob_Call) Run(run func(ctx context.Context, job *datapb.ImportJob)) *DataCoordCatalog_SaveImportJob_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(*datapb.ImportJob))
+        run(args[0].(context.Context), args[1].(*datapb.ImportJob))
    })
    return _c
}
@@ -1983,18 +2158,22 @@ func (_c *DataCoordCatalog_SaveImportJob_Call) Return(_a0 error) *DataCoordCatal
    return _c
}

-func (_c *DataCoordCatalog_SaveImportJob_Call) RunAndReturn(run func(*datapb.ImportJob) error) *DataCoordCatalog_SaveImportJob_Call {
+func (_c *DataCoordCatalog_SaveImportJob_Call) RunAndReturn(run func(context.Context, *datapb.ImportJob) error) *DataCoordCatalog_SaveImportJob_Call {
    _c.Call.Return(run)
    return _c
}
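Note: the generated Run helper above now hands both arguments to the callback, so a test can capture the saved job (and the ctx) for assertions. A hypothetical sketch under the same assumptions as the earlier snippets:

func TestSaveImportJobMockUsage(t *testing.T) {
    catalog := mocks.NewDataCoordCatalog(t)
    var saved *datapb.ImportJob
    catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).
        Run(func(ctx context.Context, job *datapb.ImportJob) {
            saved = job // both ctx and job are available to assertions here
        }).
        Return(nil)
    job := &datapb.ImportJob{}
    assert.NoError(t, catalog.SaveImportJob(context.TODO(), job))
    assert.Same(t, job, saved)
}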

-// SaveImportTask provides a mock function with given fields: task
-func (_m *DataCoordCatalog) SaveImportTask(task *datapb.ImportTaskV2) error {
-    ret := _m.Called(task)
+// SaveImportTask provides a mock function with given fields: ctx, task
+func (_m *DataCoordCatalog) SaveImportTask(ctx context.Context, task *datapb.ImportTaskV2) error {
+    ret := _m.Called(ctx, task)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveImportTask")
+    }
+
    var r0 error
-    if rf, ok := ret.Get(0).(func(*datapb.ImportTaskV2) error); ok {
-        r0 = rf(task)
+    if rf, ok := ret.Get(0).(func(context.Context, *datapb.ImportTaskV2) error); ok {
+        r0 = rf(ctx, task)
    } else {
        r0 = ret.Error(0)
    }
@@ -2008,14 +2187,15 @@ type DataCoordCatalog_SaveImportTask_Call struct {
}

// SaveImportTask is a helper method to define mock.On call
+// - ctx context.Context
// - task *datapb.ImportTaskV2
-func (_e *DataCoordCatalog_Expecter) SaveImportTask(task interface{}) *DataCoordCatalog_SaveImportTask_Call {
-    return &DataCoordCatalog_SaveImportTask_Call{Call: _e.mock.On("SaveImportTask", task)}
+func (_e *DataCoordCatalog_Expecter) SaveImportTask(ctx interface{}, task interface{}) *DataCoordCatalog_SaveImportTask_Call {
+    return &DataCoordCatalog_SaveImportTask_Call{Call: _e.mock.On("SaveImportTask", ctx, task)}
}

-func (_c *DataCoordCatalog_SaveImportTask_Call) Run(run func(task *datapb.ImportTaskV2)) *DataCoordCatalog_SaveImportTask_Call {
+func (_c *DataCoordCatalog_SaveImportTask_Call) Run(run func(ctx context.Context, task *datapb.ImportTaskV2)) *DataCoordCatalog_SaveImportTask_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(*datapb.ImportTaskV2))
+        run(args[0].(context.Context), args[1].(*datapb.ImportTaskV2))
    })
    return _c
}
@@ -2025,7 +2205,7 @@ func (_c *DataCoordCatalog_SaveImportTask_Call) Return(_a0 error) *DataCoordCata
    return _c
}

-func (_c *DataCoordCatalog_SaveImportTask_Call) RunAndReturn(run func(*datapb.ImportTaskV2) error) *DataCoordCatalog_SaveImportTask_Call {
+func (_c *DataCoordCatalog_SaveImportTask_Call) RunAndReturn(run func(context.Context, *datapb.ImportTaskV2) error) *DataCoordCatalog_SaveImportTask_Call {
    _c.Call.Return(run)
    return _c
}
@@ -2034,6 +2214,10 @@ func (_c *DataCoordCatalog_SaveImportTask_Call) RunAndReturn(run func(*datapb.Im
func (_m *DataCoordCatalog) SavePartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error {
    ret := _m.Called(ctx, info)

+    if len(ret) == 0 {
+        panic("no return value specified for SavePartitionStatsInfo")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *datapb.PartitionStatsInfo) error); ok {
        r0 = rf(ctx, info)
@@ -2073,13 +2257,17 @@ func (_c *DataCoordCatalog_SavePartitionStatsInfo_Call) RunAndReturn(run func(co
    return _c
}

-// SavePreImportTask provides a mock function with given fields: task
-func (_m *DataCoordCatalog) SavePreImportTask(task *datapb.PreImportTask) error {
-    ret := _m.Called(task)
+// SavePreImportTask provides a mock function with given fields: ctx, task
+func (_m *DataCoordCatalog) SavePreImportTask(ctx context.Context, task *datapb.PreImportTask) error {
+    ret := _m.Called(ctx, task)

+    if len(ret) == 0 {
+        panic("no return value specified for SavePreImportTask")
+    }
+
    var r0 error
-    if rf, ok := ret.Get(0).(func(*datapb.PreImportTask) error); ok {
-        r0 = rf(task)
+    if rf, ok := ret.Get(0).(func(context.Context, *datapb.PreImportTask) error); ok {
+        r0 = rf(ctx, task)
    } else {
        r0 = ret.Error(0)
    }
@@ -2093,14 +2281,15 @@ type DataCoordCatalog_SavePreImportTask_Call struct {
}

// SavePreImportTask is a helper method to define mock.On call
+// - ctx context.Context
// - task *datapb.PreImportTask
-func (_e *DataCoordCatalog_Expecter) SavePreImportTask(task interface{}) *DataCoordCatalog_SavePreImportTask_Call {
-    return &DataCoordCatalog_SavePreImportTask_Call{Call: _e.mock.On("SavePreImportTask", task)}
+func (_e *DataCoordCatalog_Expecter) SavePreImportTask(ctx interface{}, task interface{}) *DataCoordCatalog_SavePreImportTask_Call {
+    return &DataCoordCatalog_SavePreImportTask_Call{Call: _e.mock.On("SavePreImportTask", ctx, task)}
}

-func (_c *DataCoordCatalog_SavePreImportTask_Call) Run(run func(task *datapb.PreImportTask)) *DataCoordCatalog_SavePreImportTask_Call {
+func (_c *DataCoordCatalog_SavePreImportTask_Call) Run(run func(ctx context.Context, task *datapb.PreImportTask)) *DataCoordCatalog_SavePreImportTask_Call {
    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(*datapb.PreImportTask))
+        run(args[0].(context.Context), args[1].(*datapb.PreImportTask))
    })
    return _c
}
@@ -2110,7 +2299,7 @@ func (_c *DataCoordCatalog_SavePreImportTask_Call) Return(_a0 error) *DataCoordC
    return _c
}

-func (_c *DataCoordCatalog_SavePreImportTask_Call) RunAndReturn(run func(*datapb.PreImportTask) error) *DataCoordCatalog_SavePreImportTask_Call {
+func (_c *DataCoordCatalog_SavePreImportTask_Call) RunAndReturn(run func(context.Context, *datapb.PreImportTask) error) *DataCoordCatalog_SavePreImportTask_Call {
    _c.Call.Return(run)
    return _c
}
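Note: failure paths can be driven the same way by returning an error from the expectation. A hypothetical sketch, again assuming the generated constructor, testify's mock/assert, and the standard errors package:

func TestSavePreImportTaskMockFailure(t *testing.T) {
    catalog := mocks.NewDataCoordCatalog(t)
    catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).
        Return(errors.New("mock save failure"))
    err := catalog.SavePreImportTask(context.TODO(), &datapb.PreImportTask{})
    assert.Error(t, err)
}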
@@ -2119,6 +2308,10 @@ func (_c *DataCoordCatalog_SavePreImportTask_Call) RunAndReturn(run func(*datapb
func (_m *DataCoordCatalog) SaveStatsTask(ctx context.Context, task *indexpb.StatsTask) error {
    ret := _m.Called(ctx, task)

+    if len(ret) == 0 {
+        panic("no return value specified for SaveStatsTask")
+    }
+
    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, *indexpb.StatsTask) error); ok {
        r0 = rf(ctx, task)
@@ -2162,6 +2355,10 @@ func (_c *DataCoordCatalog_SaveStatsTask_Call) RunAndReturn(run func(context.Con
func (_m *DataCoordCatalog) ShouldDropChannel(ctx context.Context, channel string) bool {
    ret := _m.Called(ctx, channel)

+    if len(ret) == 0 {
+        panic("no return value specified for ShouldDropChannel")
+    }
+
    var r0 bool
    if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok {
        r0 = rf(ctx, channel)