mirror of https://github.com/milvus-io/milvus.git
remove pull target rpc from lock (#26054)
Signed-off-by: Wei Liu <wei.liu@zilliz.com>
Branch: pull/26130/head
parent 49902f1b37
commit 6f89620a43
internal/querycoordv2/task
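
Note: the core change in this diff is in the query coordinator's TargetManager: the GetRecoveryInfoV2 RPC that pulls a collection's next target from DataCoord is moved out of the manager's mutex, and the partition-ID variants (UpdateCollectionNextTargetWithPartitions) are removed in favor of resolving partition IDs from meta. The following is a minimal, hypothetical Go sketch of that lock-narrowing pattern; Manager, fetchFromRemote, and partitionsOf are illustrative stand-ins, not the real Milvus types.

package main

import (
	"fmt"
	"sync"
)

// Manager is a hypothetical stand-in for TargetManager: a mutex guards an
// in-memory map, but building a new entry requires a slow remote call.
type Manager struct {
	mu      sync.Mutex
	targets map[int64][]int64 // collectionID -> segment IDs
}

// fetchFromRemote stands in for the GetRecoveryInfoV2 RPC; it can block for
// a long time, which is why it must not run while mu is held.
func fetchFromRemote(collectionID int64, partitionIDs []int64) ([]int64, error) {
	return []int64{1, 2, 3}, nil
}

// partitionsOf stands in for reading partition IDs from meta.
func partitionsOf(collectionID int64) []int64 { return []int64{10, 11} }

// UpdateNextTarget shows the pattern this commit introduces: read the
// inputs under the lock, release it across the RPC, then re-acquire it
// only for the short critical section that commits the result.
func (m *Manager) UpdateNextTarget(collectionID int64) error {
	m.mu.Lock()
	partitionIDs := partitionsOf(collectionID) // snapshot inputs under lock
	m.mu.Unlock()                              // never hold mu across the RPC

	segments, err := fetchFromRemote(collectionID, partitionIDs)
	if err != nil {
		return err
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	m.targets[collectionID] = segments // commit only
	return nil
}

func main() {
	m := &Manager{targets: make(map[int64][]int64)}
	if err := m.UpdateNextTarget(1); err != nil {
		fmt.Println("update failed:", err)
	}
	fmt.Println(m.targets[1])
}
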
@@ -340,7 +340,6 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() {
suite.SetupSuite()
defer suite.TearDownTest()
balancer := suite.balancer
collection := utils.CreateTestCollection(1, 1)
segments := []*datapb.SegmentInfo{
{
ID: 1,
@@ -364,16 +363,18 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() {
},
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, segments, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)
collection := utils.CreateTestCollection(1, 1)
collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
collection.LoadType = querypb.LoadType_LoadCollection
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...)))
suite.broker.ExpectedCalls = nil
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, segments, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1)
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
suite.mockScheduler.Mock.On("GetNodeChannelDelta", mock.Anything).Return(0)
for node, s := range c.distributions {
balancer.dist.SegmentDistManager.Update(node, s...)
@@ -562,17 +563,18 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalanceOnPartStopping() {
balancer := suite.balancer
collection := utils.CreateTestCollection(1, 1)

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, c.segmentInCurrent, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)
collection.LoadPercentage = 100
collection.LoadType = querypb.LoadType_LoadCollection
collection.Status = querypb.LoadStatus_Loaded
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...)))
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, c.segmentInCurrent, nil)
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1)
suite.broker.ExpectedCalls = nil
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, c.segmentInNext, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
suite.mockScheduler.Mock.On("GetNodeChannelDelta", mock.Anything).Return(0)
for node, s := range c.distributions {
balancer.dist.SegmentDistManager.Update(node, s...)
@@ -673,15 +675,17 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalanceOutboundNodes() {
PartitionID: 1,
},
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, segments, nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)

collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
collection.LoadType = querypb.LoadType_LoadCollection
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...)))
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(nil, segments, nil)
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
balancer.targetMgr.UpdateCollectionCurrentTarget(1)
balancer.targetMgr.UpdateCollectionNextTarget(int64(1))
for node, s := range c.distributions {
balancer.dist.SegmentDistManager.Update(node, s...)
}

@@ -296,12 +296,13 @@ func (suite *ScoreBasedBalancerTestSuite) TestBalanceOneRound() {
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, c.collectionID).Return(
nil, c.collectionsSegments, nil)
suite.broker.EXPECT().GetPartitions(mock.Anything, c.collectionID).Return([]int64{c.collectionID}, nil).Maybe()
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(c.collectionID, c.collectionID)
balancer.targetMgr.UpdateCollectionCurrentTarget(c.collectionID, c.collectionID)
collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(c.collectionID, c.collectionID))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(c.replicaID, c.collectionID, c.nodes))
balancer.targetMgr.UpdateCollectionNextTarget(c.collectionID)
balancer.targetMgr.UpdateCollectionCurrentTarget(c.collectionID)

//2. set up target for distribution for multi collections
for node, s := range c.distributions {
@@ -401,14 +402,16 @@ func (suite *ScoreBasedBalancerTestSuite) TestBalanceMultiRound() {
collections = append(collections, collection)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, balanceCase.collectionIDs[i]).Return(
nil, balanceCase.collectionsSegments[i], nil)
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(balanceCase.collectionIDs[i], balanceCase.collectionIDs[i])
balancer.targetMgr.UpdateCollectionCurrentTarget(balanceCase.collectionIDs[i], balanceCase.collectionIDs[i])

collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
collection.LoadType = querypb.LoadType_LoadCollection
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(balanceCase.collectionIDs[i], balanceCase.collectionIDs[i]))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(balanceCase.replicaIDs[i], balanceCase.collectionIDs[i],
append(balanceCase.nodes, balanceCase.notExistedNodes...)))
balancer.targetMgr.UpdateCollectionNextTarget(balanceCase.collectionIDs[i])
balancer.targetMgr.UpdateCollectionCurrentTarget(balanceCase.collectionIDs[i])
}

//2. set up target for distribution for multi collections
@@ -539,12 +542,13 @@ func (suite *ScoreBasedBalancerTestSuite) TestStoppedBalance() {
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, c.collectionID).Return(
nil, c.collectionsSegments, nil)
suite.broker.EXPECT().GetPartitions(mock.Anything, c.collectionID).Return([]int64{c.collectionID}, nil).Maybe()
balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(c.collectionID, c.collectionID)
balancer.targetMgr.UpdateCollectionCurrentTarget(c.collectionID, c.collectionID)
collection.LoadPercentage = 100
collection.Status = querypb.LoadStatus_Loaded
balancer.meta.CollectionManager.PutCollection(collection)
balancer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(c.collectionID, c.collectionID))
balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(c.replicaID, c.collectionID, c.nodes))
balancer.targetMgr.UpdateCollectionNextTarget(c.collectionID)
balancer.targetMgr.UpdateCollectionCurrentTarget(c.collectionID)

//2. set up target for distribution for multi collections
for node, s := range c.distributions {

@@ -105,6 +105,7 @@ func (suite *ChannelCheckerTestSuite) createMockBalancer() balance.Balance {
func (suite *ChannelCheckerTestSuite) TestLoadChannel() {
checker := suite.checker
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1}))
suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost"))
checker.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 1)
@@ -118,7 +119,7 @@ func (suite *ChannelCheckerTestSuite) TestLoadChannel() {

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, nil, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))

tasks := checker.Check(context.TODO())
suite.Len(tasks, 1)
@@ -151,6 +152,7 @@ func (suite *ChannelCheckerTestSuite) TestReduceChannel() {
func (suite *ChannelCheckerTestSuite) TestRepeatedChannels() {
checker := suite.checker
err := checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
suite.NoError(err)
err = checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
suite.NoError(err)
@@ -170,7 +172,7 @@ func (suite *ChannelCheckerTestSuite) TestRepeatedChannels() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))
checker.dist.ChannelDistManager.Update(1, utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 2, "test-insert-channel"))

@@ -87,6 +87,7 @@ func (suite *CheckerControllerSuite) TestBasic() {

// set meta
suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
suite.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost"))
suite.nodeMgr.Add(session.NewNodeInfo(2, "localhost"))
@@ -103,7 +104,7 @@ func (suite *CheckerControllerSuite) TestBasic() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
nil, segments, nil)
suite.targetManager.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
suite.targetManager.UpdateCollectionNextTarget(int64(1))

// set dist
suite.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))

@@ -106,6 +106,7 @@ func (suite *SegmentCheckerTestSuite) TestLoadSegments() {
checker := suite.checker
// set meta
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost"))
suite.nodeMgr.Add(session.NewNodeInfo(2, "localhost"))
@@ -122,7 +123,7 @@ func (suite *SegmentCheckerTestSuite) TestLoadSegments() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
nil, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))

// set dist
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
@@ -143,6 +144,7 @@ func (suite *SegmentCheckerTestSuite) TestSkipCheckReplica() {
checker := suite.checker
// set meta
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost"))
suite.nodeMgr.Add(session.NewNodeInfo(2, "localhost"))
@@ -159,7 +161,7 @@ func (suite *SegmentCheckerTestSuite) TestSkipCheckReplica() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
nil, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))

// set dist
checker.dist.ChannelDistManager.Update(1, utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
@@ -197,6 +199,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
checker := suite.checker
// set meta
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

// set target
@@ -209,7 +212,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
nil, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))

// set dist
checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
@@ -241,6 +244,7 @@ func (suite *SegmentCheckerTestSuite) TestSkipReleaseSealedSegments() {
partitionID := int64(1)
// set meta
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(collectionID, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(collectionID, partitionID))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, collectionID, []int64{1, 2}))

@@ -248,7 +252,7 @@ func (suite *SegmentCheckerTestSuite) TestSkipReleaseSealedSegments() {
segments := []*datapb.SegmentInfo{}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
nil, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionID)
checker.targetMgr.UpdateCollectionNextTarget(collectionID)
checker.targetMgr.UpdateCollectionCurrentTarget(collectionID)
readableVersion := checker.targetMgr.GetCollectionTargetVersion(collectionID, meta.CurrentTarget)

@@ -285,6 +289,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseGrowingSegments() {
// segment3 is compacted from segment2, and node2 has growing segments 2 and 3. checker should generate
// 2 tasks to reduce segment 2 and 3.
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

segments := []*datapb.SegmentInfo{
@@ -303,8 +308,8 @@ func (suite *SegmentCheckerTestSuite) TestReleaseGrowingSegments() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionCurrentTarget(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))
checker.targetMgr.UpdateCollectionCurrentTarget(int64(1))

growingSegments := make(map[int64]*meta.Segment)
growingSegments[2] = utils.CreateTestSegment(1, 1, 2, 2, 0, "test-insert-channel")
@@ -349,6 +354,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseGrowingSegments() {
func (suite *SegmentCheckerTestSuite) TestSkipReleaseGrowingSegments() {
checker := suite.checker
checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
checker.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

segments := []*datapb.SegmentInfo{}
@@ -361,8 +367,8 @@ func (suite *SegmentCheckerTestSuite) TestSkipReleaseGrowingSegments() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
checker.targetMgr.UpdateCollectionCurrentTarget(int64(1), int64(1))
checker.targetMgr.UpdateCollectionNextTarget(int64(1))
checker.targetMgr.UpdateCollectionCurrentTarget(int64(1))

growingSegments := make(map[int64]*meta.Segment)
growingSegments[2] = utils.CreateTestSegment(1, 1, 2, 2, 0, "test-insert-channel")

@@ -166,16 +166,7 @@ func (job *LoadCollectionJob) Execute() error {
return err
}

// 4. update next target
_, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID(), partitionIDs...)
if err != nil {
msg := "failed to update next target"
log.Error(msg, zap.Error(err))
return errors.Wrap(err, msg)
}
job.undo.TargetUpdated = true

// 5. put collection/partitions meta
// 4. put collection/partitions meta
partitions := lo.Map(lackPartitionIDs, func(partID int64, _ int) *meta.Partition {
return &meta.Partition{
PartitionLoadInfo: &querypb.PartitionLoadInfo{
@@ -204,10 +195,17 @@ func (job *LoadCollectionJob) Execute() error {
log.Error(msg, zap.Error(err))
return errors.Wrap(err, msg)
}

eventlog.Record(eventlog.NewRawEvt(eventlog.Level_Info, fmt.Sprintf("Start load collection %d", collection.CollectionID)))

metrics.QueryCoordNumPartitions.WithLabelValues().Add(float64(len(partitions)))

// 5. update next target; no need to rollback if pulling the target fails, the target observer will pull targets periodically
_, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID())
if err != nil {
msg := "failed to update next target"
log.Warn(msg, zap.Error(err))
}
job.undo.TargetUpdated = true

return nil
}

@@ -341,16 +339,7 @@ func (job *LoadPartitionJob) Execute() error {
return err
}

// 4. update next target
_, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID(), append(loadedPartitionIDs, lackPartitionIDs...)...)
if err != nil {
msg := "failed to update next target"
log.Error(msg, zap.Error(err))
return errors.Wrap(err, msg)
}
job.undo.TargetUpdated = true

// 5. put collection/partitions meta
// 4. put collection/partitions meta
partitions := lo.Map(lackPartitionIDs, func(partID int64, _ int) *meta.Partition {
return &meta.Partition{
PartitionLoadInfo: &querypb.PartitionLoadInfo{
@@ -388,8 +377,16 @@ func (job *LoadPartitionJob) Execute() error {
return errors.Wrap(err, msg)
}
}

metrics.QueryCoordNumPartitions.WithLabelValues().Add(float64(len(partitions)))

// 5. update next target; no need to rollback if pulling the target fails, the target observer will pull targets periodically
_, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID())
if err != nil {
msg := "failed to update next target"
log.Warn(msg, zap.Error(err))
}
job.undo.TargetUpdated = true

return nil
}

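The two Execute hunks above reorder the load jobs: collection/partition meta is persisted first, and the next-target pull becomes a best-effort step that only logs a warning on failure, because the target observer pulls targets periodically anyway. A hedged sketch of the new ordering, assuming hypothetical putMeta/pullNextTarget helpers (not real Milvus APIs):

package main

import "log"

// putMeta and pullNextTarget are hypothetical stand-ins for the job's
// meta-write and target-update steps.
func putMeta(collectionID int64) error        { return nil }
func pullNextTarget(collectionID int64) error { return nil }

// executeLoadJob mirrors the new ordering in LoadCollectionJob.Execute:
// persist meta first, then treat the target pull as best-effort.
func executeLoadJob(collectionID int64) error {
	if err := putMeta(collectionID); err != nil {
		return err // meta failures still abort (and roll back) the job
	}
	if err := pullNextTarget(collectionID); err != nil {
		// warn only: nothing to roll back, the observer will retry
		log.Printf("failed to update next target: %v", err)
	}
	return nil
}

func main() {
	_ = executeLoadJob(1)
}
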
@@ -412,7 +412,7 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() {
err := job.Wait()
suite.NoError(err)
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
suite.assertCollectionLoaded(collection)
}

@@ -473,7 +473,7 @@ func (suite *JobSuite) TestLoadPartition() {
err := job.Wait()
suite.NoError(err)
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
suite.assertCollectionLoaded(collection)
}

@@ -809,7 +809,7 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() {
err := job.Wait()
suite.NoError(err)
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
suite.assertCollectionLoaded(collection)
}

@@ -482,10 +482,6 @@ func (m *CollectionManager) UpdateLoadPercent(partitionID int64, loadPercent int
return 0, merr.WrapErrPartitionNotFound(partitionID)
}

if loadPercent <= oldPartition.LoadPercentage {
return m.calculateLoadPercentage(oldPartition.GetCollectionID()), nil
}

// update partition load percentage
newPartition := oldPartition.Clone()
newPartition.LoadPercentage = loadPercent
@@ -508,9 +504,6 @@ func (m *CollectionManager) UpdateLoadPercent(partitionID int64, loadPercent int
return 0, merr.WrapErrCollectionNotFound(newPartition.CollectionID)
}
collectionPercent := m.calculateLoadPercentage(oldCollection.CollectionID)
if collectionPercent <= oldCollection.LoadPercentage {
return m.calculateLoadPercentage(oldPartition.GetCollectionID()), nil
}
newCollection := oldCollection.Clone()
newCollection.LoadPercentage = collectionPercent
saveCollection := false

@@ -433,15 +433,6 @@ func (suite *CollectionManagerSuite) TestUpdateLoadPercentage() {
collection := mgr.GetCollection(1)
suite.Equal(int32(15), collection.LoadPercentage)
suite.Equal(querypb.LoadStatus_Loading, collection.Status)
// expect nothing changed
mgr.UpdateLoadPercent(1, 15)
partition = mgr.GetPartition(1)
suite.Equal(int32(30), partition.LoadPercentage)
suite.Equal(querypb.LoadStatus_Loading, partition.Status)
collection = mgr.GetCollection(1)
suite.Equal(int32(15), collection.LoadPercentage)
suite.Equal(querypb.LoadStatus_Loading, collection.Status)
suite.Equal(querypb.LoadStatus_Loading, mgr.CalculateLoadStatus(collection.CollectionID))
// test update partition load percentage to 100
mgr.UpdateLoadPercent(1, 100)
partition = mgr.GetPartition(1)

@@ -75,6 +75,10 @@ func newTarget() *target {
}

func (t *target) updateCollectionTarget(collectionID int64, target *CollectionTarget) {
if t.collectionTargetMap[collectionID] != nil && target.GetTargetVersion() <= t.collectionTargetMap[collectionID].GetTargetVersion() {
return
}

t.collectionTargetMap[collectionID] = target
}

@@ -20,7 +20,6 @@ import (
"context"
"sync"

"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/funcutil"
@@ -62,11 +61,10 @@ func NewTargetManager(broker Broker, meta *Meta) *TargetManager {
// UpdateCollectionCurrentTarget updates the current target to next target,
// WARN: DO NOT call this method for an existing collection as target observer running, or it will lead to a double-update,
// which may make the current target not available
func (mgr *TargetManager) UpdateCollectionCurrentTarget(collectionID int64, partitionIDs ...int64) bool {
func (mgr *TargetManager) UpdateCollectionCurrentTarget(collectionID int64) bool {
mgr.rwMutex.Lock()
defer mgr.rwMutex.Unlock()
log := log.With(zap.Int64("collectionID", collectionID),
zap.Int64s("PartitionIDs", partitionIDs))
log := log.With(zap.Int64("collectionID", collectionID))

log.Debug("start to update current target for collection")

@@ -86,72 +84,55 @@ func (mgr *TargetManager) UpdateCollectionCurrentTarget(collectionID int64, part
return true
}

// UpdateCollectionNextTargetWithPartitions pulls next target from DataCoord,
// WARN: DO NOT call this method for an existing collection as target observer running, or it will lead to a double-update,
// which may make the current target not available
func (mgr *TargetManager) UpdateCollectionNextTargetWithPartitions(collectionID int64, partitionIDs ...int64) error {
mgr.rwMutex.Lock()
defer mgr.rwMutex.Unlock()

if len(partitionIDs) == 0 {
msg := "failed to update collection next target, due to no partition specified"
log.Warn(msg,
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", partitionIDs))
return errors.New(msg)
}

return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
}

// UpdateCollectionNextTarget updates the next target with new target pulled from DataCoord,
// WARN: DO NOT call this method for an existing collection as target observer running, or it will lead to a double-update,
// which may make the current target not available
func (mgr *TargetManager) UpdateCollectionNextTarget(collectionID int64) error {
mgr.rwMutex.Lock()
defer mgr.rwMutex.Unlock()

partitions := mgr.meta.GetPartitionsByCollection(collectionID)
partitionIDs := lo.Map(partitions, func(partition *Partition, i int) int64 {
return partition.PartitionID
})
allocatedTarget := NewCollectionTarget(nil, nil)
mgr.rwMutex.Unlock()

return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
}

func (mgr *TargetManager) updateCollectionNextTarget(collectionID int64, partitionIDs ...int64) error {
log := log.With(zap.Int64("collectionID", collectionID),
zap.Int64s("PartitionIDs", partitionIDs))

log.Debug("start to update next targets for collection")
newTarget, err := mgr.PullNextTarget(mgr.broker, collectionID, partitionIDs...)
segments, channels, err := mgr.PullNextTargetV2(mgr.broker, collectionID, partitionIDs...)
if err != nil {
log.Error("failed to get next targets for collection",
zap.Error(err))
return err
}

mgr.next.updateCollectionTarget(collectionID, newTarget)
if len(segments) == 0 && len(channels) == 0 {
log.Debug("skip empty next targets for collection")
return nil
}
allocatedTarget.segments = segments
allocatedTarget.dmChannels = channels

mgr.rwMutex.Lock()
defer mgr.rwMutex.Unlock()
mgr.next.updateCollectionTarget(collectionID, allocatedTarget)
log.Debug("finish to update next targets for collection",
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
zap.Int64s("segments", allocatedTarget.GetAllSegmentIDs()),
zap.Strings("channels", allocatedTarget.GetAllDmChannelNames()))

return nil
}

func (mgr *TargetManager) PullNextTargetV1(broker Broker, collectionID int64, chosenPartitionIDs ...int64) (*CollectionTarget, error) {
func (mgr *TargetManager) PullNextTargetV1(broker Broker, collectionID int64, chosenPartitionIDs ...int64) (map[int64]*datapb.SegmentInfo, map[string]*DmChannel, error) {
if len(chosenPartitionIDs) == 0 {
return nil, nil, nil
}
channelInfos := make(map[string][]*datapb.VchannelInfo)
segments := make(map[int64]*datapb.SegmentInfo, 0)
dmChannels := make(map[string]*DmChannel)

if len(chosenPartitionIDs) == 0 {
return NewCollectionTarget(segments, dmChannels), nil
}

fullPartitions, err := broker.GetPartitions(context.Background(), collectionID)
if err != nil {
return nil, err
return nil, nil, err
}

// we should pull `channel targets` from all partitions because QueryNodes need to load
@@ -162,7 +143,7 @@ func (mgr *TargetManager) PullNextTargetV1(broker Broker, collectionID int64, ch
zap.Int64("partitionID", partitionID))
vChannelInfos, binlogs, err := broker.GetRecoveryInfo(context.TODO(), collectionID, partitionID)
if err != nil {
return nil, err
return nil, nil, err
}
for _, info := range vChannelInfos {
channelInfos[info.GetChannelName()] = append(channelInfos[info.GetChannelName()], info)
@@ -188,23 +169,23 @@ func (mgr *TargetManager) PullNextTargetV1(broker Broker, collectionID int64, ch
merged := mgr.mergeDmChannelInfo(infos)
dmChannels[merged.GetChannelName()] = merged
}
return NewCollectionTarget(segments, dmChannels), nil

return segments, dmChannels, nil
}

func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, chosenPartitionIDs ...int64) (*CollectionTarget, error) {
func (mgr *TargetManager) PullNextTargetV2(broker Broker, collectionID int64, chosenPartitionIDs ...int64) (map[int64]*datapb.SegmentInfo, map[string]*DmChannel, error) {
log.Debug("start to pull next targets for collection",
zap.Int64("collectionID", collectionID),
zap.Int64s("chosenPartitionIDs", chosenPartitionIDs))

if len(chosenPartitionIDs) == 0 {
return nil, nil, nil
}

channelInfos := make(map[string][]*datapb.VchannelInfo)
segments := make(map[int64]*datapb.SegmentInfo, 0)
dmChannels := make(map[string]*DmChannel)

if len(chosenPartitionIDs) == 0 {
return NewCollectionTarget(segments, dmChannels), nil
}

var target *CollectionTarget
getRecoveryInfo := func() error {
var err error

@@ -212,9 +193,10 @@ func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, chos
if err != nil {
// if meet rpc error, for compatibility with previous versions, try pull next target v1
if funcutil.IsGrpcErr(err, codes.Unimplemented) {
target, err = mgr.PullNextTargetV1(broker, collectionID, chosenPartitionIDs...)
segments, dmChannels, err = mgr.PullNextTargetV1(broker, collectionID, chosenPartitionIDs...)
return err
}

return err
}

@@ -233,16 +215,15 @@ func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, chos
merged := mgr.mergeDmChannelInfo(infos)
dmChannels[merged.GetChannelName()] = merged
}
target = NewCollectionTarget(segments, dmChannels)
return nil
}

err := retry.Do(context.TODO(), getRecoveryInfo, retry.Attempts(10))
if err != nil {
return nil, err
return nil, nil, err
}

return target, nil
return segments, dmChannels, nil
}

func (mgr *TargetManager) mergeDmChannelInfo(infos []*datapb.VchannelInfo) *DmChannel {

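PullNextTargetV1 and PullNextTargetV2 now return the raw segment and channel maps instead of a *CollectionTarget, so the caller can assemble the target after re-acquiring the lock; V2 also keeps its retry loop and falls back to the v1 RPC when the remote reports Unimplemented. A simplified, self-contained Go sketch of that fallback-and-retry shape (pullV1/pullV2 are hypothetical stand-ins for the broker calls):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// pullV2 stands in for the GetRecoveryInfoV2-based path; here it pretends
// the remote DataCoord is an older version that lacks the v2 RPC.
func pullV2(collectionID int64) ([]int64, error) {
	return nil, status.Error(codes.Unimplemented, "fake not found")
}

// pullV1 stands in for the legacy per-partition GetRecoveryInfo path.
func pullV1(collectionID int64) ([]int64, error) {
	return []int64{11, 12}, nil
}

// pullWithFallback mirrors the shape of PullNextTargetV2: retry transient
// errors, but fall back to the v1 RPC exactly when the error code is
// Unimplemented (for compatibility with previous versions).
func pullWithFallback(collectionID int64) ([]int64, error) {
	var lastErr error
	for attempt := 0; attempt < 10; attempt++ {
		segments, err := pullV2(collectionID)
		if err == nil {
			return segments, nil
		}
		if status.Code(err) == codes.Unimplemented {
			return pullV1(collectionID) // older peer: use the legacy RPC
		}
		lastErr = err
	}
	return nil, lastErr
}

func main() {
	segments, err := pullWithFallback(1)
	fmt.Println(segments, err)
}
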
@@ -134,7 +134,21 @@ func (suite *TargetManagerSuite) SetupTest() {
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collection).Return(dmChannels, allSegments, nil)

suite.mgr.UpdateCollectionNextTargetWithPartitions(collection, suite.partitions[collection]...)
suite.meta.PutCollection(&Collection{
CollectionLoadInfo: &querypb.CollectionLoadInfo{
CollectionID: collection,
ReplicaNumber: 1},
})
for _, partition := range suite.partitions[collection] {
suite.meta.PutPartition(&Partition{
PartitionLoadInfo: &querypb.PartitionLoadInfo{
CollectionID: collection,
PartitionID: partition,
},
})
}

suite.mgr.UpdateCollectionNextTarget(collection)
}
}

@@ -213,7 +227,7 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() {
}

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
suite.mgr.UpdateCollectionNextTarget(collectionID)
suite.assertSegments([]int64{11, 12}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
suite.assertChannels([]string{"channel-1", "channel-2"}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
@@ -224,19 +238,16 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() {
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nil, nil, status.Errorf(codes.Unimplemented, "fake not found"))
suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{1}, nil)
suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collectionID, int64(1)).Return(nextTargetChannels, nextTargetBinlogs, nil)
err := suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
err := suite.mgr.UpdateCollectionNextTarget(collectionID)
suite.NoError(err)

suite.broker.ExpectedCalls = nil
// test getRecoveryInfoV2 failed, then retry getRecoveryInfoV2 succeeded
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nil, nil, errors.New("fake error")).Times(1)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
err = suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
err = suite.mgr.UpdateCollectionNextTarget(collectionID)
suite.NoError(err)

err = suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID)
suite.Error(err)

err = suite.mgr.UpdateCollectionNextTarget(collectionID)
suite.NoError(err)

@@ -333,9 +344,9 @@ func (suite *TargetManagerSuite) TestGetCollectionTargetVersion() {
suite.True(t1 <= version)
suite.True(t2 >= version)

collectionID := int64(1)
collectionID := suite.collections[0]
t3 := time.Now().UnixNano()
suite.mgr.updateCollectionNextTarget(collectionID)
suite.mgr.UpdateCollectionNextTarget(collectionID)
t4 := time.Now().UnixNano()

collectionVersion := suite.mgr.GetCollectionTargetVersion(collectionID, NextTarget)
@@ -390,7 +401,7 @@ func (suite *TargetManagerSuite) TestGetSegmentByChannel() {
}

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
suite.mgr.UpdateCollectionNextTarget(collectionID)
suite.Len(suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget), 2)
suite.Len(suite.mgr.GetHistoricalSegmentsByChannel(collectionID, "channel-1", NextTarget), 1)
suite.Len(suite.mgr.GetHistoricalSegmentsByChannel(collectionID, "channel-2", NextTarget), 1)

@@ -174,40 +174,41 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti

segmentTargets := ob.targetMgr.GetHistoricalSegmentsByPartition(partition.GetCollectionID(), partition.GetPartitionID(), meta.NextTarget)
channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID(), meta.NextTarget)

targetNum := len(segmentTargets) + len(channelTargets)
if targetNum == 0 {
log.Info("segments and channels in target are both empty, waiting for new target content")
return
}

log.Info("partition targets",
zap.Int("segmentTargetNum", len(segmentTargets)),
zap.Int("channelTargetNum", len(channelTargets)),
zap.Int("totalTargetNum", targetNum),
zap.Int32("replicaNum", replicaNum),
)

loadedCount := 0
loadPercentage := int32(0)
if targetNum == 0 {
log.Info("No segment/channel in target need to be loaded!")
loadPercentage = 100
} else {
for _, channel := range channelTargets {
group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
partition.GetCollectionID(),
ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
loadedCount += len(group)
}
subChannelCount := loadedCount
for _, segment := range segmentTargets {
group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
partition.GetCollectionID(),
ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
loadedCount += len(group)
}
if loadedCount > 0 {
log.Info("partition load progress",
zap.Int("subChannelCount", subChannelCount),
zap.Int("loadSegmentCount", loadedCount-subChannelCount))
}
loadPercentage = int32(loadedCount * 100 / (targetNum * int(replicaNum)))

for _, channel := range channelTargets {
group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
partition.GetCollectionID(),
ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
loadedCount += len(group)
}
subChannelCount := loadedCount
for _, segment := range segmentTargets {
group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
partition.GetCollectionID(),
ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
loadedCount += len(group)
}
if loadedCount > 0 {
log.Info("partition load progress",
zap.Int("subChannelCount", subChannelCount),
zap.Int("loadSegmentCount", loadedCount-subChannelCount))
}
loadPercentage = int32(loadedCount * 100 / (targetNum * int(replicaNum)))

if loadedCount <= ob.partitionLoadedCount[partition.GetPartitionID()] && loadPercentage != 100 {
ob.partitionLoadedCount[partition.GetPartitionID()] = loadedCount

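The observer hunk above changes how progress is computed: an empty target used to make observePartitionLoadStatus return and keep waiting, whereas now it reports the partition as fully loaded. A small sketch of the resulting formula (the function name is illustrative):

package main

import "fmt"

// loadPercentage mirrors the new progress computation: loadedCount counts
// channel and segment replicas already serving, and an empty target now
// counts as fully loaded instead of "waiting for new target content".
func loadPercentage(loadedCount, targetNum, replicaNum int) int32 {
	if targetNum == 0 {
		return 100 // nothing to load for this partition
	}
	return int32(loadedCount * 100 / (targetNum * replicaNum))
}

func main() {
	fmt.Println(loadPercentage(3, 3, 2)) // 3 of 6 replica-targets loaded -> 50
	fmt.Println(loadPercentage(0, 0, 2)) // empty target -> 100
}
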
@@ -250,34 +250,34 @@ func (suite *CollectionObserverSuite) TestObserve() {
Channel: "100-dmc1",
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}},
})
suite.dist.LeaderViewManager.Update(3, &meta.LeaderView{
view1 := &meta.LeaderView{
ID: 3,
CollectionID: 102,
Channel: "102-dmc0",
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 5, Version: 0}},
})
}

segmentsInfo, ok := suite.segments[103]
suite.True(ok)
lview := &meta.LeaderView{
view2 := &meta.LeaderView{
ID: 3,
CollectionID: 13,
Channel: "103-dmc0",
Segments: make(map[int64]*querypb.SegmentDist),
}
for _, segment := range segmentsInfo {
lview.Segments[segment.GetID()] = &querypb.SegmentDist{
view2.Segments[segment.GetID()] = &querypb.SegmentDist{
NodeID: 3, Version: 0,
}
}
suite.dist.LeaderViewManager.Update(3, lview)

suite.Eventually(func() bool {
return suite.isCollectionLoaded(suite.collections[0])
}, timeout*2, timeout/10)
suite.dist.LeaderViewManager.Update(3, view1, view2)

suite.Eventually(func() bool {
return suite.isCollectionLoadedContinue(suite.collections[2], time)
}, timeout-1, timeout/10)

suite.Eventually(func() bool {
return suite.isCollectionLoaded(suite.collections[0])
}, timeout*2, timeout/10)

suite.Eventually(func() bool {
@@ -296,11 +296,18 @@ func (suite *CollectionObserverSuite) TestObservePartition() {
paramtable.Get().Save(Params.QueryCoordCfg.LoadTimeoutSeconds.Key, "3")

// Partition 10 loaded
// Partition 11 timeout
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
ID: 1,
CollectionID: 100,
Channel: "100-dmc0",
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}},
}, &meta.LeaderView{
ID: 1,
CollectionID: 101,
Channel: "",
Segments: map[int64]*querypb.SegmentDist{},
})
suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
ID: 2,
@@ -308,13 +315,6 @@ func (suite *CollectionObserverSuite) TestObservePartition() {
Channel: "100-dmc1",
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}},
})
// Partition 11 timeout
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
ID: 1,
CollectionID: 101,
Channel: "",
Segments: map[int64]*querypb.SegmentDist{},
})

suite.Eventually(func() bool {
return suite.isPartitionLoaded(suite.partitions[100][0])
@@ -386,7 +386,9 @@ func (suite *CollectionObserverSuite) loadAll() {
suite.load(collection)
}
suite.targetMgr.UpdateCollectionCurrentTarget(suite.collections[0])
suite.targetMgr.UpdateCollectionNextTarget(suite.collections[0])
suite.targetMgr.UpdateCollectionCurrentTarget(suite.collections[2])
suite.targetMgr.UpdateCollectionNextTarget(suite.collections[2])
}

func (suite *CollectionObserverSuite) load(collection int64) {
@@ -440,9 +442,8 @@ func (suite *CollectionObserverSuite) load(collection int64) {
})
}

partitions := suite.partitions[collection]
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collection).Return(dmChannels, allSegments, nil)
suite.targetMgr.UpdateCollectionNextTargetWithPartitions(collection, partitions...)
suite.targetMgr.UpdateCollectionNextTarget(collection)
}

func TestCollectionObserver(t *testing.T) {

@@ -90,6 +90,7 @@ func (suite *LeaderObserverTestSuite) TearDownTest() {
func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
segments := []*datapb.SegmentInfo{
{
@@ -119,7 +120,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{info}}, nil)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
observer.target.UpdateCollectionNextTarget(int64(1))
observer.target.UpdateCollectionCurrentTarget(1)
observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 2, 1, "test-insert-channel"))
observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
@@ -149,7 +150,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
CollectionID: 1,
PartitionIDs: []int64{},
PartitionIDs: []int64{1},
},
Version: version,
}
@@ -179,6 +180,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
func (suite *LeaderObserverTestSuite) TestIgnoreSyncLoadedSegments() {
observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
segments := []*datapb.SegmentInfo{
{
@@ -208,7 +210,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncLoadedSegments() {
&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{info}}, nil)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
observer.target.UpdateCollectionNextTarget(int64(1))
observer.target.UpdateCollectionCurrentTarget(1)
observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 2, 1, "test-insert-channel"),
utils.CreateTestSegment(1, 1, 2, 2, 1, "test-insert-channel"))
@@ -239,7 +241,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncLoadedSegments() {
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
CollectionID: 1,
PartitionIDs: []int64{},
PartitionIDs: []int64{1},
},
Version: version,
}
@@ -267,6 +269,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncLoadedSegments() {
func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() {
observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
segments := []*datapb.SegmentInfo{
{
@@ -284,7 +287,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() {

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
observer.target.UpdateCollectionNextTarget(int64(1))
observer.target.UpdateCollectionCurrentTarget(1)
observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
@@ -307,6 +310,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() {
func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 2))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(2, 1, []int64{3, 4}))
segments := []*datapb.SegmentInfo{
@@ -337,7 +341,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
suite.broker.EXPECT().GetCollectionSchema(mock.Anything, int64(1)).Return(schema, nil)
observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
observer.target.UpdateCollectionNextTarget(int64(1))
observer.target.UpdateCollectionCurrentTarget(1)
observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
observer.dist.SegmentDistManager.Update(4, utils.CreateTestSegment(1, 1, 1, 4, 2, "test-insert-channel"))
@@ -371,7 +375,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
CollectionID: 1,
PartitionIDs: []int64{},
PartitionIDs: []int64{1},
},
Version: version,
}
@@ -400,6 +404,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
func (suite *LeaderObserverTestSuite) TestSyncRemovedSegments() {
observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
@@ -426,7 +431,7 @@ func (suite *LeaderObserverTestSuite) TestSyncRemovedSegments() {
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
CollectionID: 1,
PartitionIDs: []int64{},
PartitionIDs: []int64{1},
},
Version: version,
}
@@ -453,6 +458,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncRemovedSegments() {

observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(1, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

segments := []*datapb.SegmentInfo{
@@ -472,7 +478,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncRemovedSegments() {
suite.broker.EXPECT().GetCollectionSchema(mock.Anything, int64(1)).Return(schema, nil)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, int64(1)).Return(
channels, segments, nil)
observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
observer.target.UpdateCollectionNextTarget(int64(1))

observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2, 2: 2}, map[int64]*meta.Segment{}))
@@ -495,7 +501,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncRemovedSegments() {
Schema: schema,
LoadMeta: &querypb.LoadMetaInfo{
CollectionID: 1,
PartitionIDs: []int64{},
PartitionIDs: []int64{1},
},
Version: version,
}
@@ -523,6 +529,7 @@ func (suite *LeaderObserverTestSuite) TestSyncTargetVersion() {

observer := suite.observer
observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(collectionID, 1))
observer.meta.CollectionManager.PutPartition(utils.CreateTestPartition(collectionID, 1))
observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, collectionID, []int64{1, 2}))

nextTargetChannels := []*datapb.VchannelInfo{
@@ -552,7 +559,7 @@ func (suite *LeaderObserverTestSuite) TestSyncTargetVersion() {
}

suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
suite.observer.target.UpdateCollectionNextTargetWithPartitions(collectionID, 1)
suite.observer.target.UpdateCollectionNextTarget(collectionID)
suite.observer.target.UpdateCollectionCurrentTarget(collectionID)
TargetVersion := suite.observer.target.GetCollectionTargetVersion(collectionID, meta.CurrentTarget)

@@ -37,7 +37,6 @@ type checkRequest struct {

type targetUpdateRequest struct {
CollectionID int64
PartitionIDs []int64
Notifier chan error
ReadyNotifier chan struct{}
}
@@ -109,7 +108,7 @@ func (ob *TargetObserver) schedule(ctx context.Context) {
req.Notifier <- ob.targetMgr.IsCurrentTargetExist(req.CollectionID)

case req := <-ob.updateChan:
err := ob.updateNextTarget(req.CollectionID, req.PartitionIDs...)
err := ob.updateNextTarget(req.CollectionID)
if err != nil {
close(req.ReadyNotifier)
} else {
@@ -154,14 +153,13 @@ func (ob *TargetObserver) check(collectionID int64) {
// UpdateNextTarget updates the next target,
// returns a channel which will be closed when the next target is ready,
// or returns error if failed to pull target
func (ob *TargetObserver) UpdateNextTarget(collectionID int64, partitionIDs ...int64) (chan struct{}, error) {
func (ob *TargetObserver) UpdateNextTarget(collectionID int64) (chan struct{}, error) {
notifier := make(chan error)
readyCh := make(chan struct{})
defer close(notifier)

ob.updateChan <- targetUpdateRequest{
CollectionID: collectionID,
PartitionIDs: partitionIDs,
Notifier: notifier,
ReadyNotifier: readyCh,
}
@@ -215,16 +213,11 @@ func (ob *TargetObserver) isNextTargetExpired(collectionID int64) bool {
return time.Since(ob.nextTargetLastUpdate[collectionID]) > params.Params.QueryCoordCfg.NextTargetSurviveTime.GetAsDuration(time.Second)
}

func (ob *TargetObserver) updateNextTarget(collectionID int64, partitionIDs ...int64) error {
log := log.With(zap.Int64("collectionID", collectionID), zap.Int64s("partIDs", partitionIDs))
func (ob *TargetObserver) updateNextTarget(collectionID int64) error {
log := log.With(zap.Int64("collectionID", collectionID))

log.Info("observer trigger update next target")
var err error
if len(partitionIDs) == 0 {
err = ob.targetMgr.UpdateCollectionNextTarget(collectionID)
} else {
err = ob.targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionIDs...)
}
err := ob.targetMgr.UpdateCollectionNextTarget(collectionID)
if err != nil {
log.Error("failed to update next target for collection",
zap.Error(err))

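With partition IDs gone from targetUpdateRequest, UpdateNextTarget keeps its request/notify shape: the caller enqueues a request, receives the pull error over a transient notifier channel, and gets a ready channel that is closed once the next target is usable. A runnable, simplified sketch of that pattern (observer and schedule are reductions of the real TargetObserver, and the ready-channel lifecycle is abbreviated):

package main

import "fmt"

type targetUpdateRequest struct {
	CollectionID  int64
	Notifier      chan error
	ReadyNotifier chan struct{}
}

type observer struct {
	updateChan chan targetUpdateRequest
}

// schedule is a minimal stand-in for TargetObserver.schedule: it handles
// one request at a time and reports the pull result to the caller.
func (ob *observer) schedule() {
	for req := range ob.updateChan {
		err := error(nil) // pretend the target pull succeeded
		if err != nil {
			// unblock any waiter; in the real observer the channel is
			// otherwise kept and closed later, once the target is current
			close(req.ReadyNotifier)
		}
		req.Notifier <- err
	}
}

func (ob *observer) UpdateNextTarget(collectionID int64) (chan struct{}, error) {
	notifier := make(chan error)
	readyCh := make(chan struct{})
	defer close(notifier)

	ob.updateChan <- targetUpdateRequest{collectionID, notifier, readyCh}
	return readyCh, <-notifier // block until the scheduler handles it
}

func main() {
	ob := &observer{updateChan: make(chan targetUpdateRequest)}
	go ob.schedule()
	_, err := ob.UpdateNextTarget(1)
	fmt.Println("err:", err)
}
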
@@ -159,6 +159,7 @@ func (suite *TaskSuite) BeforeTest(suiteName, testName string) {

switch testName {
case "TestSubscribeChannelTask",
"TestUnsubscribeChannelTask",
"TestLoadSegmentTask",
"TestLoadSegmentTaskNotIndex",
"TestLoadSegmentTaskFailed",
@@ -175,6 +176,12 @@ func (suite *TaskSuite) BeforeTest(suiteName, testName string) {
Status: querypb.LoadStatus_Loading,
},
})
suite.meta.PutPartition(&meta.Partition{
PartitionLoadInfo: &querypb.PartitionLoadInfo{
CollectionID: suite.collection,
PartitionID: 1,
},
})
suite.meta.ReplicaManager.Put(
utils.CreateTestReplica(suite.replica, suite.collection, []int64{1, 2, 3}))
}
@@ -242,7 +249,7 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(dmChannels, nil, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
suite.AssertTaskNum(0, len(suite.subChannels), len(suite.subChannels), 0)

// Process tasks
@@ -337,7 +344,7 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(dmChannels, nil, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)

// Only first channel exists
suite.dist.LeaderViewManager.Update(targetNode, &meta.LeaderView{
@@ -424,7 +431,7 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@@ -518,7 +525,7 @@ func (suite *TaskSuite) TestLoadSegmentTaskNotIndex() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@@ -606,7 +613,7 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@@ -820,8 +827,8 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return([]*datapb.VchannelInfo{vchannel}, segmentInfos, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionCurrentTarget(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
suite.target.UpdateCollectionCurrentTarget(suite.collection)
suite.dist.SegmentDistManager.Update(sourceNode, segments...)
suite.dist.LeaderViewManager.Update(leader, view)
segmentsNum := len(suite.moveSegments)
@@ -918,7 +925,8 @@ func (suite *TaskSuite) TestTaskCanceled() {
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segmentInfos, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, partition)
suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collection, partition))
suite.target.UpdateCollectionNextTarget(suite.collection)

// Process tasks
suite.dispatchAndWait(targetNode)
@@ -1001,7 +1009,8 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collection, partition))
suite.target.UpdateCollectionNextTarget(suite.collection)
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)

@@ -1036,7 +1045,9 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
suite.broker.AssertExpectations(suite.T())
suite.broker.ExpectedCalls = suite.broker.ExpectedCalls[:0]
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(2))

suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collection, 2))
suite.target.UpdateCollectionNextTarget(suite.collection)
suite.dispatchAndWait(targetNode)
suite.AssertTaskNum(0, 0, 0, 0)

@@ -1240,7 +1251,7 @@ func (suite *TaskSuite) TestNoExecutor() {
suite.NoError(err)
}
suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, suite.collection).Return(nil, segments, nil)
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
suite.target.UpdateCollectionNextTarget(suite.collection)
segmentsNum := len(suite.loadSegments)
suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)