mirror of https://github.com/milvus-io/milvus.git
refine target manager code style (#27883)
Signed-off-by: Wei Liu <wei.liu@zilliz.com>
pull/25642/head
parent 4640928280
commit e0222b2ce3
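This commit renames the TargetManager's "historical/streaming" accessors to "sealed/growing" (for example GetHistoricalSegment becomes GetSealedSegment, and GetStreamingSegmentsByChannel becomes GetGrowingSegmentsByChannel) and updates the call sites in the balancers, checkers, observers, handlers, task scheduler, and tests. The following caller-side sketch is illustrative only and is not part of the commit: the wrapper function and the import path are assumptions, while the method names and the meta.CurrentTarget/meta.NextTarget scopes are taken from the diff below.

package example

import (
	"github.com/milvus-io/milvus/internal/querycoordv2/meta" // assumed import path for the querycoord meta package
)

// segmentInBothTargets mirrors the filter used by the row-count balancer in this diff:
// a sealed segment is only considered for balancing when it is present in both the
// current and the next target. Before this commit the same lookups were spelled
// GetHistoricalSegment.
func segmentInBothTargets(mgr *meta.TargetManager, collectionID, segmentID int64) bool {
	return mgr.GetSealedSegment(collectionID, segmentID, meta.CurrentTarget) != nil &&
		mgr.GetSealedSegment(collectionID, segmentID, meta.NextTarget) != nil
}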
@@ -97,8 +97,8 @@ func (b *RowCountBasedBalancer) BalanceReplica(replica *meta.Replica) ([]Segment
         segments := b.dist.SegmentDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nid)
         // Only balance segments in targets
         segments = lo.Filter(segments, func(segment *meta.Segment, _ int) bool {
-            return b.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil &&
-                b.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.NextTarget) != nil
+            return b.targetMgr.GetSealedSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil &&
+                b.targetMgr.GetSealedSegment(segment.GetCollectionID(), segment.GetID(), meta.NextTarget) != nil
         })

         if isStopping, err := b.nodeManager.IsStoppingNode(nid); err != nil {
@@ -122,7 +122,7 @@ func (b *ScoreBasedBalancer) BalanceReplica(replica *meta.Replica) ([]SegmentAss
         segments := b.dist.SegmentDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nid)
         // Only balance segments in targets
         segments = lo.Filter(segments, func(segment *meta.Segment, _ int) bool {
-            return b.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
+            return b.targetMgr.GetSealedSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
         })

         if isStopping, err := b.nodeManager.IsStoppingNode(nid); err != nil {
@@ -82,7 +82,7 @@ func (c *IndexChecker) checkReplica(ctx context.Context, collection *meta.Collec
     )
     var tasks []task.Task

-    segments := c.getHistoricalSegmentsDist(replica)
+    segments := c.getSealedSegmentsDist(replica)
     idSegments := make(map[int64]*meta.Segment)

     targets := make(map[int64][]int64) // segmentID => FieldID
@@ -133,7 +133,7 @@ func (c *IndexChecker) checkSegment(ctx context.Context, segment *meta.Segment,
     return result
 }

-func (c *IndexChecker) getHistoricalSegmentsDist(replica *meta.Replica) []*meta.Segment {
+func (c *IndexChecker) getSealedSegmentsDist(replica *meta.Replica) []*meta.Segment {
     var ret []*meta.Segment
     for _, node := range replica.GetNodes() {
         ret = append(ret, c.dist.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
@@ -111,7 +111,7 @@ func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica
     }

     // compare with targets to find the lack and redundancy of segments
-    lacks, redundancies := c.getHistoricalSegmentDiff(replica.GetCollectionID(), replica.GetID())
+    lacks, redundancies := c.getSealedSegmentDiff(replica.GetCollectionID(), replica.GetID())
     tasks := c.createSegmentLoadTasks(ctx, lacks, replica)
     task.SetReason("lacks of segment", tasks...)
     ret = append(ret, tasks...)
@@ -122,14 +122,14 @@ func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica
     ret = append(ret, tasks...)

     // compare inner dists to find repeated loaded segments
-    redundancies = c.findRepeatedHistoricalSegments(replica.GetID())
+    redundancies = c.findRepeatedSealedSegments(replica.GetID())
     redundancies = c.filterExistedOnLeader(replica, redundancies)
     tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_Historical)
     task.SetReason("redundancies of segment", tasks...)
     ret = append(ret, tasks...)

     // compare with target to find the lack and redundancy of segments
-    _, redundancies = c.getStreamingSegmentDiff(replica.GetCollectionID(), replica.GetID())
+    _, redundancies = c.getGrowingSegmentDiff(replica.GetCollectionID(), replica.GetID())
     tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_Streaming)
     task.SetReason("streaming segment not exists in target", tasks...)
     ret = append(ret, tasks...)
@@ -137,8 +137,8 @@ func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica
     return ret
 }

-// GetStreamingSegmentDiff get streaming segment diff between leader view and target
-func (c *SegmentChecker) getStreamingSegmentDiff(collectionID int64,
+// GetGrowingSegmentDiff get streaming segment diff between leader view and target
+func (c *SegmentChecker) getGrowingSegmentDiff(collectionID int64,
     replicaID int64,
 ) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
     replica := c.meta.Get(replicaID)
@@ -171,8 +171,8 @@ func (c *SegmentChecker) getStreamingSegmentDiff(collectionID int64,
         continue
     }

-    nextTargetSegmentIDs := c.targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.NextTarget)
-    currentTargetSegmentIDs := c.targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.CurrentTarget)
+    nextTargetSegmentIDs := c.targetMgr.GetGrowingSegmentsByCollection(collectionID, meta.NextTarget)
+    currentTargetSegmentIDs := c.targetMgr.GetGrowingSegmentsByCollection(collectionID, meta.CurrentTarget)
     currentTargetChannelMap := c.targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)

     // get segment which exist on leader view, but not on current target and next target
@@ -196,8 +196,8 @@ func (c *SegmentChecker) getStreamingSegmentDiff(collectionID int64,
     return
 }

-// GetHistoricalSegmentDiff get historical segment diff between target and dist
-func (c *SegmentChecker) getHistoricalSegmentDiff(
+// GetSealedSegmentDiff get historical segment diff between target and dist
+func (c *SegmentChecker) getSealedSegmentDiff(
     collectionID int64,
     replicaID int64,
 ) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
@@ -206,7 +206,7 @@ func (c *SegmentChecker) getHistoricalSegmentDiff(
         log.Info("replica does not exist, skip it")
         return
     }
-    dist := c.getHistoricalSegmentsDist(replica)
+    dist := c.getSealedSegmentsDist(replica)
     sort.Slice(dist, func(i, j int) bool {
         return dist[i].Version < dist[j].Version
     })
@@ -215,8 +215,8 @@ func (c *SegmentChecker) getHistoricalSegmentDiff(
         distMap[s.GetID()] = s.Node
     }

-    nextTargetMap := c.targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
-    currentTargetMap := c.targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.CurrentTarget)
+    nextTargetMap := c.targetMgr.GetSealedSegmentsByCollection(collectionID, meta.NextTarget)
+    currentTargetMap := c.targetMgr.GetSealedSegmentsByCollection(collectionID, meta.CurrentTarget)

     // Segment which exist on next target, but not on dist
     for segmentID, segment := range nextTargetMap {
@@ -256,7 +256,7 @@ func (c *SegmentChecker) getHistoricalSegmentDiff(
     return
 }

-func (c *SegmentChecker) getHistoricalSegmentsDist(replica *meta.Replica) []*meta.Segment {
+func (c *SegmentChecker) getSealedSegmentsDist(replica *meta.Replica) []*meta.Segment {
     ret := make([]*meta.Segment, 0)
     for _, node := range replica.GetNodes() {
         ret = append(ret, c.dist.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
@@ -264,14 +264,14 @@ func (c *SegmentChecker) getHistoricalSegmentsDist(replica *meta.Replica) []*met
     return ret
 }

-func (c *SegmentChecker) findRepeatedHistoricalSegments(replicaID int64) []*meta.Segment {
+func (c *SegmentChecker) findRepeatedSealedSegments(replicaID int64) []*meta.Segment {
     segments := make([]*meta.Segment, 0)
     replica := c.meta.Get(replicaID)
     if replica == nil {
         log.Info("replica does not exist, skip it")
         return segments
     }
-    dist := c.getHistoricalSegmentsDist(replica)
+    dist := c.getSealedSegmentsDist(replica)
     versions := make(map[int64]*meta.Segment)
     for _, s := range dist {
         maxVer, ok := versions[s.GetID()]
@@ -114,10 +114,10 @@ func (dh *distHandler) updateSegmentsDistribution(resp *querypb.GetDataDistribut
     updates := make([]*meta.Segment, 0, len(resp.GetSegments()))
     for _, s := range resp.GetSegments() {
         // for collection which is already loaded
-        segmentInfo := dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.CurrentTarget)
+        segmentInfo := dh.target.GetSealedSegment(s.GetCollection(), s.GetID(), meta.CurrentTarget)
         if segmentInfo == nil {
             // for collection which is loading
-            segmentInfo = dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.NextTarget)
+            segmentInfo = dh.target.GetSealedSegment(s.GetCollection(), s.GetID(), meta.NextTarget)
         }
         var segment *meta.Segment
         if segmentInfo == nil {
@@ -62,7 +62,7 @@ func (s *Server) checkAnyReplicaAvailable(collectionID int64) bool {

 func (s *Server) getCollectionSegmentInfo(collection int64) []*querypb.SegmentInfo {
     segments := s.dist.SegmentDistManager.GetByCollection(collection)
-    currentTargetSegmentsMap := s.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)
+    currentTargetSegmentsMap := s.targetMgr.GetSealedSegmentsByCollection(collection, meta.CurrentTarget)
     infos := make(map[int64]*querypb.SegmentInfo)
     for _, segment := range segments {
         if _, existCurrentTarget := currentTargetSegmentsMap[segment.GetID()]; !existCurrentTarget {
@@ -109,7 +109,7 @@ func (s *Server) balanceSegments(ctx context.Context, req *querypb.LoadBalanceRe
     // Only balance segments in targets
     segments := s.dist.SegmentDistManager.GetByCollectionAndNode(req.GetCollectionID(), srcNode)
     segments = lo.Filter(segments, func(segment *meta.Segment, _ int) bool {
-        return s.targetMgr.GetHistoricalSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
+        return s.targetMgr.GetSealedSegment(segment.GetCollectionID(), segment.GetID(), meta.CurrentTarget) != nil
     })
     allSegments := make(map[int64]*meta.Segment)
     for _, segment := range segments {
@@ -1484,7 +1484,7 @@ func (suite *JobSuite) assertCollectionLoaded(collection int64) {
     }
     for _, segments := range suite.segments[collection] {
         for _, segment := range segments {
-            suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+            suite.NotNil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
         }
     }
 }
@@ -1501,7 +1501,7 @@ func (suite *JobSuite) assertPartitionLoaded(collection int64, partitionIDs ...i
         }
         suite.NotNil(suite.meta.GetPartition(partitionID))
         for _, segment := range segments {
-            suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+            suite.NotNil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
         }
     }
 }
@@ -1514,7 +1514,7 @@ func (suite *JobSuite) assertCollectionReleased(collection int64) {
     }
     for _, partitions := range suite.segments[collection] {
         for _, segment := range partitions {
-            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+            suite.Nil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
         }
     }
 }
@@ -1524,7 +1524,7 @@ func (suite *JobSuite) assertPartitionReleased(collection int64, partitionIDs ..
         suite.Nil(suite.meta.GetPartition(partition))
         segments := suite.segments[collection][partition]
         for _, segment := range segments {
-            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+            suite.Nil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
         }
     }
 }
@@ -324,7 +324,7 @@ func (mgr *TargetManager) getTarget(scope TargetScope) *target {
     return mgr.next
 }

-func (mgr *TargetManager) GetStreamingSegmentsByCollection(collectionID int64,
+func (mgr *TargetManager) GetGrowingSegmentsByCollection(collectionID int64,
     scope TargetScope,
 ) typeutil.UniqueSet {
     mgr.rwMutex.RLock()
@@ -345,7 +345,7 @@ func (mgr *TargetManager) GetStreamingSegmentsByCollection(collectionID int64,
     return segments
 }

-func (mgr *TargetManager) GetStreamingSegmentsByChannel(collectionID int64,
+func (mgr *TargetManager) GetGrowingSegmentsByChannel(collectionID int64,
     channelName string,
     scope TargetScope,
 ) typeutil.UniqueSet {
@@ -369,7 +369,7 @@ func (mgr *TargetManager) GetStreamingSegmentsByChannel(collectionID int64,
     return segments
 }

-func (mgr *TargetManager) GetHistoricalSegmentsByCollection(collectionID int64,
+func (mgr *TargetManager) GetSealedSegmentsByCollection(collectionID int64,
     scope TargetScope,
 ) map[int64]*datapb.SegmentInfo {
     mgr.rwMutex.RLock()
@@ -384,7 +384,7 @@ func (mgr *TargetManager) GetHistoricalSegmentsByCollection(collectionID int64,
     return collectionTarget.GetAllSegments()
 }

-func (mgr *TargetManager) GetHistoricalSegmentsByChannel(collectionID int64,
+func (mgr *TargetManager) GetSealedSegmentsByChannel(collectionID int64,
     channelName string,
     scope TargetScope,
 ) map[int64]*datapb.SegmentInfo {
@@ -430,7 +430,7 @@ func (mgr *TargetManager) GetDroppedSegmentsByChannel(collectionID int64,
     return channel.GetDroppedSegmentIds()
 }

-func (mgr *TargetManager) GetHistoricalSegmentsByPartition(collectionID int64,
+func (mgr *TargetManager) GetSealedSegmentsByPartition(collectionID int64,
     partitionID int64, scope TargetScope,
 ) map[int64]*datapb.SegmentInfo {
     mgr.rwMutex.RLock()
@@ -479,7 +479,7 @@ func (mgr *TargetManager) GetDmChannel(collectionID int64, channel string, scope
     return collectionTarget.GetAllDmChannels()[channel]
 }

-func (mgr *TargetManager) GetHistoricalSegment(collectionID int64, id int64, scope TargetScope) *datapb.SegmentInfo {
+func (mgr *TargetManager) GetSealedSegment(collectionID int64, id int64, scope TargetScope) *datapb.SegmentInfo {
     mgr.rwMutex.RLock()
     defer mgr.rwMutex.RUnlock()
     targetMap := mgr.getTarget(scope)
@@ -162,24 +162,24 @@ func (suite *TargetManagerSuite) TearDownSuite() {
 func (suite *TargetManagerSuite) TestUpdateCurrentTarget() {
     collectionID := int64(1000)
     suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
-        suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+        suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.mgr.UpdateCollectionCurrentTarget(collectionID)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
     suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
-        suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+        suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
 }

 func (suite *TargetManagerSuite) TestUpdateNextTarget() {
     collectionID := int64(1003)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.meta.PutCollection(&Collection{
@@ -232,9 +232,9 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() {

     suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
     suite.mgr.UpdateCollectionNextTarget(collectionID)
-    suite.assertSegments([]int64{11, 12}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{11, 12}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{"channel-1", "channel-2"}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.broker.ExpectedCalls = nil
@@ -259,42 +259,42 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() {

 func (suite *TargetManagerSuite) TestRemovePartition() {
     collectionID := int64(1000)
-    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.mgr.RemovePartition(collectionID, 100)
-    suite.assertSegments([]int64{3, 4}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{3, 4}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
 }

 func (suite *TargetManagerSuite) TestRemoveCollection() {
     collectionID := int64(1000)
-    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.mgr.RemoveCollection(collectionID)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     collectionID = int64(1001)
     suite.mgr.UpdateCollectionCurrentTarget(collectionID)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.mgr.RemoveCollection(collectionID)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
 }

@@ -360,9 +360,9 @@ func (suite *TargetManagerSuite) TestGetCollectionTargetVersion() {

 func (suite *TargetManagerSuite) TestGetSegmentByChannel() {
     collectionID := int64(1003)
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
-    suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+    suite.assertSegments([]int64{}, suite.mgr.GetSealedSegmentsByCollection(collectionID, CurrentTarget))
     suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))

     suite.meta.PutCollection(&Collection{
@@ -407,11 +407,11 @@ func (suite *TargetManagerSuite) TestGetSegmentByChannel() {

     suite.broker.EXPECT().GetRecoveryInfoV2(mock.Anything, collectionID).Return(nextTargetChannels, nextTargetSegments, nil)
     suite.mgr.UpdateCollectionNextTarget(collectionID)
-    suite.Len(suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget), 2)
-    suite.Len(suite.mgr.GetHistoricalSegmentsByChannel(collectionID, "channel-1", NextTarget), 1)
-    suite.Len(suite.mgr.GetHistoricalSegmentsByChannel(collectionID, "channel-2", NextTarget), 1)
-    suite.Len(suite.mgr.GetStreamingSegmentsByChannel(collectionID, "channel-1", NextTarget), 4)
-    suite.Len(suite.mgr.GetStreamingSegmentsByChannel(collectionID, "channel-2", NextTarget), 1)
+    suite.Len(suite.mgr.GetSealedSegmentsByCollection(collectionID, NextTarget), 2)
+    suite.Len(suite.mgr.GetSealedSegmentsByChannel(collectionID, "channel-1", NextTarget), 1)
+    suite.Len(suite.mgr.GetSealedSegmentsByChannel(collectionID, "channel-2", NextTarget), 1)
+    suite.Len(suite.mgr.GetGrowingSegmentsByChannel(collectionID, "channel-1", NextTarget), 4)
+    suite.Len(suite.mgr.GetGrowingSegmentsByChannel(collectionID, "channel-2", NextTarget), 1)
     suite.Len(suite.mgr.GetDroppedSegmentsByChannel(collectionID, "channel-1", NextTarget), 3)
 }

@@ -182,7 +182,7 @@ func (ob *CollectionObserver) observePartitionLoadStatus(ctx context.Context, pa
         zap.Int64("partitionID", partition.GetPartitionID()),
     )

-    segmentTargets := ob.targetMgr.GetHistoricalSegmentsByPartition(partition.GetCollectionID(), partition.GetPartitionID(), meta.NextTarget)
+    segmentTargets := ob.targetMgr.GetSealedSegmentsByPartition(partition.GetCollectionID(), partition.GetPartitionID(), meta.NextTarget)
     channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID(), meta.NextTarget)

     targetNum := len(segmentTargets) + len(channelTargets)
@@ -328,7 +328,7 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool
     status := suite.meta.CalculateLoadStatus(collection)
     replicas := suite.meta.ReplicaManager.GetByCollection(collection)
     channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
-    segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)
+    segments := suite.targetMgr.GetSealedSegmentsByCollection(collection, meta.CurrentTarget)

     return exist &&
         percentage == 100 &&
@@ -347,7 +347,7 @@ func (suite *CollectionObserverSuite) isPartitionLoaded(partitionID int64) bool
     percentage := suite.meta.GetPartitionLoadPercentage(partitionID)
     status := partition.GetStatus()
     channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
-    segments := suite.targetMgr.GetHistoricalSegmentsByPartition(collection, partitionID, meta.CurrentTarget)
+    segments := suite.targetMgr.GetSealedSegmentsByPartition(collection, partitionID, meta.CurrentTarget)
     expectedSegments := lo.Filter(suite.segments[collection], func(seg *datapb.SegmentInfo, _ int) bool {
         return seg.PartitionID == partitionID
     })
@@ -361,7 +361,7 @@ func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool
     exist := suite.meta.Exist(collection)
     replicas := suite.meta.ReplicaManager.GetByCollection(collection)
     channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
-    segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)
+    segments := suite.targetMgr.GetSealedSegmentsByCollection(collection, meta.CurrentTarget)
     return !(exist ||
         len(replicas) > 0 ||
         len(channels) > 0 ||
@@ -370,7 +370,7 @@ func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool

 func (suite *CollectionObserverSuite) isPartitionTimeout(collection int64, partitionID int64) bool {
     partition := suite.meta.GetPartition(partitionID)
-    segments := suite.targetMgr.GetHistoricalSegmentsByPartition(collection, partitionID, meta.CurrentTarget)
+    segments := suite.targetMgr.GetSealedSegmentsByPartition(collection, partitionID, meta.CurrentTarget)
     return partition == nil && len(segments) == 0
 }

@@ -184,8 +184,8 @@ func (o *LeaderObserver) checkNeedUpdateTargetVersion(ctx context.Context, leade
         zap.Int64("newVersion", targetVersion),
     )

-    sealedSegments := o.target.GetHistoricalSegmentsByChannel(leaderView.CollectionID, leaderView.Channel, meta.CurrentTarget)
-    growingSegments := o.target.GetStreamingSegmentsByChannel(leaderView.CollectionID, leaderView.Channel, meta.CurrentTarget)
+    sealedSegments := o.target.GetSealedSegmentsByChannel(leaderView.CollectionID, leaderView.Channel, meta.CurrentTarget)
+    growingSegments := o.target.GetGrowingSegmentsByChannel(leaderView.CollectionID, leaderView.Channel, meta.CurrentTarget)
     droppedSegments := o.target.GetDroppedSegmentsByChannel(leaderView.CollectionID, leaderView.Channel, meta.CurrentTarget)

     return &querypb.SyncAction{
@@ -202,9 +202,9 @@ func (o *LeaderObserver) findNeedLoadedSegments(leaderView *meta.LeaderView, dis
     dists = utils.FindMaxVersionSegments(dists)
     for _, s := range dists {
         version, ok := leaderView.Segments[s.GetID()]
-        currentTarget := o.target.GetHistoricalSegment(s.CollectionID, s.GetID(), meta.CurrentTarget)
+        currentTarget := o.target.GetSealedSegment(s.CollectionID, s.GetID(), meta.CurrentTarget)
         existInCurrentTarget := currentTarget != nil
-        existInNextTarget := o.target.GetHistoricalSegment(s.CollectionID, s.GetID(), meta.NextTarget) != nil
+        existInNextTarget := o.target.GetSealedSegment(s.CollectionID, s.GetID(), meta.NextTarget) != nil

         if !existInCurrentTarget && !existInNextTarget {
             continue
@@ -246,8 +246,8 @@ func (o *LeaderObserver) findNeedRemovedSegments(leaderView *meta.LeaderView, di
     }
     for sid, s := range leaderView.Segments {
         _, ok := distMap[sid]
-        existInCurrentTarget := o.target.GetHistoricalSegment(leaderView.CollectionID, sid, meta.CurrentTarget) != nil
-        existInNextTarget := o.target.GetHistoricalSegment(leaderView.CollectionID, sid, meta.NextTarget) != nil
+        existInCurrentTarget := o.target.GetSealedSegment(leaderView.CollectionID, sid, meta.CurrentTarget) != nil
+        existInNextTarget := o.target.GetSealedSegment(leaderView.CollectionID, sid, meta.NextTarget) != nil
         if ok || existInCurrentTarget || existInNextTarget {
             continue
         }
@@ -283,8 +283,8 @@ func (ob *TargetObserver) shouldUpdateCurrentTarget(collectionID int64) bool {
     }

     // and last check historical segment
-    historicalSegments := ob.targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
-    for _, segment := range historicalSegments {
+    SealedSegments := ob.targetMgr.GetSealedSegmentsByCollection(collectionID, meta.NextTarget)
+    for _, segment := range SealedSegments {
         group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
             collectionID,
             ob.distMgr.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
@@ -126,7 +126,7 @@ func (suite *TargetObserverSuite) SetupTest() {

 func (suite *TargetObserverSuite) TestTriggerUpdateTarget() {
     suite.Eventually(func() bool {
-        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 2 &&
+        return len(suite.targetMgr.GetSealedSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 2 &&
             len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.NextTarget)) == 2
     }, 5*time.Second, 1*time.Second)

@@ -168,7 +168,7 @@ func (suite *TargetObserverSuite) TestTriggerUpdateTarget() {
         GetRecoveryInfoV2(mock.Anything, mock.Anything).
         Return(suite.nextTargetChannels, suite.nextTargetSegments, nil)
     suite.Eventually(func() bool {
-        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 3 &&
+        return len(suite.targetMgr.GetSealedSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 3 &&
             len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.NextTarget)) == 2
     }, 7*time.Second, 1*time.Second)
     suite.broker.AssertExpectations(suite.T())
@@ -206,7 +206,7 @@ func (suite *TargetObserverSuite) TestTriggerUpdateTarget() {
         default:
         }
         return isReady &&
-            len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.CurrentTarget)) == 3 &&
+            len(suite.targetMgr.GetSealedSegmentsByCollection(suite.collectionID, meta.CurrentTarget)) == 3 &&
             len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.CurrentTarget)) == 2
     }, 7*time.Second, 1*time.Second)
 }
@@ -876,7 +876,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
         return resp, nil
     }

-    currentTargets := s.targetMgr.GetHistoricalSegmentsByCollection(req.GetCollectionID(), meta.CurrentTarget)
+    currentTargets := s.targetMgr.GetSealedSegmentsByCollection(req.GetCollectionID(), meta.CurrentTarget)
     for _, channel := range channels {
         log := log.With(zap.String("channel", channel.GetChannelName()))

@@ -1659,7 +1659,7 @@ func (suite *ServiceSuite) assertLoaded(collection int64) {
     }
     for _, partitions := range suite.segments[collection] {
         for _, segment := range partitions {
-            suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
+            suite.NotNil(suite.targetMgr.GetSealedSegment(collection, segment, meta.NextTarget))
         }
     }
 }
@@ -1675,7 +1675,7 @@ func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ..
             continue
         }
         for _, segment := range segments {
-            suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+            suite.NotNil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
         }
     }
 }
@@ -1687,8 +1687,8 @@ func (suite *ServiceSuite) assertReleased(collection int64) {
     }
     for _, partitions := range suite.segments[collection] {
         for _, segment := range partitions {
-            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
-            suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
+            suite.Nil(suite.targetMgr.GetSealedSegment(collection, segment, meta.CurrentTarget))
+            suite.Nil(suite.targetMgr.GetSealedSegment(collection, segment, meta.NextTarget))
         }
     }
 }
@@ -502,7 +502,7 @@ func (scheduler *taskScheduler) GetNodeSegmentCntDelta(nodeID int64) int {
             continue
         }
         segmentAction := action.(*SegmentAction)
-        segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), segmentAction.SegmentID(), meta.NextTarget)
+        segment := scheduler.targetMgr.GetSealedSegment(task.CollectionID(), segmentAction.SegmentID(), meta.NextTarget)
         if action.Type() == ActionTypeGrow {
             delta += int(segment.GetNumOfRows())
         } else {
@@ -586,9 +586,9 @@ func (scheduler *taskScheduler) isRelated(task Task, node int64) bool {
         taskType := GetTaskType(task)
         var segment *datapb.SegmentInfo
         if taskType == TaskTypeMove || taskType == TaskTypeUpdate {
-            segment = scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.CurrentTarget)
+            segment = scheduler.targetMgr.GetSealedSegment(task.CollectionID(), task.SegmentID(), meta.CurrentTarget)
         } else {
-            segment = scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
+            segment = scheduler.targetMgr.GetSealedSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
         }
         if segment == nil {
             continue
@@ -779,9 +779,9 @@ func (scheduler *taskScheduler) checkSegmentTaskStale(task *SegmentTask) error {
         taskType := GetTaskType(task)
         var segment *datapb.SegmentInfo
         if taskType == TaskTypeMove || taskType == TaskTypeUpdate {
-            segment = scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.CurrentTarget)
+            segment = scheduler.targetMgr.GetSealedSegment(task.CollectionID(), task.SegmentID(), meta.CurrentTarget)
         } else {
-            segment = scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
+            segment = scheduler.targetMgr.GetSealedSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
         }
         if segment == nil {
             log.Warn("task stale due to the segment to load not exists in targets",