mirror of https://github.com/milvus-io/milvus.git
Skip canceled tasks (#19513)
Signed-off-by: yah01 <yang.cen@zilliz.com>
parent 7ca844ab99
commit 9dcee37e1c
@@ -169,7 +169,7 @@ queryCoord:
   distPullInterval: 500
   loadTimeoutSeconds: 600
   checkHandoffInterval: 5000
-  taskMergeCap: 2
+  taskMergeCap: 8
@@ -14,7 +14,7 @@ import (
 )
 
 var (
-    checkRoundTaskNumLimit = 128
+    checkRoundTaskNumLimit = 256
 )
 
 type CheckerController struct {
@@ -86,20 +86,21 @@ func (controller *CheckerController) Stop() {
 // check is the real implementation of Check
 func (controller *CheckerController) check(ctx context.Context) {
     tasks := make([]task.Task, 0)
-    for id, checker := range controller.checkers {
-        log := log.With(zap.Int("checkerID", id))
-
+    for _, checker := range controller.checkers {
         tasks = append(tasks, checker.Check(ctx)...)
-        if len(tasks) >= checkRoundTaskNumLimit {
-            log.Info("checkers have spawn too many tasks, won't run subsequent checkers, and truncate the spawned tasks",
+    }
+
+    added := 0
+    for _, task := range tasks {
+        err := controller.scheduler.Add(task)
+        if err != nil {
+            continue
+        }
+        added++
+        if added >= checkRoundTaskNumLimit {
+            log.Info("checkers have added too many tasks, truncate the subsequent tasks",
                 zap.Int("taskNum", len(tasks)),
                 zap.Int("taskNumLimit", checkRoundTaskNumLimit))
-            tasks = tasks[:checkRoundTaskNumLimit]
             break
         }
     }
-
-    for _, task := range tasks {
-        controller.scheduler.Add(task)
-    }
 }
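The rewritten check() no longer truncates inside the checker loop; it collects every spawned task first, then stops handing tasks to the scheduler once checkRoundTaskNumLimit of them have been accepted, with failed Add calls not counting toward the cap. Below is a standalone sketch of that capping pattern; the Scheduler interface and names are illustrative stand-ins, not the querycoord types.

package main

import "fmt"

// Scheduler is a stand-in for the querycoord task scheduler; only Add matters here.
type Scheduler interface {
	Add(task string) error
}

type sliceScheduler struct{ queue []string }

func (s *sliceScheduler) Add(task string) error {
	s.queue = append(s.queue, task)
	return nil
}

// addWithCap mirrors the logic above: tasks whose Add fails are skipped and do
// not count against the limit; once `limit` tasks are accepted, the rest are dropped.
func addWithCap(s Scheduler, tasks []string, limit int) int {
	added := 0
	for _, t := range tasks {
		if err := s.Add(t); err != nil {
			continue
		}
		added++
		if added >= limit {
			break
		}
	}
	return added
}

func main() {
	s := &sliceScheduler{}
	fmt.Println(addWithCap(s, []string{"t1", "t2", "t3", "t4"}, 2)) // prints 2
}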
@@ -19,7 +19,7 @@ const (
     TaskTypeReduce
     TaskTypeMove
 
-    taskPoolSize = 128
+    taskPoolSize = 256
 )
 
 var (
@@ -38,6 +38,7 @@ import (
 
 type task interface {
     ID() UniqueID // return ReqID
+    Ctx() context.Context
     Timestamp() Timestamp
     PreExecute(ctx context.Context) error
     Execute(ctx context.Context) error
@@ -539,6 +540,12 @@ func (l *loadSegmentsTask) PreExecute(ctx context.Context) error {
 func (l *loadSegmentsTask) Execute(ctx context.Context) error {
     log.Info("LoadSegmentTask Execute start", zap.Int64("msgID", l.req.Base.MsgID))
 
+    if len(l.req.Infos) == 0 {
+        log.Info("all segments loaded",
+            zap.Int64("msgID", l.req.GetBase().GetMsgID()))
+        return nil
+    }
+
     segmentIDs := lo.Map(l.req.Infos, func(info *queryPb.SegmentLoadInfo, idx int) UniqueID { return info.SegmentID })
     l.node.metaReplica.addSegmentsLoadingList(segmentIDs)
     defer l.node.metaReplica.removeSegmentsLoadingList(segmentIDs)
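The Execute body above uses lo.Map from github.com/samber/lo to pull the segment IDs out of the load request before marking them as loading, and the new guard returns early when the request carries no segment infos. A minimal sketch of both with simplified types; SegmentLoadInfo here is a stand-in struct, not the real queryPb proto message.

package main

import (
	"fmt"

	"github.com/samber/lo"
)

// SegmentLoadInfo stands in for the queryPb.SegmentLoadInfo proto message.
type SegmentLoadInfo struct {
	SegmentID int64
}

func main() {
	infos := []*SegmentLoadInfo{{SegmentID: 101}, {SegmentID: 102}}

	// The guard added by this commit: an empty request means nothing to load.
	if len(infos) == 0 {
		fmt.Println("all segments loaded")
		return
	}

	// Collect segment IDs the same way Execute does before adding them to the loading list.
	segmentIDs := lo.Map(infos, func(info *SegmentLoadInfo, _ int) int64 { return info.SegmentID })
	fmt.Println(segmentIDs) // [101 102]
}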
@@ -123,7 +123,13 @@ func (s *taskScheduler) taskLoop() {
         case <-s.queue.utChan():
             if !s.queue.utEmpty() {
                 t := s.queue.PopUnissuedTask()
-                s.processTask(t, s.queue)
+                select {
+                case <-t.Ctx().Done():
+                    t.Notify(context.Canceled)
+                    continue
+                default:
+                    s.processTask(t, s.queue)
+                }
             }
         }
     }
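This hunk is the change the commit title refers to: before executing a dequeued task, the scheduler now does a non-blocking check of the task's context and notifies the waiter with context.Canceled instead of running work nobody is waiting for. A self-contained sketch of the same select pattern follows, with illustrative types; the real scheduler calls t.Notify and s.processTask.

package main

import (
	"context"
	"fmt"
)

type task struct {
	ctx  context.Context
	name string
}

// run checks the task's context without blocking: a canceled task is skipped
// and the cancellation is reported; otherwise the task is processed.
func run(t task) {
	select {
	case <-t.ctx.Done():
		fmt.Println(t.name, "skipped:", context.Canceled)
	default:
		fmt.Println(t.name, "processed")
	}
}

func main() {
	canceled, cancel := context.WithCancel(context.Background())
	cancel() // simulate a request canceled while its task sat in the queue

	run(task{ctx: canceled, name: "loadSegments-1"})
	run(task{ctx: context.Background(), name: "loadSegments-2"})
}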
@@ -661,7 +661,7 @@ func (p *queryCoordConfig) initTaskRetryInterval() {
 }
 
 func (p *queryCoordConfig) initTaskMergeCap() {
-    p.TaskMergeCap = p.Base.ParseInt32WithDefault("queryCoord.taskMergeCap", 2)
+    p.TaskMergeCap = p.Base.ParseInt32WithDefault("queryCoord.taskMergeCap", 8)
 }
 
 func (p *queryCoordConfig) initAutoHandoff() {
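The default bump from 2 to 8 here matches the milvus.yaml change in the first hunk. For illustration only, and not the paramtable implementation, a sketch of how a ParseInt32WithDefault-style lookup falls back to the default when queryCoord.taskMergeCap is unset or malformed:

package main

import (
	"fmt"
	"strconv"
)

// parseInt32WithDefault is a hypothetical helper: it returns the configured
// value when present and parseable, otherwise the provided default.
func parseInt32WithDefault(cfg map[string]string, key string, def int32) int32 {
	raw, ok := cfg[key]
	if !ok {
		return def
	}
	v, err := strconv.ParseInt(raw, 10, 32)
	if err != nil {
		return def
	}
	return int32(v)
}

func main() {
	cfg := map[string]string{"queryCoord.taskMergeCap": "16"}
	fmt.Println(parseInt32WithDefault(cfg, "queryCoord.taskMergeCap", 8)) // 16
	fmt.Println(parseInt32WithDefault(nil, "queryCoord.taskMergeCap", 8)) // 8
}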