fix: Avoid datarace in clustering compaction (#34288)

#34289

Signed-off-by: wayblink <anyang.wang@zilliz.com>
pull/34339/head
wayblink 2024-07-03 19:08:09 +08:00 committed by GitHub
parent 8c42f1341d
commit da56880d0f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 20 additions and 10 deletions

View File

@ -100,3 +100,15 @@ func setStartTime(startTime int64) compactionTaskOpt {
task.StartTime = startTime task.StartTime = startTime
} }
} }
// setRetryTimes returns a compactionTaskOpt that overwrites the
// task's RetryTimes field with the given value.
func setRetryTimes(retryTimes int32) compactionTaskOpt {
	return func(t *datapb.CompactionTask) {
		t.RetryTimes = retryTimes
	}
}
// setLastStateStartTime returns a compactionTaskOpt that overwrites
// the task's LastStateStartTime field with the given timestamp.
func setLastStateStartTime(lastStateStartTime int64) compactionTaskOpt {
	return func(t *datapb.CompactionTask) {
		t.LastStateStartTime = lastStateStartTime
	}
}

View File

@ -47,10 +47,9 @@ const (
type clusteringCompactionTask struct { type clusteringCompactionTask struct {
*datapb.CompactionTask *datapb.CompactionTask
plan *datapb.CompactionPlan plan *datapb.CompactionPlan
result *datapb.CompactionPlanResult result *datapb.CompactionPlanResult
span trace.Span span trace.Span
lastUpdateStateTime int64
meta CompactionMeta meta CompactionMeta
sessions SessionManager sessions SessionManager
@ -66,24 +65,22 @@ func (t *clusteringCompactionTask) Process() bool {
log.Warn("fail in process task", zap.Error(err)) log.Warn("fail in process task", zap.Error(err))
if merr.IsRetryableErr(err) && t.RetryTimes < taskMaxRetryTimes { if merr.IsRetryableErr(err) && t.RetryTimes < taskMaxRetryTimes {
// retry in next Process // retry in next Process
t.RetryTimes = t.RetryTimes + 1 t.updateAndSaveTaskMeta(setRetryTimes(t.RetryTimes + 1))
} else { } else {
log.Error("task fail with unretryable reason or meet max retry times", zap.Error(err)) log.Error("task fail with unretryable reason or meet max retry times", zap.Error(err))
t.State = datapb.CompactionTaskState_failed t.updateAndSaveTaskMeta(setState(datapb.CompactionTaskState_failed), setFailReason(err.Error()))
t.FailReason = err.Error()
} }
} }
// task state update, refresh retry times count // task state update, refresh retry times count
currentState := t.State.String() currentState := t.State.String()
if currentState != lastState { if currentState != lastState {
t.RetryTimes = 0
ts := time.Now().UnixMilli() ts := time.Now().UnixMilli()
lastStateDuration := ts - t.lastUpdateStateTime t.updateAndSaveTaskMeta(setRetryTimes(0), setLastStateStartTime(ts))
lastStateDuration := ts - t.GetLastStateStartTime()
log.Info("clustering compaction task state changed", zap.String("lastState", lastState), zap.String("currentState", currentState), zap.Int64("elapse", lastStateDuration)) log.Info("clustering compaction task state changed", zap.String("lastState", lastState), zap.String("currentState", currentState), zap.Int64("elapse", lastStateDuration))
metrics.DataCoordCompactionLatency. metrics.DataCoordCompactionLatency.
WithLabelValues(fmt.Sprint(typeutil.IsVectorType(t.GetClusteringKeyField().DataType)), datapb.CompactionType_ClusteringCompaction.String(), lastState). WithLabelValues(fmt.Sprint(typeutil.IsVectorType(t.GetClusteringKeyField().DataType)), datapb.CompactionType_ClusteringCompaction.String(), lastState).
Observe(float64(lastStateDuration)) Observe(float64(lastStateDuration))
t.lastUpdateStateTime = ts
if t.State == datapb.CompactionTaskState_completed { if t.State == datapb.CompactionTaskState_completed {
t.updateAndSaveTaskMeta(setEndTime(ts)) t.updateAndSaveTaskMeta(setEndTime(ts))

View File

@ -913,6 +913,7 @@ message CompactionTask{
int64 prefer_segment_rows = 22; int64 prefer_segment_rows = 22;
int64 analyzeTaskID = 23; int64 analyzeTaskID = 23;
int64 analyzeVersion = 24; int64 analyzeVersion = 24;
int64 lastStateStartTime = 25;
} }
message PartitionStatsInfo { message PartitionStatsInfo {