Return error when bulkload pending list hit limit (#17570)

Signed-off-by: groot <yihua.mo@zilliz.com>
Branch: pull/17584/head
groot 2022-06-16 13:02:10 +08:00 committed by GitHub
parent eb5b0b7fc8
commit b5e62023ea
2 changed files with 11 additions and 6 deletions


@@ -282,13 +282,11 @@ func (m *importManager) importJob(ctx context.Context, req *milvuspb.ImportRequest
 		taskCount = len(req.Files)
 	}
-	// task queue size has a limit, return error if import request contains too many data files
+	// task queue size has a limit, return error if import request contains too many data files, and skip entire job
 	if capacity-length < taskCount {
-		resp.Status = &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_IllegalArgument,
-			Reason:    "Import task queue max size is " + strconv.Itoa(capacity) + ", currently there are " + strconv.Itoa(length) + " tasks is pending. Not able to execute this request with " + strconv.Itoa(taskCount) + " tasks.",
-		}
-		return
+		err = fmt.Errorf("import task queue max size is %v, currently there are %v tasks is pending. Not able to execute this request with %v tasks", capacity, length, taskCount)
+		log.Error(err.Error())
+		return err
 	}
 	bucket := ""
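
Note: the guard above rejects the whole import request when the pending list cannot absorb all of its files, and now logs and returns an error at the point of rejection instead of building the status inline. Below is a minimal, self-contained sketch of that pattern; only the capacity/length/taskCount names and the overflow condition come from the hunk, while the pendingQueue type and the surrounding main function are hypothetical scaffolding, not the actual Milvus importManager.

package main

import (
	"fmt"
	"log"
)

// pendingQueue is a hypothetical stand-in for the import manager's pending task list.
// Only the capacity/length/taskCount names and the overflow check mirror the diff;
// everything else is illustrative scaffolding.
type pendingQueue struct {
	capacity int
	tasks    []string
}

// enqueue rejects the whole batch when it would not fit, mirroring the
// "capacity-length < taskCount" guard in this commit: the error is both
// logged and returned to the caller instead of being swallowed.
func (q *pendingQueue) enqueue(files []string) error {
	length := len(q.tasks)
	taskCount := len(files)
	if q.capacity-length < taskCount {
		err := fmt.Errorf("import task queue max size is %v, currently there are %v tasks pending, not able to execute this request with %v tasks",
			q.capacity, length, taskCount)
		log.Println(err)
		return err
	}
	q.tasks = append(q.tasks, files...)
	return nil
}

func main() {
	q := &pendingQueue{capacity: 32}
	if err := q.enqueue(make([]string, 40)); err != nil {
		fmt.Println("request rejected:", err)
	}
}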


@@ -19,6 +19,7 @@ package rootcoord
 import (
 	"context"
 	"errors"
+	"strconv"
 	"sync"
 	"testing"
 	"time"
@@ -260,6 +261,12 @@ func TestImportManager_ImportJob(t *testing.T) {
 	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
 	assert.Equal(t, len(rowReq.Files)-2, len(mgr.pendingTasks))
 	assert.Equal(t, 2, len(mgr.workingTasks))
+	for i := 0; i <= 32; i++ {
+		rowReq.Files = append(rowReq.Files, strconv.Itoa(i))
+	}
+	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
+	assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
 }
 func TestImportManager_AllDataNodesBusy(t *testing.T) {
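
Note: the test above still checks resp.Status.ErrorCode, so somewhere up the call chain the returned error is turned back into a client-visible status. The sketch below shows one way that translation could look; the status struct, the "IllegalArgument" string, and the doImport stub are simplified placeholders, not the real commonpb types or the actual Milvus call path.

package main

import (
	"errors"
	"fmt"
)

// status and the error-code strings below are simplified placeholders for the
// commonpb.Status seen in the removed lines; they keep the example self-contained.
type status struct {
	ErrorCode string
	Reason    string
}

// errQueueFull stands in for the fmt.Errorf value produced when the pending
// list overflows; the real message is built from capacity/length/taskCount.
var errQueueFull = errors.New("import task queue max size is 32, currently there are 30 tasks pending")

// doImport is a stub for the internal call that may reject a job; it is not
// the actual importManager.importJob method.
func doImport() error {
	return errQueueFull
}

// handleImport converts an internal error back into a client-visible status,
// which is the shape of the logic the removed resp.Status branch performed inline.
func handleImport() status {
	if err := doImport(); err != nil {
		return status{ErrorCode: "IllegalArgument", Reason: err.Error()}
	}
	return status{ErrorCode: "Success"}
}

func main() {
	fmt.Printf("%+v\n", handleImport())
}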