Increase bulk insert file size limit (#21526)

/kind improvement
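
This raises the hard-coded bulk insert limits: the per-file cap goes from 1GB to 3GB and the total in-memory cap from 2GB to 6GB. Both stay constants for now, with a TODO to make them configurable. For context, below is a minimal sketch of the kind of pre-read guard these constants feed; the function name checkFileSize and its signature are hypothetical illustrations, not the actual Milvus API.

package importutil

import "fmt"

// MaxFileSize mirrors the new limit introduced in this PR (3GB).
const MaxFileSize = 3 * 1024 * 1024 * 1024

// checkFileSize is a hypothetical guard: a column-based file is read fully
// into memory, so oversized inputs are rejected before read() to avoid OOM.
func checkFileSize(fileSize int64) error {
	if fileSize > MaxFileSize {
		return fmt.Errorf("import file size %d bytes exceeds the limit of %d bytes",
			fileSize, int64(MaxFileSize))
	}
	return nil
}

Keeping the check as a plain constant comparison matches the current hard-coded approach; making the limits configurable is tracked by the new TODO comments below.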

Signed-off-by: Yuchen Gao <yuchen.gao@zilliz.com>
pull/21564/head
Ten Thousand Leaves 2023-01-06 13:09:36 +08:00 committed by GitHub
parent a7384af59f
commit b3da554705
2 changed files with 5 additions and 3 deletions

@@ -47,7 +47,8 @@ const (
 // this limitation is to avoid this OOM risk:
 // for a column-based file, we read all its data into memory; if the user inputs a large file, the read() method may
 // cost extra memory and lead to OOM.
-MaxFileSize = 1 * 1024 * 1024 * 1024 // 1GB
+// TODO: make it configurable.
+MaxFileSize = 3 * 1024 * 1024 * 1024 // 3GB
 // this limitation is to avoid this OOM risk:
 // sometimes the system segment max size is a large number, and a single segment's field data might cause OOM.
@@ -57,7 +58,8 @@ const (
 // this limitation is to avoid this OOM risk:
 // if the shard number is large, although each single segment is small, there are lots of in-memory segments,
 // and the total memory size might cause OOM.
-MaxTotalSizeInMemory = 2 * 1024 * 1024 * 1024 // 2GB
+// TODO: make it configurable.
+MaxTotalSizeInMemory = 6 * 1024 * 1024 * 1024 // 6GB
 // keywords of import task information
 FailedReason = "failed_reason"
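
The second constant guards aggregate memory rather than a single file: each shard buffers its own in-memory segment, so with many shards even modest per-shard buffers add up. A rough, purely illustrative estimate follows; the shard count and per-segment size are assumptions, not values from Milvus.

package main

import "fmt"

// Illustrative arithmetic only: with many shards, each buffering its own
// in-memory segment, small per-shard buffers can still sum past the cap.
func main() {
	const maxTotalSizeInMemory = int64(6 * 1024 * 1024 * 1024) // 6GB, the new limit
	shardNum := int64(64)                                      // assumed shard count
	perSegmentBytes := int64(128 * 1024 * 1024)                // assumed 128MB buffered per shard
	total := shardNum * perSegmentBytes                        // 8GB in this example
	fmt.Printf("buffered total: %d bytes, limit: %d bytes, exceeds: %v\n",
		total, maxTotalSizeInMemory, total > maxTotalSizeInMemory)
}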

@@ -72,7 +72,7 @@ func Test_AdjustBufSize(t *testing.T) {
 assert.NotNil(t, parser)
 sizePerRecord, _ = typeutil.EstimateSizePerRecord(schema)
-assert.Equal(t, 7, MaxFileSize/(sizePerRecord*int(parser.bufSize)))
+assert.Equal(t, 16, MaxFileSize/(sizePerRecord*int(parser.bufSize)))
 // no change
 schema = &schemapb.CollectionSchema{