mirror of https://github.com/milvus-io/milvus.git

enhance: update milvus.yaml (#31832)

See #32168

Signed-off-by: Ted Xu <ted.xu@zilliz.com>

Branch: pull/32313/head
parent 83da08c388
commit 78d32bd8b2
@@ -12,8 +12,10 @@ import (
 	"gopkg.in/yaml.v3"
 )
 
-var inputFile = "configs/milvus.yaml"
-var outputPath = os.Getenv("PWD")
+var (
+	inputFile  = "configs/milvus.yaml"
+	outputPath = os.Getenv("PWD")
+)
 
 func main() {
 	flag.StringVar(&inputFile, "i", inputFile, "input file")

@@ -168,13 +170,13 @@ For the convenience of maintenance, Milvus classifies its configurations into %s
 
 `
 	const fileName = "system_configuration.md"
-	var fileContent = head
+	fileContent := head
 	for _, sec := range secs {
 		fileContent += sec.systemConfiguratinContent()
 		sectionFileContent := sec.sectionPageContent()
-		os.WriteFile(filepath.Join(outputPath, sec.fileName()), []byte(sectionFileContent), 0644)
+		os.WriteFile(filepath.Join(outputPath, sec.fileName()), []byte(sectionFileContent), 0o644)
 	}
-	err := os.WriteFile(filepath.Join(outputPath, fileName), []byte(fileContent), 0644)
+	err := os.WriteFile(filepath.Join(outputPath, fileName), []byte(fileContent), 0o644)
 	return errors.Wrapf(err, "writefile %s", fileName)
 }
@@ -3,8 +3,9 @@ package main
 import (
 	"encoding/csv"
 	"fmt"
-	"os"
+	"io"
 	"reflect"
+	"sort"
 	"strings"
 
 	"github.com/samber/lo"

@@ -47,6 +48,13 @@ func collect() []DocContent {
 	return result
 }
 
+func quoteIfNeeded(s string) string {
+	if strings.ContainsAny(s, "[],{}") {
+		return fmt.Sprintf("\"%s\"", s)
+	}
+	return s
+}
+
 func collectRecursive(params *paramtable.ComponentParam, data *[]DocContent, val *reflect.Value) {
 	if val.Kind() != reflect.Struct {
 		return
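As an aside for readers following the generator changes: the new quoteIfNeeded helper exists because YAML treats [, ], {, }, and , as flow-collection syntax, so a raw default value containing them would not round-trip through a YAML parser as a plain string. A standalone sketch of its behavior — the sample values here are illustrative, not taken from paramtable:

package main

import (
	"fmt"
	"strings"
)

// quoteIfNeeded wraps a value in double quotes when it contains characters
// that a YAML parser would otherwise read as flow-sequence/mapping syntax.
func quoteIfNeeded(s string) string {
	if strings.ContainsAny(s, "[],{}") {
		return fmt.Sprintf("\"%s\"", s)
	}
	return s
}

func main() {
	fmt.Println(quoteIfNeeded("8192"))            // 8192 — emitted as-is
	fmt.Println(quoteIfNeeded("[0, 0, 7, 7, 7]")) // "[0, 0, 7, 7, 7]" — quoted
}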
@@ -63,6 +71,7 @@ func collectRecursive(params *paramtable.ComponentParam, data *[]DocContent, val
 			log.Debug("got key", zap.String("key", item.Key), zap.Any("value", defaultValue), zap.String("variable", val.Type().Field(j).Name))
 			*data = append(*data, DocContent{item.Key, defaultValue, item.Version, refreshable, item.Export, item.Doc})
 			for _, fk := range item.FallbackKeys {
+				defaultValue = params.GetWithDefault(fk, item.DefaultValue)
 				log.Debug("got fallback key", zap.String("key", fk), zap.Any("value", defaultValue), zap.String("variable", val.Type().Field(j).Name))
 				*data = append(*data, DocContent{fk, defaultValue, item.Version, refreshable, item.Export, item.Doc})
 			}

@@ -70,20 +79,26 @@ func collectRecursive(params *paramtable.ComponentParam, data *[]DocContent, val
 			item := subVal.Interface().(paramtable.ParamGroup)
 			log.Debug("got key", zap.String("key", item.KeyPrefix), zap.String("variable", val.Type().Field(j).Name))
 			refreshable := tag.Get("refreshable")
 			*data = append(*data, DocContent{item.KeyPrefix, "", item.Version, refreshable, item.Export, item.Doc})
+
+			// Sort group items to stablize the output order
+			m := item.GetValue()
+			keys := make([]string, 0, len(m))
+			for k := range m {
+				keys = append(keys, k)
+			}
+			sort.Strings(keys)
+			for _, key := range keys {
+				value := m[key]
+				log.Debug("got group entry", zap.String("key", key), zap.String("value", value))
+				*data = append(*data, DocContent{fmt.Sprintf("%s%s", item.KeyPrefix, key), quoteIfNeeded(value), item.Version, refreshable, item.Export, ""})
 			}
 		} else {
 			collectRecursive(params, data, &subVal)
 		}
 	}
 }
 
-func WriteCsv() {
-	f, err := os.Create("configs.csv")
-	defer f.Close()
-	if err != nil {
-		log.Error("create file failed", zap.Error(err))
-		os.Exit(-2)
-	}
+func WriteCsv(f io.Writer) {
 	w := csv.NewWriter(f)
 	w.Write([]string{"key", "defaultValue", "sinceVersion", "refreshable", "exportToUser", "comment"})
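The sorting block above addresses a real reproducibility issue: Go deliberately randomizes map iteration order, so successive runs of the generator could emit group entries in a different order each time. A minimal self-contained sketch of the pattern — the keys and values here are made up:

package main

import (
	"fmt"
	"sort"
)

func main() {
	group := map[string]string{"b.key": "2", "a.key": "1", "c.key": "3"}

	// Ranging over the map directly yields a different order on every run.
	// Collecting and sorting the keys first makes the generated file stable,
	// which is what allows it to be diffed and asserted against in CI.
	keys := make([]string, 0, len(group))
	for k := range group {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s: %s\n", k, group[k])
	}
}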
@@ -101,7 +116,7 @@ type YamlGroup struct {
 }
 
 type YamlMarshaller struct {
-	writer *os.File
+	writer io.Writer
 	groups []YamlGroup
 	data   []DocContent
 }
@@ -142,19 +157,19 @@ func (m *YamlMarshaller) writeYamlRecursive(data []DocContent, level int) {
 		isDisabled := slices.Contains(disabledGroups, strings.Split(content.key, ".")[0])
 		if strings.Count(content.key, ".") == level {
 			if isDisabled {
-				m.writer.WriteString("# ")
+				io.WriteString(m.writer, "# ")
 			}
 			m.writeContent(key, content.defaultValue, content.comment, level)
 			continue
 		}
 		extra, ok := extraHeaders[key]
 		if ok {
-			m.writer.WriteString(extra + "\n")
+			io.WriteString(m.writer, extra+"\n")
 		}
 		if isDisabled {
-			m.writer.WriteString("# ")
+			io.WriteString(m.writer, "# ")
 		}
-		m.writer.WriteString(fmt.Sprintf("%s%s:\n", strings.Repeat(" ", level*2), key))
+		io.WriteString(m.writer, fmt.Sprintf("%s%s:\n", strings.Repeat(" ", level*2), key))
 		m.writeYamlRecursive(contents, level+1)
 	}
 }
@@ -163,27 +178,20 @@ func (m *YamlMarshaller) writeContent(key, value, comment string, level int) {
 	if strings.Contains(comment, "\n") {
 		multilines := strings.Split(comment, "\n")
 		for _, line := range multilines {
-			m.writer.WriteString(fmt.Sprintf("%s# %s\n", strings.Repeat(" ", level*2), line))
+			io.WriteString(m.writer, fmt.Sprintf("%s# %s\n", strings.Repeat(" ", level*2), line))
 		}
-		m.writer.WriteString(fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
+		io.WriteString(m.writer, fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
 	} else if comment != "" {
-		m.writer.WriteString(fmt.Sprintf("%s%s: %s # %s\n", strings.Repeat(" ", level*2), key, value, comment))
+		io.WriteString(m.writer, fmt.Sprintf("%s%s: %s # %s\n", strings.Repeat(" ", level*2), key, value, comment))
 	} else {
-		m.writer.WriteString(fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
+		io.WriteString(m.writer, fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
 	}
 }
 
-func WriteYaml() {
-	f, err := os.Create("milvus.yaml")
-	defer f.Close()
-	if err != nil {
-		log.Error("create file failed", zap.Error(err))
-		os.Exit(-2)
-	}
-
+func WriteYaml(w io.Writer) {
 	result := collect()
 
-	f.WriteString(`# Licensed to the LF AI & Data foundation under one
+	io.WriteString(w, `# Licensed to the LF AI & Data foundation under one
 # or more contributor license agreements. See the NOTICE file
 # distributed with this work for additional information
 # regarding copyright ownership. The ASF licenses this file
@@ -207,6 +215,13 @@ func WriteYaml() {
 		{
 			name: "metastore",
 		},
+		{
+			name: "tikv",
+			header: `
+# Related configuration of tikv, used to store Milvus metadata.
+# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery.
+# TiKV is a good option when the metadata size requires better horizontal scalability.`,
+		},
 		{
 			name: "localStorage",
 		},
@@ -215,15 +230,19 @@ func WriteYaml() {
 			header: `
 # Related configuration of MinIO/S3/GCS or any other service supports S3 API, which is responsible for data persistence for Milvus.
 # We refer to the storage service as MinIO/S3 in the following description for simplicity.`,
 		},
+		{
+			name: "mq",
+			header: `
+# Milvus supports four MQ: rocksmq(based on RockDB), natsmq(embedded nats-server), Pulsar and Kafka.
+# You can change your mq by setting mq.type field.
+# If you don't set mq.type field as default, there is a note about enabling priority if we config multiple mq in this file.
+# 1. standalone(local) mode: rocksmq(default) > natsmq > Pulsar > Kafka
+# 2. cluster mode: Pulsar(default) > Kafka (rocksmq and natsmq is unsupported in cluster mode)`,
+		},
 		{
 			name: "pulsar",
 			header: `
-# Milvus supports three MQ: rocksmq(based on RockDB), Pulsar and Kafka, which should be reserved in config what you use.
-# There is a note about enabling priority if we config multiple mq in this file
-# 1. standalone(local) mode: rocksmq(default) > Pulsar > Kafka
-# 2. cluster mode: Pulsar(default) > Kafka (rocksmq is unsupported)
-
 # Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.`,
 		},
 		{
@@ -234,6 +253,12 @@ func WriteYaml() {
 		{
 			name: "rocksmq",
 		},
+		{
+			name: "natsmq",
+			header: `
+# natsmq configuration.
+# more detail: https://docs.nats.io/running-a-nats-service/configuration`,
+		},
 		{
 			name:   "rootCoord",
 			header: "\n# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests",
@@ -294,8 +319,18 @@ func WriteYaml() {
 		{
 			name: "trace",
 		},
+		{
+			name: "gpu",
+			header: `
+#when using GPU indexing, Milvus will utilize a memory pool to avoid frequent memory allocation and deallocation.
+#here, you can set the size of the memory occupied by the memory pool, with the unit being MB.
+#note that there is a possibility of Milvus crashing when the actual memory demand exceeds the value set by maxMemSize.
+#if initMemSize and MaxMemSize both set zero,
+#milvus will automatically initialize half of the available GPU memory,
+#maxMemSize will the whole available GPU memory.`,
+		},
 	}
-	marshller := YamlMarshaller{f, groups, result}
+	marshller := YamlMarshaller{w, groups, result}
 	marshller.writeYamlRecursive(lo.Filter(result, func(d DocContent, _ int) bool {
 		return d.exportToUser
 	}), 0)
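The thread running through this file's changes is that YamlMarshaller, WriteCsv, and WriteYaml now write to an io.Writer instead of a concrete *os.File. A minimal sketch of why that seam matters — writeConfig here is a hypothetical stand-in for the real writers:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeConfig stands in for WriteYaml/WriteCsv: it only needs something
// that accepts bytes, not a real file on disk.
func writeConfig(w io.Writer) {
	io.WriteString(w, "etcd:\n  rootPath: by-dev\n")
}

func main() {
	// Production path: the caller creates the file and owns its lifetime.
	f, err := os.Create("milvus.yaml")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	writeConfig(f)

	// Test path: capture the output in memory, no filesystem involved.
	var buf bytes.Buffer
	writeConfig(&buf)
	fmt.Print(buf.String())
}

The new test file below uses exactly this property, rendering the config into a bytes.Buffer and comparing it line by line against configs/milvus.yaml.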
@@ -0,0 +1,49 @@
+// Copyright (C) 2019-2020 Zilliz. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed under the License
+// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+// or implied. See the License for the specific language governing permissions and limitations under the License.
+
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+
+	"github.com/milvus-io/milvus/pkg/log"
+	"github.com/milvus-io/milvus/pkg/util/paramtable"
+)
+
+// Assert the milvus.yaml file is consistent to paramtable
+//
+// Please be noted that milvus.yaml is generated by code, so don't edit it directly, instead, change the code in paramtable
+// and run `make milvus-tools && ./bin/tools/config gen-yaml && mv milvus.yaml configs/milvus.yaml`.
+func TestYamlFile(t *testing.T) {
+	w := bytes.Buffer{}
+	WriteYaml(&w)
+
+	base := paramtable.NewBaseTable()
+	f, err := os.Open(fmt.Sprintf("%s/%s", base.GetConfigDir(), "milvus.yaml"))
+	assert.NoError(t, err, "expecting configs/milvus.yaml")
+	defer f.Close()
+	fileScanner := bufio.NewScanner(f)
+	codeScanner := bufio.NewScanner(&w)
+	for fileScanner.Scan() && codeScanner.Scan() {
+		if fileScanner.Text() != codeScanner.Text() {
+			assert.FailNow(t, fmt.Sprintf("configs/milvus.yaml is not consistent with paramtable, file: [%s], code: [%s]. Do not edit milvus.yaml directly.",
+				fileScanner.Text(), codeScanner.Text()))
+		}
+		log.Error("", zap.Any("file", fileScanner.Text()), zap.Any("code", codeScanner.Text()))
+	}
+}
@@ -4,6 +4,8 @@ import (
 	"fmt"
+	"os"
 
+	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus/pkg/log"
 )
 

@@ -22,9 +24,21 @@ func main() {
 	}
 	switch args[1] {
 	case generateCsv:
-		WriteCsv()
+		f, err := os.Create("configs.csv")
+		defer f.Close()
+		if err != nil {
+			log.Error("create file failed", zap.Error(err))
+			os.Exit(-2)
+		}
+		WriteCsv(f)
 	case generateYaml:
-		WriteYaml()
+		f, err := os.Create("milvus.yaml")
+		defer f.Close()
+		if err != nil {
+			log.Error("create file failed", zap.Error(err))
+			os.Exit(-2)
+		}
+		WriteYaml(f)
 	case showYaml:
 		var f string
 		if len(args) == 2 {
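One detail worth noting in the dispatch above: `defer f.Close()` is registered before the error check. This is safe in practice — `(*os.File).Close` on a nil receiver returns an error rather than panicking, and the process exits immediately on failure anyway — but the conventional ordering checks the error first, as in this sketch:

package main

import (
	"log"
	"os"
)

func main() {
	// Verify os.Create succeeded before registering the deferred Close,
	// so the cleanup never runs against a handle that was never opened.
	f, err := os.Create("configs.csv")
	if err != nil {
		log.Fatalf("create file failed: %v", err)
	}
	defer f.Close()
	// ... write CSV content through f ...
}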
@@ -38,25 +38,34 @@ etcd:
  # Optional values: 1.0, 1.1, 1.2, 1.3。
  # We recommend using version 1.2 and above.
  tlsMinVersion: 1.3
  requestTimeout: 10000 # Etcd operation timeout in milliseconds
  use:
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/
  auth:
    enabled: false # Whether to enable authentication
    userName: # username for etcd authentication
    password: # password for etcd authentication

metastore:
  # Default value: etcd
  # Valid values: [etcd, tikv]
  type: etcd
  type: etcd # Default value: etcd, Valid values: [etcd, tikv]

# Related configuration of tikv, used to store Milvus metadata.
# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery.
# TiKV is a good option when the metadata size requires better horizontal scalability.
tikv:
  # Note that the default pd port of tikv is 2379, which conflicts with etcd.
  endpoints: 127.0.0.1:2389
  rootPath: by-dev # The root path where data is stored
  endpoints: 127.0.0.1:2389 # Note that the default pd port of tikv is 2379, which conflicts with etcd.
  rootPath: by-dev # The root path where data is stored in tikv
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  requestTimeout: 10000 # ms, tikv request timeout
  snapshotScanSize: 256 # batch size of tikv snapshot scan
  ssl:
    enabled: false # Whether to support TiKV secure connection mode
    tlsCert: # path to your cert file
    tlsKey: # path to your key file
    tlsCACert: # path to your CACert file

localStorage:
  path: /var/lib/milvus/data/ # please adjust in embedded Milvus: /tmp/milvus/data/
@@ -70,7 +79,7 @@ minio:
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  ssl:
    tlsCACert: /path/to/public.crt # path to your CACert file, ignore when it is empty
    tlsCACert: /path/to/public.crt # path to your CACert file
  bucketName: a-bucket # Bucket name in MinIO/S3
  rootPath: files # The root path where the message is stored in MinIO/S3
  # Whether to useIAM role to access S3/GCS instead of access/secret keys
@@ -88,26 +97,25 @@ minio:
  cloudProvider: aws
  # Custom endpoint for fetch IAM role credentials. when useIAM is true & cloudProvider is "aws".
  # Leave it empty if you want to use AWS default endpoint
  iamEndpoint:
  # Log level for aws sdk log.
  # Supported level: off, fatal, error, warn, info, debug, trace
  logLevel: fatal
  # Cloud data center region
  region: ''
  # Cloud whether use virtual host bucket mode
  useVirtualHost: false
  # timeout for request time in milliseconds
  requestTimeoutMs: 10000
  iamEndpoint:
  logLevel: fatal # Log level for aws sdk log. Supported level: off, fatal, error, warn, info, debug, trace
  region: # Specify minio storage system location region
  useVirtualHost: false # Whether use virtual host mode for bucket
  requestTimeoutMs: 10000 # minio timeout for request time in milliseconds

# Milvus supports four MQ: rocksmq(based on RockDB), natsmq(embedded nats-server), Pulsar and Kafka.
# You can change your mq by setting mq.type field.
# If you don't set mq.type field as default, there is a note about enabling priority if we config multiple mq in this file.
# 1. standalone(local) mode: rocksmq(default) > Pulsar > Kafka
# 1. standalone(local) mode: rocksmq(default) > natsmq > Pulsar > Kafka
# 2. cluster mode: Pulsar(default) > Kafka (rocksmq and natsmq is unsupported in cluster mode)
mq:
  # Default value: "default"
  # Valid values: [default, pulsar, kafka, rocksmq, natsmq]
  type: default
  enablePursuitMode: true # Default value: "true"
  pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds
  pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes
  mqBufSize: 16 # MQ client consumer buffer length

# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
@@ -122,18 +130,18 @@ pulsar:

# If you want to enable kafka, needs to comment the pulsar configs
# kafka:
#   brokerList:
#   saslUsername:
#   saslPassword:
#   saslMechanisms:
#   securityProtocol:
#   readTimeout: 10 # read message timeout in seconds
#   brokerList:
#   saslUsername:
#   saslPassword:
#   saslMechanisms:
#   securityProtocol:
#   ssl:
#     enabled: false # Whether to support kafka secure connection mode
#     tlsCert: /path/to/client.pem # path to client's public key
#     tlsKey: /path/to/client.key # path to client's private key
#     tlsCACert: /path/to/ca-cert # file or directory path to CA certificate
#     tlsKeyPassword: "" # private key passphrase for use with private key, if any
#     enabled: false # whether to enable ssl mode
#     tlsCert: # path to client's public key (PEM) used for authentication
#     tlsKey: # path to client's private key (PEM) used for authentication
#     tlsCaCert: # file or directory path to CA certificate(s) for verifying the broker's key
#     tlsKeyPassword: # private key passphrase for use with ssl.key.location and set_ssl_cert(), if any
#   readTimeout: 10

rocksmq:
  # The path where the message is stored in rocksmq
@@ -144,54 +152,51 @@ rocksmq:
  retentionTimeInMinutes: 4320 # 3 days, 3 * 24 * 60 minutes, The retention time of the message in rocksmq.
  retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
  compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
  # compaction compression type, only support use 0,7.
  # 0 means not compress, 7 will use zstd
  # len of types means num of rocksdb level.
  compressionTypes: [0, 0, 7, 7, 7]
  compressionTypes: 0,0,7,7,7 # compaction compression type, only support use 0,7. 0 means not compress, 7 will use zstd. Length of types means num of rocksdb level.

# natsmq configuration.
# more detail: https://docs.nats.io/running-a-nats-service/configuration
natsmq:
  server: # server side configuration for natsmq.
    port: 4222 # 4222 by default, Port for nats server listening.
    storeDir: /var/lib/milvus/nats # /var/lib/milvus/nats by default, directory to use for JetStream storage of nats.
    maxFileStore: 17179869184 # (B) 16GB by default, Maximum size of the 'file' storage.
    maxPayload: 8388608 # (B) 8MB by default, Maximum number of bytes in a message payload.
    maxPending: 67108864 # (B) 64MB by default, Maximum number of bytes buffered for a connection Applies to client connections.
    initializeTimeout: 4000 # (ms) 4s by default, waiting for initialization of natsmq finished.
  server:
    port: 4222 # Port for nats server listening
    storeDir: /var/lib/milvus/nats # Directory to use for JetStream storage of nats
    maxFileStore: 17179869184 # Maximum size of the 'file' storage
    maxPayload: 8388608 # Maximum number of bytes in a message payload
    maxPending: 67108864 # Maximum number of bytes buffered for a connection Applies to client connections
    initializeTimeout: 4000 # waiting for initialization of natsmq finished
  monitor:
    trace: false # false by default, If true enable protocol trace log messages.
    debug: false # false by default, If true enable debug log messages.
    logTime: true # true by default, If set to false, log without timestamps.
    logFile: /tmp/milvus/logs/nats.log # /tmp/milvus/logs/nats.log by default, Log file path relative to .. of milvus binary if use relative path.
    logSizeLimit: 536870912 # (B) 512MB by default, Size in bytes after the log file rolls over to a new one.
    trace: false # If true enable protocol trace log messages
    debug: false # If true enable debug log messages
    logTime: true # If set to false, log without timestamps.
    logFile: /tmp/milvus/logs/nats.log # Log file path relative to .. of milvus binary if use relative path
    logSizeLimit: 536870912 # Size in bytes after the log file rolls over to a new one
  retention:
    maxAge: 4320 # (min) 3 days by default, Maximum age of any message in the P-channel.
    maxBytes: # (B) None by default, How many bytes the single P-channel may contain. Removing oldest messages if the P-channel exceeds this size.
    maxMsgs: # None by default, How many message the single P-channel may contain. Removing oldest messages if the P-channel exceeds this limit.
    maxAge: 4320 # Maximum age of any message in the P-channel
    maxBytes: # How many bytes the single P-channel may contain. Removing oldest messages if the P-channel exceeds this size
    maxMsgs: # How many message the single P-channel may contain. Removing oldest messages if the P-channel exceeds this limit

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  dmlChannelNum: 16 # The number of dml channels created at system startup
  maxDatabaseNum: 64 # Maximum number of database
  maxPartitionNum: 4096 # Maximum number of partitions in a collection
  minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed
  enableActiveStandby: false
  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
  maxDatabaseNum: 64 # Maximum number of database
  maxGeneralCapacity: 65536 # upper limit for the sum of of product of partitionNumber and shardNumber
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 53100
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912
  maxGeneralCapacity: 65536

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  timeTickInterval: 200 # ms, the interval that proxy synchronize the time tick
  healthCheckTimeout: 3000 # ms, the interval that to do component healthy check
  healthCheckTimetout: 3000 # ms, the interval that to do component healthy check
  msgStream:
    timeTick:
      bufSize: 512
@@ -200,50 +205,42 @@ proxy:
  # As of today (2.2.0 and after) it is strongly DISCOURAGED to set maxFieldNum >= 64.
  # So adjust at your risk!
  maxFieldNum: 64
  maxVectorFieldNum: 4 # Maximum number of vector fields in a collection, (0, 10].
  maxVectorFieldNum: 4 # Maximum number of vector fields in a collection.
  maxShardNum: 16 # Maximum number of shards in a collection
  maxDimension: 32768 # Maximum dimension of a vector
  # Whether to produce gin logs.\n
  # please adjust in embedded Milvus: false
  ginLogging: true
  ginLogSkipPaths: "/" # skipped url path for gin log split by comma
  ginLogSkipPaths: / # skip url path for gin log
  maxTaskNum: 1024 # max task number of proxy task queue
  accessLog:
    enable: false # if use access log
    minioEnable: false # if upload sealed access log file to minio
    localPath: /tmp/milvus_access
    filename: # Log filename, leave empty to use stdout.
    maxSize: 64 # Max size for a single file, in MB.
    cacheSize: 10240 # Size of log of memory cache, in B
    rotatedTime: 0 # Max time for single access log file in seconds
    remotePath: access_log/ # File path in minIO
    remoteMaxTime: 0 # Max time for log file in minIO, in hours
    formatters:
      base:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
      query:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
        methods: "Query,Search,Delete"
  connectionCheckIntervalSeconds: 120 # the interval time(in seconds) for connection manager to scan inactive client info
  connectionClientInfoTTLSeconds: 86400 # inactive client info TTL duration, in seconds
  maxConnectionNum: 10000 # the max client info numbers that proxy should manage, avoid too many client infos.
  accessLog:
    enable: false
    # Log filename, set as "" to use stdout.
    # filename: ""
    # define formatters for access log by XXX:{format: XXX, method:[XXX,XXX]}
    formatters:
      # "base" formatter could not set methods
      # all method will use "base" formatter default
      base:
        # will not print access log if set as ""
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
      query:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
        # set formatter owners by method name(method was all milvus external interface)
        # all method will use base formatter default
        # one method only could use one formatter
        # if set a method formatter mutiple times, will use random fomatter.
        methods: ["Query", "Search", "Delete"]
    # localPath: /tmp/milvus_accesslog // log file rootpath
    # maxSize: 64 # max log file size(MB) of singal log file, mean close when time <= 0.
    # rotatedTime: 0 # max time range of singal log file, mean close when time <= 0;
    # maxBackups: 8 # num of reserved backups. will rotate and crate a new backup when access log file trigger maxSize or rotatedTime.
    # cacheSize: 10240 # write cache of accesslog in Byte

    # minioEnable: false # update backups to milvus minio when minioEnable is true.
    # remotePath: "access_log/" # file path when update backups to minio
    # remoteMaxTime: 0 # max time range(in Hour) of backups in minio, 0 means close time retention.
  maxConnectionNum: 10000 # the max client info numbers that proxy should manage, avoid too many client infos
  gracefulStopTimeout: 30 # seconds. force stop node without graceful stop
  slowQuerySpanInSeconds: 5 # query whose executed time exceeds the `slowQuerySpanInSeconds` can be considered slow, in seconds.
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode
    # can specify ip for example
    # ip: 127.0.0.1
    ip: # if not specify address, will use the first unicastable address as local ip
    port: # high-level restful api
    acceptTypeAllowInt64: true # high-level restful api, whether http client can deal with int64
    enablePprof: true # Whether to enable pprof middleware on the metrics port
  ip: # if not specified, use the first unicastable address
  port: 19530
  internalPort: 19529
  grpc:
@@ -251,81 +248,92 @@ proxy:
    serverMaxRecvSize: 67108864
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 67108864
  # query whose executed time exceeds the `slowQuerySpanInSeconds` can be considered slow, in seconds.
  slowQuerySpanInSeconds: 5

# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  taskMergeCap: 1
  taskExecutionCap: 256
  autoHandoff: true # Enable auto handoff
  autoBalance: true # Enable auto balance
  balancer: ScoreBasedBalancer # Balancer to use
  globalRowCountFactor: 0.1 # expert parameters, only used by scoreBasedBalancer
  scoreUnbalanceTolerationFactor: 0.05 # expert parameters, only used by scoreBasedBalancer
  reverseUnBalanceTolerationFactor: 1.3 #expert parameters, only used by scoreBasedBalancer
  autoBalanceChannel: true # Enable auto balance channel
  balancer: ScoreBasedBalancer # auto balancer used for segments on queryNodes
  globalRowCountFactor: 0.1 # the weight used when balancing segments among queryNodes
  scoreUnbalanceTolerationFactor: 0.05 # the least value for unbalanced extent between from and to nodes when doing balance
  reverseUnBalanceTolerationFactor: 1.3 # the largest value for unbalanced extent between from and to nodes after doing balance
  overloadedMemoryThresholdPercentage: 90 # The threshold percentage that memory overload
  balanceIntervalSeconds: 60
  memoryUsageMaxDifferencePercentage: 30
  checkInterval: 1000
  rowCountFactor: 0.4 # the row count weight used when balancing segments among queryNodes
  segmentCountFactor: 0.4 # the segment count weight used when balancing segments among queryNodes
  globalSegmentCountFactor: 0.1 # the segment count weight used when balancing segments among queryNodes
  segmentCountMaxSteps: 50 # segment count based plan generator max steps
  rowCountMaxSteps: 50 # segment count based plan generator max steps
  randomMaxSteps: 10 # segment count based plan generator max steps
  growingRowCountWeight: 4 # the memory weight of growing segment row count
  balanceCostThreshold: 0.001 # the threshold of balance cost, if the difference of cluster's cost after executing the balance plan is less than this value, the plan will not be executed
  checkSegmentInterval: 1000
  checkChannelInterval: 1000
  checkBalanceInterval: 10000
  checkIndexInterval: 10000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 120000 # 2 minute
  distPullInterval: 500
  heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
  loadTimeoutSeconds: 600
  distRequestTimeout: 5000 # the request timeout for querycoord fetching data distribution from querynodes, in milliseconds
  heatbeatWarningLag: 5000 # the lag value for querycoord report warning when last heatbeat is too old, in milliseconds
  checkHandoffInterval: 5000
  growingRowCountWeight: 4.0
  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
  enableActiveStandby: false
  checkInterval: 1000
  checkHealthInterval: 3000 # 3s, the interval when query coord try to check health of query node
  checkHealthRPCTimeout: 2000 # 100ms, the timeout of check health rpc to query node
  brokerTimeout: 5000 # 5000ms, querycoord broker rpc timeout
  collectionRecoverTimes: 3 # if collection recover times reach the limit during loading state, release it
  observerTaskParallel: 16 # the parallel observer dispatcher task number
  checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config
  checkNodeSessionInterval: 60 # the interval(in seconds) of check querynode cluster session
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  enableStoppingBalance: true # whether enable stopping balance
  cleanExcludeSegmentInterval: 60 # the time duration of clean pipeline exclude segment which used for filter invalid data, in seconds
  ip: # if not specified, use the first unicastable address
  port: 19531
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912
  taskMergeCap: 1
  taskExecutionCap: 256
  enableActiveStandby: false # Enable active-standby
  brokerTimeout: 5000 # broker rpc timeout in milliseconds

# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  stats:
    publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
  segcore:
    cgoPoolSizeRatio: 2.0 # cgo pool size ratio to max read concurrency
    knowhereThreadPoolNumRatio: 4
    # Use more threads to make better use of SSD throughput in disk index.
    # This parameter is only useful when enable-disk = true.
    # And this value should be a number greater than 1 and less than 32.
    knowhereThreadPoolNumRatio: 4 # The number of threads in knowhere's thread pool. If disk is enabled, the pool size will multiply with knowhereThreadPoolNumRatio([1, 32]).
    chunkRows: 128 # The number of vectors in a chunk.
    exprEvalBatchSize: 8192 # The batch size for executor get next
    interimIndex: # build a vector temperate index for growing segment or binlog to accelerate search
      enableIndex: true
      nlist: 128 # segment index nlist
      nprobe: 16 # nprobe to search segment, based on your accuracy requirement, must smaller than nlist
      memExpansionRate: 1.15 # the ratio of building interim index memory usage to raw data
    interimIndex:
      enableIndex: true # Enable segment build with index to accelerate vector search when segment is in growing or binlog.
      nlist: 128 # temp index nlist, recommend to set sqrt(chunkRows), must smaller than chunkRows/8
      nprobe: 16 # nprobe to search small index, based on your accuracy requirement, must smaller than nlist
      memExpansionRate: 1.15 # extra memory needed by building interim index
      buildParallelRate: 0.5 # the ratio of building interim index parallel matched with cpu num
  loadMemoryUsageFactor: 1 # The multiply factor of calculating the memory usage while loading segments
  enableDisk: false # enable querynode load disk index, and search on disk index
  maxDiskUsagePercentage: 95
  cache:
    enabled: true # deprecated, TODO: remove it
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024 # deprecated, TODO: remove it
    enabled: true
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024
    readAheadPolicy: willneed # The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed`
    # Specifies the necessity for warming up the chunk cache.
    # 1. If set to "sync" or "async," the original vector data
    # will be synchronously/asynchronously loaded into the
    # chunk cache during the load process. This approach has
    # the potential to substantially reduce query/search latency
    # for a specific duration post-load, albeit accompanied
    # by a concurrent increase in disk usage;
    # 2. If set to "off," original vector data will only
    # be loaded into the chunk cache during search/query.
    warmup: async # options: `sync, async, off`
    # options: async, sync, off.
    # Specifies the necessity for warming up the chunk cache.
    # 1. If set to "sync" or "async," the original vector data will be synchronously/asynchronously loaded into the
    # chunk cache during the load process. This approach has the potential to substantially reduce query/search latency
    # for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage;
    # 2. If set to "off," original vector data will only be loaded into the chunk cache during search/query.
    warmup: async
  mmap:
    mmapEnabled: false # Enable mmap for loading data
    mmapEnabled: false # Enable mmap for loading data
    lazyloadEnabled: false # Enable lazyload for loading data
  grouping:
    enabled: true
    maxNQ: 1000
@@ -334,42 +342,37 @@ queryNode:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read task (search task and query task).
    # Max read concurrency would be the value of runtime.NumCPU * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of runtime.NumCPU * 2.
    # Max read concurrency must greater than or equal to 1, and less than or equal to runtime.NumCPU * 100.
    # Max read concurrency would be the value of hardware.GetCPUNum * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of hardware.GetCPUNum * 2.
    # Max read concurrency must greater than or equal to 1, and less than or equal to hardware.GetCPUNum * 100.
    # (0, 100]
    maxReadConcurrentRatio: 1
    cpuRatio: 10 # ratio used to estimate read task cpu usage.
    maxTimestampLag: 86400
    # read task schedule policy: fifo(by default), user-task-polling.
    scheduleReadPolicy:
      # fifo: A FIFO queue support the schedule.
      # user-task-polling:
      #     The user's tasks will be polled one by one and scheduled.
      #     Scheduling is fair on task granularity.
      #     The policy is based on the username for authentication.
      #     And an empty username is considered the same user.
      #     When there are no multi-users, the policy decay into FIFO
      # The user's tasks will be polled one by one and scheduled.
      # Scheduling is fair on task granularity.
      # The policy is based on the username for authentication.
      # And an empty username is considered the same user.
      # When there are no multi-users, the policy decay into FIFO"
      name: fifo
      maxPendingTask: 10240
      # user-task-polling configure:
      taskQueueExpire: 60 # 1 min by default, expire time of inner user task queue since queue is empty.
      enableCrossUserGrouping: false # false by default Enable Cross user grouping when using user-task-polling policy. (close it if task of any user can not merge others).
      maxPendingTaskPerUser: 1024 # 50 by default, max pending task in scheduler per user.
  mmap:
    mmapEnabled: false # enable mmap global, if set true, will use mmap to load segment data
    lazyloadEnabled: false

  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
      taskQueueExpire: 60 # Control how long (many seconds) that queue retains since queue is empty
      enableCrossUserGrouping: false # Enable Cross user grouping when using user-task-polling policy. (Disable it if user's task can not merge each other)
      maxPendingTaskPerUser: 1024 # Max pending task per user in scheduler
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  enableSegmentPrune: false # use partition prune function on shard delegator
  ip: # if not specified, use the first unicastable address
  port: 21123
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912
  enableSegmentPrune: false # use partition prune function on shard delegator

indexCoord:
  bindIndexNodeMode:
@@ -385,9 +388,7 @@ indexNode:
    buildParallel: 1
  enableDisk: true # enable index node build disk vector index
  maxDiskUsagePercentage: 95
  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
  ip: # if not specified, use the first unicastable address
  port: 21121
  grpc:
    serverMaxSendSize: 536870912
@@ -398,18 +399,16 @@ dataCoord:
dataCoord:
  channel:
    watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). Datanode tickler update watch progress will reset timeout timer.
    balanceSilentDuration: 300 # The duration before the channelBalancer on datacoord to run
    balanceInterval: 360 #The interval for the channelBalancer on datacoord to check balance status
    balanceSilentDuration: 300 # The duration after which the channel manager start background channel balancing
    balanceInterval: 360 # The interval with which the channel manager check dml channel balance status
    checkInterval: 10 # The interval in seconds with which the channel manager advances channel states
    notifyChannelOperationTimeout: 5 # Timeout notifing channel operations (in seconds).
  segment:
    maxSize: 1024 # Maximum size of a segment in MB
    diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for collection which has Disk index
    diskSegmentMaxSize: 2048 # Maximun size of a segment in MB for collection which has Disk index
    sealProportion: 0.12
    # The time of the assignment expiration in ms
    # Warning! this parameter is an expert variable and closely related to data integrity. Without specific
    # target and solid understanding of the scenarios, it should not be changed. If it's necessary to alter
    # this parameter, make sure that the newly changed value is larger than the previous value used before restart
    # otherwise there could be a large possibility of data loss
    assignmentExpiration: 2000
    assignmentExpiration: 2000 # The time of the assignment expiration in ms
    allocLatestExpireAttempt: 200 # The time attempting to alloc latest lastExpire from rootCoord after restart
    maxLife: 86400 # The max lifetime of segment in seconds, 24*60*60
    # If a segment didn't accept dml records in maxIdleTime and the size of segment is greater than
    # minSizeFromIdleToSealed, Milvus will automatically seal it.
@@ -425,37 +424,44 @@ dataCoord:
    compactableProportion: 0.85
    # over (compactableProportion * segment max # of rows) rows.
    # MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
    # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
    # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
    expansionRate: 1.25
    # Whether to enable levelzero segment
    enableLevelZero: true
  autoUpgradeSegmentIndex: false # whether auto upgrade segment index to index engine's version
  enableCompaction: true # Enable data segment compaction
  compaction:
    enableAutoCompaction: true
    rpcTimeout: 10 # compaction rpc request timeout in seconds
    maxParallelTaskNum: 10 # max parallel compaction task number
    indexBasedCompaction: true

    rpcTimeout: 10
    maxParallelTaskNum: 10
    workerMaxParallelTaskNum: 2
    levelzero:
      forceTrigger:
        minSize: 8388608 # The minmum size in bytes to force trigger a LevelZero Compaction, default as 8MB
        deltalogMinNum: 10 # the minimum number of deltalog files to force trigger a LevelZero Compaction
  import:
    filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task.
    taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state.
    maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request.
    waitForIndex: true # Indicates whether the import operation waits for the completion of index building.

        maxSize: 67108864 # The maxmum size in bytes to force trigger a LevelZero Compaction, default as 64MB
        deltalogMinNum: 10 # The minimum number of deltalog files to force trigger a LevelZero Compaction
        deltalogMaxNum: 30 # The maxmum number of deltalog files to force trigger a LevelZero Compaction, default as 30
  enableGarbageCollection: true
  gc:
    interval: 3600 # gc interval in seconds
    scanInterval: 168 #gc residual file scan interval in hours
    missingTolerance: 3600 # file meta missing tolerance duration in seconds, 3600
    dropTolerance: 10800 # file belongs to dropped entity tolerance duration in seconds. 10800
    missingTolerance: 3600 # file meta missing tolerance duration in seconds, default to 1hr
    dropTolerance: 10800 # file belongs to dropped entity tolerance duration in seconds. 3600
    removeConcurrent: 32 # number of concurrent goroutines to remove dropped s3 objects
    scanInterval: 168 # garbage collection scan residue interval in hours
  enableActiveStandby: false
  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
  brokerTimeout: 5000 # 5000ms, dataCoord broker rpc timeout
  autoBalance: true # Enable auto balance
  checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config
  import:
    filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task.
    taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state.
    maxSizeInMBPerImportTask: 6144 # To prevent generating of small segments, we will re-group imported files. This parameter represents the sum of file sizes in each group (each ImportTask).
    scheduleInterval: 2 # The interval for scheduling import, measured in seconds.
    checkIntervalHigh: 2 # The interval for checking import, measured in seconds, is set to a high frequency for the import checker.
    checkIntervalLow: 120 # The interval for checking import, measured in seconds, is set to a low frequency for the import checker.
    maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request.
    waitForIndex: true # Indicates whether the import operation waits for the completion of index building.
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 13333
  grpc:
    serverMaxSendSize: 536870912
@@ -468,31 +474,23 @@ dataNode:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
    maxParallelSyncMgrTasks: 256 #The max concurrent sync task number of datanode sync mgr globally
    maxParallelSyncMgrTasks: 256 # The max concurrent sync task number of datanode sync mgr globally
    skipMode:
      # when there are only timetick msg in flowgraph for a while (longer than coldTime),
      # flowGraph will turn on skip mode to skip most timeticks to reduce cost, especially there are a lot of channels
      enable: true
      skipNum: 4
      coldTime: 60
      enable: true # Support skip some timetick message to reduce CPU usage
      skipNum: 4 # Consume one for every n records skipped
      coldTime: 60 # Turn on skip mode after there are only timetick msg for x seconds
  segment:
    insertBufSize: 16777216 # Max buffer size to flush for a single segment.
    deleteBufBytes: 67108864 # Max buffer size to flush del for a single channel
    deleteBufBytes: 67108864 # Max buffer size in bytes to flush del for a single channel, default as 16MB
    syncPeriod: 600 # The period to sync segments if buffer is not empty.
  # can specify ip for example
  # ip: 127.0.0.1
  ip: # if not specify address, will use the first unicastable address as local ip
  port: 21124
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912
  memory:
    forceSyncEnable: true # `true` to force sync if memory usage is too high
    forceSyncEnable: true # Set true to force sync if memory usage is too high
    forceSyncSegmentNum: 1 # number of segments to sync, segments with top largest buffer will be synced.
    checkInterval: 3000 # the interal to check datanode memory usage, in milliseconds
    forceSyncWatermark: 0.5 # memory watermark for standalone, upon reaching this watermark, segments will be synced.
  timetick:
    byRPC: true
    interval: 500
  channel:
    # specify the size of global work pool of all channels
    # if this parameter <= 0, will set it as the maximum number of CPUs that can be executing
@@ -501,16 +499,30 @@ dataNode:
    # specify the size of global work pool for channel checkpoint updating
    # if this parameter <= 0, will set it as 10
    updateChannelCheckpointMaxParallel: 10
    updateChannelCheckpointInterval: 60 # the interval duration(in seconds) for datanode to update channel checkpoint of each channel
    updateChannelCheckpointRPCTimeout: 20 # timeout in seconds for UpdateChannelCheckpoint RPC call
    maxChannelCheckpointsPerPRC: 128 # The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC.
    channelCheckpointUpdateTickInSeconds: 10 # The frequency, in seconds, at which the channel checkpoint updater executes updates.
  import:
    maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode.
    maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files.
    readBufferSizeInMB: 16 # The data block size (in MB) read from chunk manager by the datanode during import.
  compaction:
    levelZeroBatchMemoryRatio: 0.05 # The minimal memory ratio of free memory for level zero compaction executing in batch mode
  gracefulStopTimeout: 1800 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 21124
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Configures the system log output.
log:
  level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  file:
    rootPath: # root dir path to put logs, default "" means no log file will print. please adjust in embedded Milvus: /tmp/milvus/logs
    rootPath: # root dir path to put logs, default "" means no log file will print. please adjust in embedded Milvus: /tmp/milvus/logs
    maxSize: 300 # MB
    maxAge: 10 # Maximum time for log retention in day.
    maxBackups: 20
@@ -520,19 +532,22 @@ log:
grpc:
  log:
    level: WARNING
  serverMaxSendSize: 536870912
  serverMaxRecvSize: 268435456
  clientMaxSendSize: 268435456
  clientMaxRecvSize: 536870912
  serverMaxSendSize: 536870912
  serverMaxRecvSize: 268435456
  gracefulStopTimeout: 10 # second, time to wait graceful stop finish
  client:
    compressionEnabled: false
    dialTimeout: 200
    keepAliveTime: 10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 10
    initialBackOff: 0.2 # seconds
    maxBackoff: 10 # seconds
    backoffMultiplier: 2.0 # deprecated
    initialBackoff: 0.2
    maxBackoff: 10
    minResetInterval: 1000
    maxCancelError: 32
    minSessionCheckInterval: 200
  clientMaxSendSize: 268435456
  clientMaxRecvSize: 536870912

# Configure the proxy tls enable.
tls:
@@ -547,17 +562,10 @@ common:
    rootCoordStatistics: rootcoord-statistics
    rootCoordDml: rootcoord-dml
    replicateMsg: replicate-msg
    rootCoordDelta: rootcoord-delta
    search: search
    searchResult: searchResult
    queryTimeTick: queryTimeTick
    dataCoordStatistic: datacoord-statistics-channel
    dataCoordTimeTick: datacoord-timetick-channel
    dataCoordSegmentInfo: segment-info-channel
  subNamePrefix:
    proxySubNamePrefix: proxy
    rootCoordSubNamePrefix: rootCoord
    queryNodeSubNamePrefix: queryNode
    dataCoordSubNamePrefix: dataCoord
    dataNodeSubNamePrefix: dataNode
  defaultPartitionName: _default # default partition name for a collection
@@ -565,9 +573,9 @@ common:
  entityExpiration: -1 # Entity expiration in seconds, CAUTION -1 means never expire
  indexSliceSize: 16 # MB
  threadCoreCoefficient:
    highPriority: 10 # This parameter specify how many times the number of threads is the number of cores in high priority thread pool
    middlePriority: 5 # This parameter specify how many times the number of threads is the number of cores in middle priority thread pool
    lowPriority: 1 # This parameter specify how many times the number of threads is the number of cores in low priority thread pool
    highPriority: 10 # This parameter specify how many times the number of threads is the number of cores in high priority pool
    middlePriority: 5 # This parameter specify how many times the number of threads is the number of cores in middle priority pool
    lowPriority: 1 # This parameter specify how many times the number of threads is the number of cores in low priority pool
  buildIndexThreadPoolRatio: 0.75
  DiskIndex:
    MaxDegree: 56
@@ -588,34 +596,24 @@ common:
  authorizationEnabled: false
  # The superusers will ignore some system check processes,
  # like the old password verification when updating the credential
  # superUsers: root
  superUsers:
  tlsMode: 0
  session:
    ttl: 30 # ttl value when session granting a lease to register service
    retryTimes: 30 # retry times when session sending etcd requests
  storage:
    scheme: "s3"
    enablev2: false

  # preCreatedTopic decides whether using existed topic
  preCreatedTopic:
    enabled: false
    # support pre-created topics
    # the name of pre-created topics
    names: ['topic1', 'topic2']
    # need to set a separated topic to stand for currently consumed timestamp for each channel
    timeticker: 'timetick-channel'

  locks:
    metrics:
      enable: false
      enable: false # whether gather statistics for metrics locks
    threshold:
      info: 500 # minimum milliseconds for printing durations in info level
      warn: 1000 # minimum milliseconds for printing durations in warn level
  storage:
    scheme: s3
    enablev2: false
  ttMsgEnabled: true # Whether the instance disable sending ts messages
  traceLogMode: 0 # trace request info, 0: none, 1: simple request info, like collection/partition/database name, 2: request detail
  bloomFilterSize: 100000
  maxBloomFalsePositive: 0.05
  traceLogMode: 0 # trace request info
  bloomFilterSize: 100000 # bloom filter initial size
  maxBloomFalsePositive: 0.05 # max false positive rate for bloom filter

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
@ -630,9 +628,6 @@ common:
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
  enabled: true # `true` to enable quota and limits, `false` to disable.
  limits:
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536
  # quotaCenterCollectInterval is the time interval that quotaCenter
  # collects metrics from Proxies, Query cluster and Data cluster.
  # seconds, (0 ~ 65536)
@ -657,33 +652,36 @@ quotaAndLimits:
    # The maximum rate will not be greater than max.
    enabled: false
    insertRate:
      max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    upsertRate:
      max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    deleteRate:
      max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    bulkLoadRate:
      collection:
        max: -1 # MB/s, default no limit, not support yet. TODO: limit bulkLoad rate
      max: -1 # MB/s, default no limit, not support yet. TODO: limit bulkLoad rate
      collection:
        max: -1 # MB/s, default no limit, not support yet. TODO: limit collection bulkLoad rate
  dql:
    # dql limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    searchRate:
      max: -1 # vps (vectors per second), default no limit
      collection:
        max: -1 # vps (vectors per second), default no limit
      max: -1 # vps (vectors per second), default no limit
    queryRate:
      max: -1 # qps, default no limit
      collection:
        max: -1 # qps, default no limit
      max: -1 # qps, default no limit
  limits:
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536
  limitWriting:
    # forceDeny false means dml requests are allowed (except for some
    # specific conditions, such as the memory of nodes reaching the water mark), true means always reject all dml requests.
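Every rate above follows the same convention: -1 disables the limit, any other value caps throughput (MB/s, vps, or qps). A rough sketch of turning one such knob into a token bucket with golang.org/x/time/rate; the mapping is an assumption for illustration, not the quota center's actual logic:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// limiterFor maps a "max" knob to a limiter: negative means unlimited,
// otherwise max megabytes per second with a one-second burst.
func limiterFor(maxMBps float64) *rate.Limiter {
	if maxMBps < 0 {
		return rate.NewLimiter(rate.Inf, 0) // -1: no limit
	}
	bytesPerSec := maxMBps * 1024 * 1024
	return rate.NewLimiter(rate.Limit(bytesPerSec), int(bytesPerSec))
}

func main() {
	insert := limiterFor(-1) // e.g. insertRate.max above
	fmt.Println("1 MB insert admitted:", insert.AllowN(time.Now(), 1<<20))
}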
@ -707,7 +705,7 @@ quotaAndLimits:
    growingSegmentsSizeProtection:
      # No action will be taken if the growing segments size is less than the low watermark.
      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
      # but the rate will not be lower than `minRateRatio * dmlRate`.
      # but the rate will not be lower than minRateRatio * dmlRate.
      enabled: false
      minRateRatio: 0.5
      lowWaterLevel: 0.2
@ -752,16 +750,11 @@ trace:
  # optional values: [0, 1]
  # Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
  sampleFraction: 0
  otlp:
    endpoint: # "127.0.0.1:4318"
    secure: true
  jaeger:
    url: # "http://127.0.0.1:14268/api/traces"
    # when exporter is jaeger should set the jaeger's URL

autoIndex:
  params:
    build: '{"M": 18,"efConstruction": 240,"index_type": "HNSW", "metric_type": "IP"}'
    url: # when exporter is jaeger should set the jaeger's URL
  otlp:
    endpoint: # example: "127.0.0.1:4318"
    secure: true

#when using GPU indexing, Milvus will utilize a memory pool to avoid frequent memory allocation and deallocation.
#here, you can set the size of the memory occupied by the memory pool, with the unit being MB.

@ -770,5 +763,5 @@ autoIndex:
#milvus will automatically initialize half of the available GPU memory,
#maxMemSize will be the whole available GPU memory.
gpu:
  initMemSize: 0 # sets the initial memory pool size.
  maxMemSize: 0 # when the memory pool is not large enough for the first time, Milvus will attempt to expand the memory pool once. maxMemSize sets the maximum memory usage limit.
  initMemSize: # Gpu Memory Pool init size
  maxMemSize: # Gpu Memory Pool Max size
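For reference, a minimal sketch of feeding trace.otlp.endpoint and trace.otlp.secure into an OpenTelemetry OTLP/HTTP exporter; this is generic OpenTelemetry wiring under assumed values, not Milvus's tracer setup:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	opts := []otlptracehttp.Option{
		otlptracehttp.WithEndpoint("127.0.0.1:4318"), // trace.otlp.endpoint
	}
	secure := true // trace.otlp.secure
	if !secure {
		// Fall back to plain HTTP when TLS is disabled.
		opts = append(opts, otlptracehttp.WithInsecure())
	}
	exporter, err := otlptracehttp.New(context.Background(), opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer exporter.Shutdown(context.Background())
	log.Println("OTLP trace exporter ready")
}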
@ -67,6 +67,7 @@ func (p *autoIndexConfig) init(base *BaseTable) {
	Key: "autoIndex.params.build",
	Version: "2.2.0",
	DefaultValue: `{"M": 18,"efConstruction": 240,"index_type": "HNSW", "metric_type": "IP"}`,
	Export: true,
}
p.IndexParams.Init(base.mgr)
@ -669,6 +669,7 @@ like the old password verification when updating the credential`,
	Key: "common.storage.enablev2",
	Version: "2.3.1",
	DefaultValue: "false",
	Export: true,
}
p.EnableStorageV2.Init(base.mgr)

@ -676,6 +677,7 @@ like the old password verification when updating the credential`,
	Key: "common.storage.scheme",
	Version: "2.3.4",
	DefaultValue: "s3",
	Export: true,
}
p.StorageScheme.Init(base.mgr)

@ -691,6 +693,7 @@ like the old password verification when updating the credential`,
	Version: "2.3.2",
	DefaultValue: "true",
	Doc: "Whether the instance disables sending ts messages",
	Export: true,
}
p.TTMsgEnabled.Init(base.mgr)

@ -699,6 +702,7 @@ like the old password verification when updating the credential`,
	Version: "2.3.4",
	DefaultValue: "0",
	Doc: "trace request info",
	Export: true,
}
p.TraceLogMode.Init(base.mgr)

@ -707,6 +711,7 @@ like the old password verification when updating the credential`,
	Version: "2.3.2",
	DefaultValue: "100000",
	Doc: "bloom filter initial size",
	Export: true,
}
p.BloomFilterSize.Init(base.mgr)

@ -715,6 +720,7 @@ like the old password verification when updating the credential`,
	Version: "2.3.2",
	DefaultValue: "0.05",
	Doc: "max false positive rate for bloom filter",
	Export: true,
}
p.MaxBloomFalsePositive.Init(base.mgr)
}
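bloomFilterSize and maxBloomFalsePositive map directly onto the standard Bloom filter sizing formulas; this sketch just evaluates m = -n*ln(p)/(ln 2)^2 and k = (m/n)*ln 2 for the defaults above and is not Milvus code:

package main

import (
	"fmt"
	"math"
)

func main() {
	n := 100000.0 // common.bloomFilterSize: expected insertions
	p := 0.05     // common.maxBloomFalsePositive: target false-positive rate

	m := -n * math.Log(p) / (math.Ln2 * math.Ln2) // optimal number of bits
	k := m / n * math.Ln2                         // optimal number of hash functions

	fmt.Printf("bits: %.0f (%.1f KiB), hash functions: %.0f\n", m, m/8/1024, math.Ceil(k))
}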
@ -755,8 +761,9 @@ func (t *traceConfig) init(base *BaseTable) {
	Key: "trace.exporter",
	Version: "2.3.0",
	Doc: `trace exporter type, default is stdout,
optional values: ['stdout', 'jaeger']`,
	Export: true,
optional values: ['stdout', 'jaeger', 'otlp']`,
	DefaultValue: "stdout",
	Export: true,
}
t.Exporter.Init(base.mgr)

@ -783,6 +790,7 @@ Fractions >= 1 will always sample. Fractions < 0 are treated as zero.`,
	Key: "trace.otlp.endpoint",
	Version: "2.3.0",
	Doc: "example: \"127.0.0.1:4318\"",
	Export: true,
}
t.OtlpEndpoint.Init(base.mgr)

@ -790,6 +798,7 @@ Fractions >= 1 will always sample. Fractions < 0 are treated as zero.`,
	Key: "trace.otlp.secure",
	Version: "2.4.0",
	DefaultValue: "true",
	Export: true,
}
t.OtlpSecure.Init(base.mgr)
}
@ -1182,6 +1191,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "false",
	Doc: "if use access log",
	Export: true,
}
p.AccessLog.Enable.Init(base.mgr)

@ -1190,6 +1200,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "false",
	Doc: "if upload sealed access log file to minio",
	Export: true,
}
p.AccessLog.MinioEnable.Init(base.mgr)

@ -1215,6 +1226,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "64",
	Doc: "Max size for a single file, in MB.",
	Export: true,
}
p.AccessLog.MaxSize.Init(base.mgr)

@ -1223,6 +1235,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.3.2",
	DefaultValue: "10240",
	Doc: "Size of log of memory cache, in B",
	Export: true,
}
p.AccessLog.CacheSize.Init(base.mgr)

@ -1239,6 +1252,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "0",
	Doc: "Max time for single access log file in seconds",
	Export: true,
}
p.AccessLog.RotatedTime.Init(base.mgr)

@ -1247,6 +1261,7 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "access_log/",
	Doc: "File path in minIO",
	Export: true,
}
p.AccessLog.RemotePath.Init(base.mgr)

@ -1255,12 +1270,15 @@ please adjust in embedded Milvus: false`,
	Version: "2.2.0",
	DefaultValue: "0",
	Doc: "Max time for log file in minIO, in hours",
	Export: true,
}
p.AccessLog.RemoteMaxTime.Init(base.mgr)

p.AccessLog.Formatter = ParamGroup{
	KeyPrefix: "proxy.accessLog.formatters.",
	Version: "2.3.4",
	Export: true,
	Doc: "access log formatters for specified methods, if not set, use the base formatter.",
}
p.AccessLog.Formatter.Init(base.mgr)
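The access-log knobs above describe a size- and age-based rotation policy (maxSize in MB, rotatedTime in seconds, with 0 disabling time-based rotation). A toy sketch of that decision; the rotator type is hypothetical, not the proxy's actual writer:

package main

import (
	"fmt"
	"time"
)

type rotator struct {
	maxBytes    int64         // proxy.accessLog.maxSize, converted from MB
	maxAge      time.Duration // proxy.accessLog.rotatedTime; 0 disables age-based rotation
	currentSize int64
	openedAt    time.Time
}

// shouldRotate reports whether the current file exceeded either budget.
func (r *rotator) shouldRotate(now time.Time) bool {
	if r.currentSize >= r.maxBytes {
		return true
	}
	return r.maxAge > 0 && now.Sub(r.openedAt) >= r.maxAge
}

func main() {
	r := &rotator{maxBytes: 64 << 20, openedAt: time.Now(), currentSize: 65 << 20}
	fmt.Println("rotate:", r.shouldRotate(time.Now())) // true: 65 MB > 64 MB
}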
@ -2179,6 +2197,7 @@ func (p *queryNodeConfig) init(base *BaseTable) {
	DefaultValue: "false",
	FallbackKeys: []string{"queryNode.mmapEnabled"},
	Doc: "Enable mmap for loading data",
	Export: true,
}
p.MmapEnabled.Init(base.mgr)
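FallbackKeys is how a renamed key keeps old configs working: the primary key wins, then each fallback is tried in order, then the default applies. A minimal sketch of that lookup order with a plain map standing in for the config store (the primary key name here is assumed for illustration):

package main

import "fmt"

// getWithFallback resolves key, then fallbacks in order, then the default.
func getWithFallback(cfg map[string]string, key string, fallbacks []string, def string) string {
	if v, ok := cfg[key]; ok {
		return v
	}
	for _, fk := range fallbacks {
		if v, ok := cfg[fk]; ok {
			return v
		}
	}
	return def
}

func main() {
	cfg := map[string]string{"queryNode.mmapEnabled": "true"} // only the legacy key is set
	v := getWithFallback(cfg, "queryNode.mmap.mmapEnabled", []string{"queryNode.mmapEnabled"}, "false")
	fmt.Println(v) // "true", resolved through the fallback key
}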
@ -2187,6 +2206,7 @@ func (p *queryNodeConfig) init(base *BaseTable) {
	Version: "2.4.0",
	DefaultValue: "false",
	Doc: "Enable lazyload for loading data",
	Export: true,
}
p.LazyLoadEnabled.Init(base.mgr)

@ -2195,6 +2215,7 @@ func (p *queryNodeConfig) init(base *BaseTable) {
	Version: "2.3.2",
	DefaultValue: "willneed",
	Doc: "The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed`",
	Export: true,
}
p.ReadAheadPolicy.Init(base.mgr)

@ -2208,6 +2229,7 @@ Specifies the necessity for warming up the chunk cache.
chunk cache during the load process. This approach has the potential to substantially reduce query/search latency
for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage;
2. If set to "off," original vector data will only be loaded into the chunk cache during search/query.`,
	Export: true,
}
p.ChunkCacheWarmingUp.Init(base.mgr)
@ -2308,7 +2330,9 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
// use local storage path to check correct device
localStoragePath := base.Get("localStorage.path")
if _, err := os.Stat(localStoragePath); os.IsNotExist(err) {
	os.MkdirAll(localStoragePath, os.ModePerm)
	if err := os.MkdirAll(localStoragePath, os.ModePerm); err != nil {
		log.Fatal("failed to mkdir", zap.String("localStoragePath", localStoragePath), zap.Error(err))
	}
}
diskUsage, err := disk.Usage(localStoragePath)
if err != nil {
@ -2387,7 +2411,6 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Key: "queryNode.gracefulStopTimeout",
	Version: "2.2.1",
	FallbackKeys: []string{"common.gracefulStopTimeout"},
	Export: true,
}
p.GracefulStopTimeout.Init(base.mgr)
@ -2427,7 +2450,14 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Key: "queryNode.scheduler.scheduleReadPolicy.name",
	Version: "2.3.0",
	DefaultValue: "fifo",
	Doc: "Control how to schedule query/search read task in query node",
	Doc: `fifo: A FIFO queue supports the schedule.
user-task-polling:
	The user's tasks will be polled one by one and scheduled.
	Scheduling is fair on task granularity.
	The policy is based on the username for authentication,
	and an empty username is considered the same user.
	When there are no multiple users, the policy decays into FIFO`,
	Export: true,
}
p.SchedulePolicyName.Init(base.mgr)
p.SchedulePolicyTaskQueueExpire = ParamItem{

@ -2435,6 +2465,7 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Version: "2.3.0",
	DefaultValue: "60",
	Doc: "Control how long (in seconds) a task queue is retained after it becomes empty",
	Export: true,
}
p.SchedulePolicyTaskQueueExpire.Init(base.mgr)
p.SchedulePolicyEnableCrossUserGrouping = ParamItem{

@ -2442,6 +2473,7 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Version: "2.3.0",
	DefaultValue: "false",
	Doc: "Enable cross-user grouping when using the user-task-polling policy. (Disable it if users' tasks cannot be merged with each other)",
	Export: true,
}
p.SchedulePolicyEnableCrossUserGrouping.Init(base.mgr)
p.SchedulePolicyMaxPendingTaskPerUser = ParamItem{

@ -2449,6 +2481,7 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Version: "2.3.0",
	DefaultValue: "1024",
	Doc: "Max pending task per user in scheduler",
	Export: true,
}
p.SchedulePolicyMaxPendingTaskPerUser.Init(base.mgr)
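A compact sketch of the user-task-polling idea the Doc above describes: tasks are grouped per user and users are polled round-robin, so scheduling is fair at task granularity, and with a single (or empty) username it degenerates into FIFO. This illustrates the policy only; it is not the query node's scheduler:

package main

import "fmt"

type task struct {
	user, name string
}

type pollingScheduler struct {
	order  []string          // users in arrival order
	queues map[string][]task // per-user FIFO queues
	next   int               // index of the user to poll next
}

func newPollingScheduler() *pollingScheduler {
	return &pollingScheduler{queues: make(map[string][]task)}
}

func (s *pollingScheduler) push(t task) {
	if _, ok := s.queues[t.user]; !ok {
		s.order = append(s.order, t.user)
	}
	s.queues[t.user] = append(s.queues[t.user], t)
}

// pop takes one task per user turn, skipping users with empty queues.
func (s *pollingScheduler) pop() (task, bool) {
	for i := 0; i < len(s.order); i++ {
		user := s.order[(s.next+i)%len(s.order)]
		if q := s.queues[user]; len(q) > 0 {
			s.queues[user] = q[1:]
			s.next = (s.next + i + 1) % len(s.order)
			return q[0], true
		}
	}
	return task{}, false
}

func main() {
	s := newPollingScheduler()
	s.push(task{"alice", "q1"})
	s.push(task{"alice", "q2"})
	s.push(task{"bob", "q3"})
	for t, ok := s.pop(); ok; t, ok = s.pop() {
		fmt.Println(t.user, t.name) // alice q1, bob q3, alice q2
	}
}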
@ -2498,6 +2531,7 @@ Max read concurrency must greater than or equal to 1, and less than or equal to
	Version: "2.3.4",
	DefaultValue: "false",
	Doc: "use partition prune function on shard delegator",
	Export: true,
}
p.EnableSegmentPrune.Init(base.mgr)
p.DefaultSegmentFilterRatio = ParamItem{
@ -2888,6 +2922,7 @@ During compaction, the size of segment # of rows is able to exceed segment max #
	Version: "2.4.0",
	Doc: "The minimum size in bytes to force trigger a LevelZero Compaction, default as 8MB",
	DefaultValue: "8388608",
	Export: true,
}
p.LevelZeroCompactionTriggerMinSize.Init(base.mgr)

@ -2896,6 +2931,7 @@ During compaction, the size of segment # of rows is able to exceed segment max #
	Version: "2.4.0",
	Doc: "The maximum size in bytes to force trigger a LevelZero Compaction, default as 64MB",
	DefaultValue: "67108864",
	Export: true,
}
p.LevelZeroCompactionTriggerMaxSize.Init(base.mgr)

@ -2904,6 +2940,7 @@ During compaction, the size of segment # of rows is able to exceed segment max #
	Version: "2.4.0",
	Doc: "The minimum number of deltalog files to force trigger a LevelZero Compaction",
	DefaultValue: "10",
	Export: true,
}
p.LevelZeroCompactionTriggerDeltalogMinNum.Init(base.mgr)

@ -2912,6 +2949,7 @@ During compaction, the size of segment # of rows is able to exceed segment max #
	Version: "2.4.0",
	Doc: "The maximum number of deltalog files to force trigger a LevelZero Compaction, default as 30",
	DefaultValue: "30",
	Export: true,
}
p.LevelZeroCompactionTriggerDeltalogMaxNum.Init(base.mgr)
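The four trigger knobs above bracket a LevelZero compaction between minimum and maximum thresholds on deltalog size and file count. A sketch of one plausible force-trigger rule under those knobs; the exact combination is an assumption, not the data coordinator's actual policy:

package main

import "fmt"

type l0TriggerConfig struct {
	minSize, maxSize int64 // force-trigger size thresholds, in bytes (8 MB / 64 MB defaults)
	minNum, maxNum   int   // force-trigger deltalog file-count thresholds (10 / 30 defaults)
}

// shouldForceTrigger fires when either maximum is crossed, or when both
// minimums are crossed together.
func shouldForceTrigger(cfg l0TriggerConfig, totalSize int64, numFiles int) bool {
	return totalSize >= cfg.maxSize || numFiles >= cfg.maxNum ||
		(totalSize >= cfg.minSize && numFiles >= cfg.minNum)
}

func main() {
	cfg := l0TriggerConfig{minSize: 8 << 20, maxSize: 64 << 20, minNum: 10, maxNum: 30}
	fmt.Println(shouldForceTrigger(cfg, 9<<20, 12)) // true: both minimums crossed
	fmt.Println(shouldForceTrigger(cfg, 1<<20, 31)) // true: file count at max
	fmt.Println(shouldForceTrigger(cfg, 1<<20, 2))  // false
}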
@ -3237,7 +3275,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.FlowGraphMaxParallelism.Init(base.mgr)

p.FlowGraphSkipModeEnable = ParamItem{
	Key: "datanode.dataSync.skipMode.enable",
	Key: "dataNode.dataSync.skipMode.enable",
	Version: "2.3.4",
	DefaultValue: "true",
	PanicIfEmpty: false,

@ -3247,7 +3285,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.FlowGraphSkipModeEnable.Init(base.mgr)

p.FlowGraphSkipModeSkipNum = ParamItem{
	Key: "datanode.dataSync.skipMode.skipNum",
	Key: "dataNode.dataSync.skipMode.skipNum",
	Version: "2.3.4",
	DefaultValue: "4",
	PanicIfEmpty: false,

@ -3257,7 +3295,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.FlowGraphSkipModeSkipNum.Init(base.mgr)

p.FlowGraphSkipModeColdTime = ParamItem{
	Key: "datanode.dataSync.skipMode.coldTime",
	Key: "dataNode.dataSync.skipMode.coldTime",
	Version: "2.3.4",
	DefaultValue: "60",
	PanicIfEmpty: false,

@ -3271,7 +3309,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
	Version: "2.3.0",
	DefaultValue: "6",
	Doc: "deprecated, legacy flush manager max concurrency number",
	Export: true,
	Export: false,
}
p.MaxParallelSyncTaskNum.Init(base.mgr)
@ -3296,21 +3334,25 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.FlushInsertBufferSize.Init(base.mgr)

p.MemoryForceSyncEnable = ParamItem{
	Key: "datanode.memory.forceSyncEnable",
	Key: "dataNode.memory.forceSyncEnable",
	Version: "2.2.4",
	DefaultValue: "true",
	Doc: "Set true to force sync if memory usage is too high",
	Export: true,
}
p.MemoryForceSyncEnable.Init(base.mgr)

p.MemoryForceSyncSegmentNum = ParamItem{
	Key: "datanode.memory.forceSyncSegmentNum",
	Key: "dataNode.memory.forceSyncSegmentNum",
	Version: "2.2.4",
	DefaultValue: "1",
	Doc: "number of segments to sync; the segments with the largest buffers will be synced.",
	Export: true,
}
p.MemoryForceSyncSegmentNum.Init(base.mgr)

p.MemoryCheckInterval = ParamItem{
	Key: "datanode.memory.checkInterval",
	Key: "dataNode.memory.checkInterval",
	Version: "2.4.0",
	DefaultValue: "3000", // milliseconds
	Doc: "the interval to check datanode memory usage, in milliseconds",
@ -3320,16 +3362,20 @@ func (p *dataNodeConfig) init(base *BaseTable) {

if os.Getenv(metricsinfo.DeployModeEnvKey) == metricsinfo.StandaloneDeployMode {
	p.MemoryForceSyncWatermark = ParamItem{
		Key: "datanode.memory.forceSyncWatermark",
		Key: "dataNode.memory.forceSyncWatermark",
		Version: "2.4.0",
		DefaultValue: "0.2",
		Doc: "memory watermark for standalone, upon reaching this watermark, segments will be synced.",
		Export: true,
	}
} else {
	log.Info("DeployModeEnv is not set, use default", zap.Float64("default", 0.5))
	p.MemoryForceSyncWatermark = ParamItem{
		Key: "datanode.memory.forceSyncWatermark",
		Key: "dataNode.memory.forceSyncWatermark",
		Version: "2.4.0",
		DefaultValue: "0.5",
		Doc: "memory watermark for standalone, upon reaching this watermark, segments will be synced.",
		Export: true,
	}
}
p.MemoryForceSyncWatermark.Init(base.mgr)
@ -3360,7 +3406,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.SyncPeriod.Init(base.mgr)

p.WatchEventTicklerInterval = ParamItem{
	Key: "datanode.segment.watchEventTicklerInterval",
	Key: "dataNode.segment.watchEventTicklerInterval",
	Version: "2.2.3",
	DefaultValue: "15",
}

@ -3381,18 +3427,20 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.FileReadConcurrency.Init(base.mgr)

p.DataNodeTimeTickByRPC = ParamItem{
	Key: "datanode.timetick.byRPC",
	Key: "dataNode.timetick.byRPC",
	Version: "2.2.9",
	PanicIfEmpty: false,
	DefaultValue: "true",
	Export: true,
}
p.DataNodeTimeTickByRPC.Init(base.mgr)

p.DataNodeTimeTickInterval = ParamItem{
	Key: "datanode.timetick.interval",
	Key: "dataNode.timetick.interval",
	Version: "2.2.5",
	PanicIfEmpty: false,
	DefaultValue: "500",
	Export: true,
}
p.DataNodeTimeTickInterval.Init(base.mgr)
@ -3405,55 +3453,66 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.SkipBFStatsLoad.Init(base.mgr)

p.ChannelWorkPoolSize = ParamItem{
	Key: "datanode.channel.workPoolSize",
	Key: "dataNode.channel.workPoolSize",
	Version: "2.3.2",
	PanicIfEmpty: false,
	DefaultValue: "-1",
	Doc: `specify the size of the global work pool of all channels
if this parameter <= 0, it will be set to the maximum number of CPUs that can be executing
it is suggested to set it bigger when there are many collections, to avoid blocking`,
	Export: true,
}
p.ChannelWorkPoolSize.Init(base.mgr)

p.UpdateChannelCheckpointMaxParallel = ParamItem{
	Key: "datanode.channel.updateChannelCheckpointMaxParallel",
	Key: "dataNode.channel.updateChannelCheckpointMaxParallel",
	Version: "2.3.4",
	PanicIfEmpty: false,
	DefaultValue: "10",
	Doc: `specify the size of the global work pool for channel checkpoint updating
if this parameter <= 0, it will be set to 10`,
	Export: true,
}
p.UpdateChannelCheckpointMaxParallel.Init(base.mgr)

p.UpdateChannelCheckpointInterval = ParamItem{
	Key: "datanode.channel.updateChannelCheckpointInterval",
	Key: "dataNode.channel.updateChannelCheckpointInterval",
	Version: "2.4.0",
	Doc: "the interval duration (in seconds) for the datanode to update the channel checkpoint of each channel",
	DefaultValue: "60",
	Export: true,
}
p.UpdateChannelCheckpointInterval.Init(base.mgr)

p.UpdateChannelCheckpointRPCTimeout = ParamItem{
	Key: "datanode.channel.updateChannelCheckpointRPCTimeout",
	Key: "dataNode.channel.updateChannelCheckpointRPCTimeout",
	Version: "2.4.0",
	Doc: "timeout in seconds for UpdateChannelCheckpoint RPC call",
	DefaultValue: "20",
	Export: true,
}
p.UpdateChannelCheckpointRPCTimeout.Init(base.mgr)

p.MaxChannelCheckpointsPerRPC = ParamItem{
	Key: "datanode.channel.maxChannelCheckpointsPerPRC",
	Key: "dataNode.channel.maxChannelCheckpointsPerPRC",
	Version: "2.4.0",
	Doc: "The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC.",
	DefaultValue: "128",
	Export: true,
}
p.MaxChannelCheckpointsPerRPC.Init(base.mgr)

p.ChannelCheckpointUpdateTickInSeconds = ParamItem{
	Key: "datanode.channel.channelCheckpointUpdateTickInSeconds",
	Key: "dataNode.channel.channelCheckpointUpdateTickInSeconds",
	Version: "2.4.0",
	Doc: "The frequency, in seconds, at which the channel checkpoint updater executes updates.",
	DefaultValue: "10",
	Export: true,
}
p.ChannelCheckpointUpdateTickInSeconds.Init(base.mgr)

p.MaxConcurrentImportTaskNum = ParamItem{
	Key: "datanode.import.maxConcurrentTaskNum",
	Key: "dataNode.import.maxConcurrentTaskNum",
	Version: "2.4.0",
	Doc: "The maximum number of import/pre-import tasks allowed to run concurrently on a datanode.",
	DefaultValue: "16",
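The checkpoint parameters above cap how many channel checkpoints ride in a single UpdateChannelCheckpoint call (128 by default). A small sketch of that batching; sendRPC is a stand-in closure, not a Milvus API:

package main

import "fmt"

// flushInBatches sends pending checkpoints in chunks of at most maxPerRPC.
func flushInBatches(checkpoints []string, maxPerRPC int, sendRPC func(batch []string)) {
	for start := 0; start < len(checkpoints); start += maxPerRPC {
		end := start + maxPerRPC
		if end > len(checkpoints) {
			end = len(checkpoints)
		}
		sendRPC(checkpoints[start:end])
	}
}

func main() {
	pending := make([]string, 300)
	for i := range pending {
		pending[i] = fmt.Sprintf("channel-%d", i)
	}
	flushInBatches(pending, 128, func(batch []string) {
		fmt.Println("UpdateChannelCheckpoint with", len(batch), "checkpoints")
	})
	// Three calls: 128, 128 and 44 checkpoints.
}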
@ -3463,7 +3522,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.MaxConcurrentImportTaskNum.Init(base.mgr)

p.MaxImportFileSizeInGB = ParamItem{
	Key: "datanode.import.maxImportFileSizeInGB",
	Key: "dataNode.import.maxImportFileSizeInGB",
	Version: "2.4.0",
	Doc: "The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files.",
	DefaultValue: "16",

@ -3473,7 +3532,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.MaxImportFileSizeInGB.Init(base.mgr)

p.ReadBufferSizeInMB = ParamItem{
	Key: "datanode.import.readBufferSizeInMB",
	Key: "dataNode.import.readBufferSizeInMB",
	Version: "2.4.0",
	Doc: "The data block size (in MB) read from chunk manager by the datanode during import.",
	DefaultValue: "16",

@ -3483,7 +3542,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.ReadBufferSizeInMB.Init(base.mgr)

p.L0BatchMemoryRatio = ParamItem{
	Key: "datanode.compaction.levelZeroBatchMemoryRatio",
	Key: "dataNode.compaction.levelZeroBatchMemoryRatio",
	Version: "2.4.0",
	Doc: "The minimal memory ratio of free memory for level zero compaction executing in batch mode",
	DefaultValue: "0.05",
@ -3492,7 +3551,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
p.L0BatchMemoryRatio.Init(base.mgr)

p.GracefulStopTimeout = ParamItem{
	Key: "datanode.gracefulStopTimeout",
	Key: "dataNode.gracefulStopTimeout",
	Version: "2.3.7",
	DefaultValue: strconv.Itoa(DefaultGracefulStopTimeout),
	Doc: "seconds. force stop node without graceful stop",

@ -3566,7 +3625,6 @@ func (p *indexNodeConfig) init(base *BaseTable) {
	Version: "2.2.1",
	FallbackKeys: []string{"common.gracefulStopTimeout"},
	Doc: "seconds. force stop node without graceful stop",
	Export: true,
}
p.GracefulStopTimeout.Init(base.mgr)
}
@ -58,6 +58,7 @@ type grpcConfig struct {
	Domain string `refreshable:"false"`
	IP string `refreshable:"false"`
	TLSMode ParamItem `refreshable:"false"`
	IPItem ParamItem `refreshable:"false"`
	Port ParamItem `refreshable:"false"`
	InternalPort ParamItem `refreshable:"false"`
	ServerPemPath ParamItem `refreshable:"false"`

@ -67,14 +68,14 @@ type grpcConfig struct {

func (p *grpcConfig) init(domain string, base *BaseTable) {
	p.Domain = domain
	ipItem := ParamItem{
		Key: p.Domain + ".ip",
		Version: "2.3.3",
		DefaultValue: "",
		Export: true,
	p.IPItem = ParamItem{
		Key: p.Domain + ".ip",
		Version: "2.3.3",
		Doc: "if not specified, use the first unicastable address",
		Export: true,
	}
	ipItem.Init(base.mgr)
	p.IP = funcutil.GetIP(ipItem.GetValue())
	p.IPItem.Init(base.mgr)
	p.IP = funcutil.GetIP(p.IPItem.GetValue())

	p.Port = ParamItem{
		Key: p.Domain + ".port",
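"if not specified, use the first unicastable address": a sketch of that fallback using only the standard library; funcutil.GetIP may differ in detail:

package main

import (
	"fmt"
	"net"
)

// firstUnicastIP returns the first IPv4 global-unicast interface address, or "".
func firstUnicastIP() string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ""
	}
	for _, addr := range addrs {
		if ipNet, ok := addr.(*net.IPNet); ok && ipNet.IP.To4() != nil && ipNet.IP.IsGlobalUnicast() {
			return ipNet.IP.String()
		}
	}
	return ""
}

// resolveIP honors an explicitly configured <domain>.ip and falls back to
// interface discovery otherwise.
func resolveIP(configured string) string {
	if configured != "" {
		return configured
	}
	return firstUnicastIP()
}

func main() {
	fmt.Println(resolveIP("")) // e.g. 192.168.1.10
}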
@ -1485,13 +1485,15 @@ The maximum rate will not be greater than ` + "max" + `.`,
	Key: "quotaAndLimits.limits.maxCollectionNum",
	Version: "2.2.0",
	DefaultValue: "65536",
	Export: true,
}
p.MaxCollectionNum.Init(base.mgr)

p.MaxCollectionNumPerDB = ParamItem{
	Key: "quotaAndLimits.limits.maxCollectionNumPerDB",
	Version: "2.2.0",
	DefaultValue: "64",
	DefaultValue: "65536",
	Export: true,
}
p.MaxCollectionNumPerDB.Init(base.mgr)
@ -325,8 +325,9 @@ func (p *TiKVConfig) Init(base *BaseTable) {
p.Endpoints = ParamItem{
	Key: "tikv.endpoints",
	Version: "2.3.0",
	DefaultValue: "localhost:2379",
	DefaultValue: "localhost:2389",
	PanicIfEmpty: true,
	Doc: "Note that the default pd port of tikv is 2379, which conflicts with etcd.",
	Export: true,
}
p.Endpoints.Init(base.mgr)
@ -590,7 +591,7 @@ func (p *PulsarConfig) Init(base *BaseTable) {
	Key: "pulsar.webport",
	Version: "2.0.0",
	DefaultValue: "80",
	Doc: "Web port of pulsar, if you connect direcly without proxy, should use 8080",
	Doc: "Web port of pulsar, if you connect directly without proxy, should use 8080",
	Export: true,
}
p.WebPort.Init(base.mgr)

@ -664,6 +665,7 @@ func (p *PulsarConfig) Init(base *BaseTable) {
	Key: "pulsar.requestTimeout",
	Version: "2.3.0",
	DefaultValue: "60",
	Doc: "pulsar client global request timeout in seconds",
	Export: true,
}
p.RequestTimeout.Init(base.mgr)

@ -672,6 +674,7 @@ func (p *PulsarConfig) Init(base *BaseTable) {
	Key: "pulsar.enableClientMetrics",
	Version: "2.3.0",
	DefaultValue: "false",
	Doc: "Whether to register pulsar client metrics into milvus metrics path.",
	Export: true,
}
p.EnableClientMetrics.Init(base.mgr)
@ -885,6 +888,8 @@ please adjust in embedded Milvus: /tmp/milvus/rdb_data`,
	Key: "rocksmq.compressionTypes",
	DefaultValue: "0,0,7,7,7",
	Version: "2.2.12",
	Doc: "compaction compression type, only 0 and 7 are supported: 0 means no compression, 7 uses zstd. The length of the list equals the number of rocksdb levels.",
	Export: true,
}
r.CompressionTypes.Init(base.mgr)
}
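The default "0,0,7,7,7" assigns one compression type per RocksDB level, where 0 means no compression and 7 means zstd. A sketch of parsing and validating that list per the Doc string; this is illustrative, not rocksmq's loader:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCompressionTypes splits the comma-separated list and rejects anything
// other than 0 (none) and 7 (zstd), per the documented constraint.
func parseCompressionTypes(s string) ([]int, error) {
	parts := strings.Split(s, ",")
	types := make([]int, 0, len(parts))
	for _, p := range parts {
		v, err := strconv.Atoi(strings.TrimSpace(p))
		if err != nil {
			return nil, err
		}
		if v != 0 && v != 7 {
			return nil, fmt.Errorf("unsupported compression type %d, only 0 (none) and 7 (zstd) are allowed", v)
		}
		types = append(types, v)
	}
	return types, nil
}

func main() {
	types, err := parseCompressionTypes("0,0,7,7,7") // rocksmq.compressionTypes default
	fmt.Println(types, err)                          // [0 0 7 7 7] <nil>, one entry per rocksdb level
}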