Generate milvus.yaml by code (#22003)

Signed-off-by: Enwei Jiao <enwei.jiao@zilliz.com>
pull/22358/head
Enwei Jiao 2023-02-23 11:37:46 +08:00 committed by GitHub
parent 3e9d497157
commit 0851e05014
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 1263 additions and 492 deletions

View File

@ -166,7 +166,19 @@ func (mr *MilvusRoles) runIndexNode(ctx context.Context, localMsg bool, wg *sync
}
func (mr *MilvusRoles) setupLogger() {
logConfig := paramtable.Get().Log
params := paramtable.Get()
logConfig := log.Config{
Level: params.LogCfg.Level.GetValue(),
GrpcLevel: params.LogCfg.GrpcLogLevel.GetValue(),
Format: params.LogCfg.Format.GetValue(),
Stdout: params.LogCfg.Stdout.GetAsBool(),
File: log.FileLogConfig{
RootPath: params.LogCfg.RootPath.GetValue(),
MaxSize: params.LogCfg.MaxSize.GetAsInt(),
MaxDays: params.LogCfg.MaxAge.GetAsInt(),
MaxBackups: params.LogCfg.MaxBackups.GetAsInt(),
},
}
id := paramtable.GetNodeID()
roleName := paramtable.GetRole()
rootPath := logConfig.File.RootPath

View File

@ -0,0 +1,305 @@
package main
import (
"encoding/csv"
"fmt"
"os"
"reflect"
"strings"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/samber/lo"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)
type DocContent struct {
key string
defaultValue string
sinceVersion string
refreshable string
exportToUser bool
comment string
}
func collect() []DocContent {
params := paramtable.ComponentParam{}
params.Init()
val := reflect.ValueOf(params)
data := make([]DocContent, 0)
keySet := typeutil.NewSet[string]()
for i := 0; i < val.NumField(); i++ {
valueField := val.Field(i)
collectRecursive(&params, &data, &valueField)
}
result := make([]DocContent, 0)
for _, d := range data {
if keySet.Contain(d.key) {
continue
}
keySet.Insert(d.key)
result = append(result, d)
}
return result
}
func collectRecursive(params *paramtable.ComponentParam, data *[]DocContent, val *reflect.Value) {
if val.Kind() != reflect.Struct {
return
}
log.Debug("enter", zap.Any("variable", val.String()))
for j := 0; j < val.NumField(); j++ {
subVal := val.Field(j)
tag := val.Type().Field(j).Tag
t := val.Type().Field(j).Type.String()
if t == "paramtable.ParamItem" {
item := subVal.Interface().(paramtable.ParamItem)
refreshable := tag.Get("refreshable")
defaultValue := params.GetWithDefault(item.Key, item.DefaultValue)
log.Debug("got key", zap.String("key", item.Key), zap.Any("value", defaultValue), zap.String("variable", val.Type().Field(j).Name))
*data = append(*data, DocContent{item.Key, defaultValue, item.Version, refreshable, item.Export, item.Doc})
for _, fk := range item.FallbackKeys {
log.Debug("got fallback key", zap.String("key", fk), zap.Any("value", defaultValue), zap.String("variable", val.Type().Field(j).Name))
*data = append(*data, DocContent{fk, defaultValue, item.Version, refreshable, item.Export, item.Doc})
}
} else if t == "paramtable.ParamGroup" {
item := subVal.Interface().(paramtable.ParamGroup)
log.Debug("got key", zap.String("key", item.KeyPrefix), zap.String("variable", val.Type().Field(j).Name))
refreshable := tag.Get("refreshable")
*data = append(*data, DocContent{item.KeyPrefix, "", item.Version, refreshable, item.Export, item.Doc})
} else {
collectRecursive(params, data, &subVal)
}
}
}
func WriteCsv() {
f, err := os.Create("configs.csv")
defer f.Close()
if err != nil {
log.Error("create file failed", zap.Error(err))
os.Exit(-2)
}
w := csv.NewWriter(f)
w.Write([]string{"key", "defaultValue", "sinceVersion", "refreshable", "exportToUser", "comment"})
result := collect()
w.WriteAll(lo.Map(result, func(d DocContent, _ int) []string {
return []string{d.key, d.defaultValue, d.sinceVersion, d.refreshable, fmt.Sprintf("%t", d.exportToUser), d.comment}
}))
w.Flush()
}
type YamlGroup struct {
name string
header string
disable bool
}
type YamlMarshaller struct {
writer *os.File
groups []YamlGroup
data []DocContent
}
func (m *YamlMarshaller) writeYamlRecursive(data []DocContent, level int) {
var topLevels = typeutil.NewOrderedMap[string, []DocContent]()
for _, d := range data {
key := strings.Split(d.key, ".")[level]
old, ok := topLevels.Get(key)
if !ok {
topLevels.Set(key, []DocContent{d})
} else {
topLevels.Set(key, append(old, d))
}
}
var keys []string
var extraHeaders map[string]string
disabledGroups := lo.Map(
lo.Filter(
m.groups,
func(g YamlGroup, _ int) bool { return g.disable }),
func(g YamlGroup, _ int) string { return g.name })
if level == 0 {
keys = lo.Map(m.groups, func(g YamlGroup, _ int) string { return g.name })
extraHeaders = lo.SliceToMap(m.groups, func(g YamlGroup) (string, string) { return g.name, g.header })
} else {
keys = topLevels.Keys()
}
for _, key := range keys {
contents, ok := topLevels.Get(key)
if !ok {
log.Debug("didnot found config for " + key)
continue
}
content := contents[0]
isDisabled := slices.Contains(disabledGroups, strings.Split(content.key, ".")[0])
if strings.Count(content.key, ".") == level {
if isDisabled {
m.writer.WriteString("# ")
}
m.writeContent(key, content.defaultValue, content.comment, level)
continue
}
extra, ok := extraHeaders[key]
if ok {
m.writer.WriteString(extra + "\n")
}
if isDisabled {
m.writer.WriteString("# ")
}
m.writer.WriteString(fmt.Sprintf("%s%s:\n", strings.Repeat(" ", level*2), key))
m.writeYamlRecursive(contents, level+1)
}
}
func (m *YamlMarshaller) writeContent(key, value, comment string, level int) {
if strings.Contains(comment, "\n") {
multilines := strings.Split(comment, "\n")
for _, line := range multilines {
m.writer.WriteString(fmt.Sprintf("%s# %s\n", strings.Repeat(" ", level*2), line))
}
m.writer.WriteString(fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
} else if comment != "" {
m.writer.WriteString(fmt.Sprintf("%s%s: %s # %s\n", strings.Repeat(" ", level*2), key, value, comment))
} else {
m.writer.WriteString(fmt.Sprintf("%s%s: %s\n", strings.Repeat(" ", level*2), key, value))
}
}
func WriteYaml() {
f, err := os.Create("milvus.yaml")
defer f.Close()
if err != nil {
log.Error("create file failed", zap.Error(err))
os.Exit(-2)
}
result := collect()
f.WriteString(`# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
`)
groups := []YamlGroup{
{
name: "etcd",
header: "\n# Related configuration of etcd, used to store Milvus metadata & service discovery.",
},
{
name: "metastore",
},
{
name: "mysql",
header: "\n# Related configuration of mysql, used to store Milvus metadata.",
},
{
name: "localStorage",
},
{
name: "minio",
header: `
# Related configuration of MinIO/S3/GCS or any other service supports S3 API, which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.`,
},
{
name: "pulsar",
header: `
# Milvus supports three MQ: rocksmq(based on RocksDB), Pulsar and Kafka, which should be reserved in config what you use.
# There is a note about enabling priority if we config multiple mq in this file
# 1. standalone(local) mode: rocksmq(default) > Pulsar > Kafka
# 2. cluster mode: Pulsar(default) > Kafka (rocksmq is unsupported)
# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.`,
},
{
name: "kafka",
header: "\n# If you want to enable kafka, needs to comment the pulsar configs",
disable: true,
},
{
name: "rocksmq",
},
{
name: "rootCoord",
header: "\n# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests",
},
{
name: "proxy",
header: "\n# Related configuration of proxy, used to validate client requests and reduce the returned results.",
},
{
name: "queryCoord",
header: "\n# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.",
},
{
name: "queryNode",
header: "\n# Related configuration of queryNode, used to run hybrid search between vector and scalar data.",
},
{
name: "indexCoord",
},
{
name: "indexNode",
},
{
name: "dataCoord",
},
{
name: "dataNode",
},
{
name: "log",
header: "\n# Configures the system log output.",
},
{
name: "grpc",
},
{
name: "tls",
header: "\n# Configure the proxy tls enable.",
},
{
name: "common",
},
{
name: "quotaAndLimits",
header: `
# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
# 1. TT protection;
# 2. Memory protection.
# 3. Disk quota protection.
# You can enable:
# 1. DML throughput limitation;
# 2. DDL, DQL qps/rps limitation;
# 3. DQL Queue length/latency protection;
# 4. DQL result rate protection;
# If necessary, you can also manually force to deny RW requests.`,
},
{
name: "trace",
},
}
marshaller := YamlMarshaller{f, groups, result}
marshaller.writeYamlRecursive(lo.Filter(result, func(d DocContent, _ int) bool {
return d.exportToUser
}), 0)
}
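For orientation, only ParamItems with Export set end up in the generated milvus.yaml (WriteYaml filters on exportToUser; the refreshable struct tag only feeds the CSV column). A minimal sketch of a definition the collector would pick up, using a purely hypothetical key and field name, and assuming the struct is reachable from paramtable.ComponentParam so collect() can find it:

// Hypothetical config struct; it must be reachable from paramtable.ComponentParam
// for the reflection walk in collect()/collectRecursive to visit it.
type exampleConfig struct {
	FlushInterval ParamItem `refreshable:"true"` // tag value is written to the CSV "refreshable" column
}

func (c *exampleConfig) init(base *BaseTable) {
	c.FlushInterval = ParamItem{
		Key:          "example.flushInterval",
		Version:      "2.2.0",
		DefaultValue: "600",
		Doc:          "seconds, how often buffered data is flushed",
		Export:       true, // only exported items survive the filter in WriteYaml
	}
	c.FlushInterval.Init(base.mgr)
}

writeContent then renders a multi-line Doc as one "# " comment line per Doc line above the "key: value" line, indented to the key's nesting level.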

View File

@ -1,79 +1,40 @@
package main
import (
"encoding/csv"
"fmt"
"os"
"reflect"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
func collect(data *[][]string, val *reflect.Value) {
if val.Kind() != reflect.Struct {
return
}
for j := 0; j < val.NumField(); j++ {
subVal := val.Field(j)
tag := val.Type().Field(j).Tag
log.Debug("subVal", zap.Any("subVal", subVal),
zap.String("name", val.Type().Field(j).Name),
zap.Any("tag", val.Type().Field(j).Tag),
zap.Any("type", val.Type().Field(j).Type),
)
t := val.Type().Field(j).Type.String()
if t == "paramtable.ParamItem" {
item := subVal.Interface().(paramtable.ParamItem)
refreshable := tag.Get("refreshable")
if refreshable == "" {
refreshable = "undefined"
}
*data = append(*data, []string{item.Key, item.GetValue(), item.Version, refreshable})
} else if t == "paramtable.ParamGroup" {
item := subVal.Interface().(paramtable.ParamGroup)
refreshable := tag.Get("refreshable")
if refreshable == "" {
refreshable = "undefined"
}
*data = append(*data, []string{item.KeyPrefix, "", item.Version, refreshable})
} else {
collect(data, &subVal)
}
}
}
const (
generateCsv = "gen-csv"
generateYaml = "gen-yaml"
showYaml = "show-yaml"
)
func main() {
params := paramtable.ComponentParam{}
params.Init()
args := os.Args
f, err := os.Create("configs.csv")
defer f.Close()
if err != nil {
log.Error("create file failed", zap.Error(err))
if len(args) < 2 {
log.Error("len of args should large than 2")
os.Exit(-1)
}
w := csv.NewWriter(f)
w.Write([]string{"key", "default", "version", "refreshable"})
val := reflect.ValueOf(params)
data := make([][]string, 0)
keySet := typeutil.NewSet[string]()
for i := 0; i < val.NumField(); i++ {
valueField := val.Field(i)
// typeField := val.Type().Field(i)
collect(&data, &valueField)
}
result := make([][]string, 0)
for _, d := range data {
if keySet.Contain(d[0]) {
continue
switch args[1] {
case generateCsv:
WriteCsv()
case generateYaml:
WriteYaml()
case showYaml:
var f string
if len(args) == 2 {
f = "configs/milvus.yaml"
} else {
f = args[2]
}
keySet.Insert(d[0])
result = append(result, d)
ShowYaml(f)
default:
log.Error(fmt.Sprintf("unknown argument %s", args[1]))
}
w.WriteAll(result)
w.Flush()
}
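Judging from the dispatch above, the tool is invoked with one of three subcommands: gen-yaml regenerates milvus.yaml in the working directory, gen-csv dumps every parameter (key, default, version, refreshable, export flag, comment) to configs.csv, and show-yaml [path] prints a flattened key = value view of an existing YAML file, defaulting to configs/milvus.yaml; the binary's location in the repository is not shown in this diff.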

View File

@ -0,0 +1,26 @@
package main
import (
"fmt"
"os"
"sort"
"github.com/milvus-io/milvus/internal/log"
"github.com/spf13/viper"
"go.uber.org/zap"
)
func ShowYaml(filepath string) {
reader := viper.New()
reader.SetConfigFile(filepath)
if err := reader.ReadInConfig(); err != nil {
log.Warn("read config failed", zap.Error(err))
os.Exit(-3)
}
keys := reader.AllKeys()
sort.Strings(keys)
for _, key := range keys {
v := reader.GetString(key)
fmt.Fprintln(os.Stdout, key, "=", v)
}
}
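As a rough illustration against the etcd section below, show-yaml would print sorted lines such as:

etcd.endpoints = localhost:2379
etcd.rootpath = by-dev

(viper normalizes keys to lowercase, so mixed-case keys appear lowercased in this listing).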

View File

@ -16,12 +16,12 @@
# Related configuration of etcd, used to store Milvus metadata & service discovery.
etcd:
endpoints:
- localhost:2379
endpoints: localhost:2379
rootPath: by-dev # The root path where data is stored in etcd
metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
log:
level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
# path is one of:
# - "default" as os.Stderr,
# - "stderr" as os.Stderr,
@ -29,14 +29,6 @@ etcd:
# - file path to append server logs to.
# please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
path: stdout
level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
use:
# please adjust in embedded Milvus: true
embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
data:
# Embedded Etcd only.
# please adjust in embedded Milvus: /tmp/milvus/etcdData/
dir: default.etcd
ssl:
enabled: false # Whether to support ETCD secure connection mode
tlsCert: /path/to/etcd-client.pem # path to your cert file
@ -44,12 +36,16 @@ etcd:
tlsCACert: /path/to/ca.pem # path to your CACert file
# TLS min version
# Optional values: 1.0, 1.1, 1.2, 1.3.
# We recommend using version 1.2 and above
# We recommend using version 1.2 and above.
tlsMinVersion: 1.3
use:
embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
data:
dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/
# Default value: etcd
# Valid values: [etcd, mysql]
metastore:
# Default value: etcd
# Valid values: [etcd, mysql]
type: etcd
# Related configuration of mysql, used to store Milvus metadata.
@ -59,127 +55,118 @@ mysql:
address: localhost
port: 3306
dbName: milvus_meta
driverName: mysql
maxOpenConns: 20
maxIdleConns: 5
# please adjust in embedded Milvus: /tmp/milvus/data/
localStorage:
path: /var/lib/milvus/data/
path: /var/lib/milvus/data/ # please adjust in embedded Milvus: /tmp/milvus/data/
# Related configuration of MinIO/S3/GCS or any other service supports S3 API, which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.
minio:
address: localhost # Address of MinIO/S3
port: 9000 # Port of MinIO/S3
port: 9000 # Port of MinIO/S3
accessKeyID: minioadmin # accessKeyID of MinIO/S3
secretAccessKey: minioadmin # MinIO/S3 encryption string
useSSL: false # Access to MinIO/S3 with SSL
bucketName: "a-bucket" # Bucket name in MinIO/S3
bucketName: a-bucket # Bucket name in MinIO/S3
rootPath: files # The root path where the message is stored in MinIO/S3
# Whether to use IAM role to access S3/GCS instead of access/secret keys
# For more infomation, refer to
# Whether to use IAM role to access S3/GCS instead of access/secret keys
# For more information, refer to
# aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
# gcp: https://cloud.google.com/storage/docs/access-control/iam
useIAM: false
# Cloud Provider of S3. Supports: "aws", "gcp".
# You can use "aws" for other cloud provider supports S3 API with signature v4, e.g.: minio
# You can use "gcp" for other cloud provider supports S3 API with signature v2
# When `useIAM` enabled, only "aws" & "gcp" is supported for now
cloudProvider: "aws"
# When useIAM enabled, only "aws" & "gcp" is supported for now
cloudProvider: aws
# Custom endpoint for fetch IAM role credentials. when useIAM is true & cloudProvider is "aws".
# Leave it empty if you want to use AWS default endpoint
iamEndpoint: ""
iamEndpoint:
# Milvus supports three MQ: rocksmq(based on RocksDB), Pulsar and Kafka, which should be reserved in config what you use.
# There is a note about enabling priority if we config multiple mq in this file
# 1. standalone(local) mode: rockskmq(default) > Pulsar > Kafka
# 1. standalone(local) mode: rocksmq(default) > Pulsar > Kafka
# 2. cluster mode: Pulsar(default) > Kafka (rocksmq is unsupported)
# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
address: localhost # Address of pulsar
port: 6650 # Port of pulsar
port: 6650 # Port of Pulsar
webport: 80 # Web port of pulsar, if you connect directly without proxy, should use 8080
maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
tenant: public
namespace: default
# If you want to enable kafka, needs to comment the pulsar configs
kafka:
producer:
client.id: dc
consumer:
client.id: dc1
# brokerList: localhost1:9092,localhost2:9092,localhost3:9092
# saslUsername: username
# saslPassword: password
# saslMechanisms: PLAIN
# securityProtocol: SASL_SSL
# kafka:
# brokerList:
# saslUsername:
# saslPassword:
# saslMechanisms: PLAIN
# securityProtocol: SASL_SSL
rocksmq:
# The path where the message is stored in rocksmq
# please adjust in embedded Milvus: /tmp/milvus/rdb_data
path: /var/lib/milvus/rdb_data # The path where the message is stored in rocksmq
path: /var/lib/milvus/rdb_data
lrucacheratio: 0.06 # rocksdb cache memory ratio
rocksmqPageSize: 2147483648 # 2 GB, 2 * 1024 * 1024 * 1024 bytes, The size of each page of messages in rocksmq
retentionTimeInMinutes: 7200 # 5 days, 5 * 24 * 60 minutes, The retention time of the message in rocksmq.
retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
lrucacheratio: 0.06 # rocksdb cache memory ratio
# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
address: localhost
port: 53100
enableActiveStandby: false # Enable active-standby
dmlChannelNum: 256 # The number of dml channels created at system startup
maxPartitionNum: 4096 # Maximum number of partitions in a collection
minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed
# (in seconds) Duration after which an import task will expire (be killed). Default 900 seconds (15 minutes).
# Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
importTaskExpiration: 900
# (in seconds) Milvus will keep the record of import tasks for at least `importTaskRetention` seconds. Default 86400
# seconds (24 hours).
# Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
importTaskRetention: 86400
importTaskExpiration: 900 # (in seconds) Duration after which an import task will expire (be killed). Default 900 seconds (15 minutes).
importTaskRetention: 86400 # (in seconds) Milvus will keep the record of import tasks for at least `importTaskRetention` seconds. Default 86400, seconds (24 hours).
enableActiveStandby: false
port: 53100
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
port: 19530
internalPort: 19529
http:
enabled: true # Whether to enable the http server
debug_mode: false # Whether to enable http server debug mode
timeTickInterval: 200 # ms, the interval that proxy synchronize the time tick
msgStream:
timeTick:
bufSize: 512
maxNameLength: 255 # Maximum length of name for a collection or alias
maxFieldNum: 64 # Maximum number of fields in a collection.
maxNameLength: 255 # Maximum length of name for a collection or alias
# Maximum number of fields in a collection.
# As of today (2.2.0 and after) it is strongly DISCOURAGED to set maxFieldNum >= 64.
# So adjust at your risk!
maxDimension: 32768 # Maximum dimension of a vector
# It is strongly DISCOURAGED to set `maxShardNum` > 64.
maxFieldNum: 64
maxShardNum: 64 # Maximum number of shards in a collection
maxTaskNum: 1024 # max task number of proxy task queue
maxDimension: 32768 # Maximum dimension of a vector
# Whether to produce gin logs.\n
# please adjust in embedded Milvus: false
ginLogging: true # Whether to produce gin logs.
ginLogging: true
maxTaskNum: 1024 # max task number of proxy task queue
accessLog:
localPath: /tmp/milvus_accesslog
filename: milvus_access_log.log
filename: milvus_access_log.log # Log filename, leave empty to disable file log.
http:
enabled: true # Whether to enable the http server
debug_mode: false # Whether to enable http server debug mode
port: 19530
internalPort: 19529
grpc:
serverMaxRecvSize: 67108864 # 64M
serverMaxSendSize: 67108864 # 64M
clientMaxRecvSize: 268435456 # 256 MB
clientMaxSendSize: 268435456 # 256 MB
serverMaxSendSize: 67108864
serverMaxRecvSize: 67108864
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
address: localhost
port: 19531
taskMergeCap: 16
taskExecutionCap: 256
autoHandoff: true # Enable auto handoff
autoBalance: true # Enable auto balance
overloadedMemoryThresholdPercentage: 90 # The threshold percentage that memory overload
@ -192,174 +179,162 @@ queryCoord:
heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
loadTimeoutSeconds: 600
checkHandoffInterval: 5000
taskMergeCap: 16
taskExecutionCap: 256
enableActiveStandby: false # Enable active-standby
enableActiveStandby: false
port: 19531
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
cacheSize: 32 # GB, default 32 GB, `cacheSize` is the memory used for caching data for faster query. The `cacheSize` must be less than system memory size.
port: 21123
loadMemoryUsageFactor: 3 # The multiply factor of calculating the memory usage while loading segments
enableDisk: true # enable querynode load disk index, and search on disk index
maxDiskUsagePercentage: 95
gracefulStopTimeout: 30
stats:
publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
dataSync:
flowGraph:
maxQueueLength: 1024 # Maximum length of task queue in flowgraph
maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
# Segcore will divide a segment into multiple chunks to enable small index
stats:
publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
segcore:
chunkRows: 1024 # The number of vectors in a chunk.
# Note: we have disabled segment small index since @2022.05.12. So below related configurations won't work.
# We won't create small index for growing segments and search on these segments will directly use bruteforce scan.
smallIndex:
nlist: 128 # small index nlist, recommend to set sqrt(chunkRows), must smaller than chunkRows/8
nprobe: 16 # nprobe to search small index, based on your accuracy requirement, must smaller than nlist
loadMemoryUsageFactor: 3 # The multiply factor of calculating the memory usage while loading segments
enableDisk: true # enable querynode load disk index, and search on disk index
maxDiskUsagePercentage: 95
cache:
enabled: true
memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024
grouping:
enabled: true
maxNQ: 1000
topKMergeRatio: 10
scheduler:
receiveChanSize: 10240
unsolvedQueueSize: 10240
# maxReadConcurrentRatio is the concurrency ratio of read task (search task and query task).
# Max read concurrency would be the value of `runtime.NumCPU * maxReadConcurrentRatio`.
# Max read concurrency would be the value of runtime.NumCPU * maxReadConcurrentRatio.
# It defaults to 2.0, which means max read concurrency would be the value of runtime.NumCPU * 2.
# Max read concurrency must greater than or equal to 1, and less than or equal to runtime.NumCPU * 100.
maxReadConcurrentRatio: 2.0 # (0, 100]
cpuRatio: 10.0 # ratio used to estimate read task cpu usage.
# maxTimestampLag is the max ts lag between serviceable and guarantee timestamp.
# if the lag is larger than this config, scheduler will return error without waiting.
# the valid value is [3600, infinite)
# (0, 100]
maxReadConcurrentRatio: 2
cpuRatio: 10 # ratio used to estimate read task cpu usage.
maxTimestampLag: 86400
grouping:
enabled: true
maxNQ: 1000
topKMergeRatio: 10.0
gracefulStopTimeout: 30
port: 21123
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
indexCoord:
# deprecated
address: localhost
port: 31000
enableActiveStandby: false # Enable active-standby
minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed
bindIndexNodeMode:
enable: false
address: "localhost:22930"
address: localhost:22930
withCred: false
nodeID: 0
# deprecated
gc:
interval: 600 # gc interval in seconds
scheduler:
interval: 1000 # scheduler interval in Millisecond
segment:
minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed
indexNode:
port: 21121
scheduler:
buildParallel: 1
enableDisk: true # enable index node build disk vector index
maxDiskUsagePercentage: 95
gracefulStopTimeout: 30
scheduler:
buildParallel: 1
port: 21121
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
dataCoord:
address: localhost
port: 13333
enableCompaction: true # Enable data segment compaction
enableGarbageCollection: true
enableActiveStandby: false # Enable active-standby
channel:
watchTimeoutInterval: 30 # Timeout on watching channels (in seconds). Datanode tickler update watch progress will reset timeout timer.
segment:
maxSize: 512 # Maximum size of a segment in MB
diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for collection which has Disk index
# Minimum proportion for a segment which can be sealed.
# Sealing early can prevent producing large growing segments in case these segments might slow down our search/query.
# Segments that sealed early will be compacted into a larger segment (within maxSize) eventually.
sealProportion: 0.23
assignmentExpiration: 2000 # The time of the assignment expiration in ms
maxLife: 86400 # The max lifetime of segment in seconds, 24*60*60
# If a segment didn't accept dml records in `maxIdleTime` and the size of segment is greater than
# `minSizeFromIdleToSealed`, Milvus will automatically seal it.
maxIdleTime: 600 # The max idle time of segment in seconds, 10*60.
# If a segment didn't accept dml records in maxIdleTime and the size of segment is greater than
# minSizeFromIdleToSealed, Milvus will automatically seal it.
# The max idle time of segment in seconds, 10*60.
maxIdleTime: 600
minSizeFromIdleToSealed: 16 # The min size in MB of segment which can be idle from sealed.
# The max number of statslog file for one segment, the segment will be sealed if
# the number of statslog file reaches to max value.
# The max number of binlog file for one segment, the segment will be sealed if
# the number of binlog file reaches to max value.
maxBinlogFileNumber: 32
smallProportion: 0.5 # The segment is considered as "small segment" when its # of rows is smaller than
# (smallProportion * segment max # of rows).
compactableProportion: 0.5 # A compaction will happen on small segments if the segment after compaction will have
# A compaction will happen on small segments if the segment after compaction will have
compactableProportion: 0.5
# over (compactableProportion * segment max # of rows) rows.
# MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
expansionRate: 1.25 # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
# During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
expansionRate: 1.25
enableCompaction: true # Enable data segment compaction
compaction:
enableAutoCompaction: true
enableGarbageCollection: true
gc:
interval: 3600 # gc interval in seconds
missingTolerance: 86400 # file meta missing tolerance duration in seconds, 60*24
dropTolerance: 3600 # file belongs to dropped entity tolerance duration in seconds, 3600
dropTolerance: 3600 # file belongs to dropped entity tolerance duration in seconds. 3600
enableActiveStandby: false
port: 13333
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
dataNode:
port: 21124
dataSync:
flowGraph:
maxQueueLength: 1024 # Maximum length of task queue in flowgraph
maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
segment:
# Max buffer size to flush for a single segment.
insertBufSize: 16777216 # Bytes, 16 MB
# Max buffer size to flush del for a single channel
deleteBufBytes: 67108864 # Bytes, 64MB
# The period to sync segments if buffer is not empty.
syncPeriod: 600 # Seconds, 10min
insertBufSize: 16777216 # Max buffer size to flush for a single segment.
deleteBufBytes: 67108864 # Max buffer size to flush del for a single channel
syncPeriod: 600 # The period to sync segments if buffer is not empty.
port: 21124
grpc:
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
# Configures the system log output.
log:
level: debug # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
stdout: "true" # default true, print log to stdout
file:
# please adjust in embedded Milvus: /tmp/milvus/logs
rootPath: "" # root dir path to put logs, default "" means no log file will print
rootPath: # root dir path to put logs, default "" means no log file will print. please adjust in embedded Milvus: /tmp/milvus/logs
maxSize: 300 # MB
maxAge: 10 # Maximum time for log retention in day.
maxBackups: 20
format: text # text/json
format: text # text or json
stdout: true # Stdout enable or not
grpc:
log:
level: WARNING
serverMaxRecvSize: 536870912 # 512MB
serverMaxSendSize: 536870912 # 512MB
clientMaxRecvSize: 268435456 # 256 MB
clientMaxSendSize: 268435456 # 256 MB
serverMaxSendSize: 536870912
serverMaxRecvSize: 536870912
client:
compressionEnabled: false
dialTimeout: 5000
keepAliveTime: 10000
keepAliveTimeout: 20000
maxMaxAttempts: 5
initialBackOff: 1.0
maxBackoff: 60.0
backoffMultiplier: 2.0
compressionEnabled: false
initialBackoff: 1
maxBackoff: 60
backoffMultiplier: 2
clientMaxSendSize: 268435456
clientMaxRecvSize: 268435456
# Configure the proxy tls enable.
tls:
@ -367,70 +342,52 @@ tls:
serverKeyPath: configs/cert/server.key
caPemPath: configs/cert/ca.pem
common:
# Channel name generation rule: ${namePrefix}-${ChannelIdx}
chanNamePrefix:
cluster: "by-dev"
rootCoordTimeTick: "rootcoord-timetick"
rootCoordStatistics: "rootcoord-statistics"
rootCoordDml: "rootcoord-dml"
rootCoordDelta: "rootcoord-delta"
search: "search"
searchResult: "searchResult"
queryTimeTick: "queryTimeTick"
queryNodeStats: "query-node-stats"
# Cmd for loadIndex, flush, etc...
cmd: "cmd"
dataCoordStatistic: "datacoord-statistics-channel"
dataCoordTimeTick: "datacoord-timetick-channel"
dataCoordSegmentInfo: "segment-info-channel"
# Sub name generation rule: ${subNamePrefix}-${NodeID}
cluster: by-dev
rootCoordTimeTick: rootcoord-timetick
rootCoordStatistics: rootcoord-statistics
rootCoordDml: rootcoord-dml
rootCoordDelta: rootcoord-delta
search: search
searchResult: searchResult
queryTimeTick: queryTimeTick
dataCoordStatistic: datacoord-statistics-channel
dataCoordTimeTick: datacoord-timetick-channel
dataCoordSegmentInfo: segment-info-channel
subNamePrefix:
rootCoordSubNamePrefix: "rootCoord"
proxySubNamePrefix: "proxy"
queryNodeSubNamePrefix: "queryNode"
dataNodeSubNamePrefix: "dataNode"
dataCoordSubNamePrefix: "dataCoord"
defaultPartitionName: "_default" # default partition name for a collection
defaultIndexName: "_default_idx" # default index name
retentionDuration: 0 # time travel reserved time, insert/delete will not be cleaned in this period. disable it by default
entityExpiration: -1 # Entity expiration in seconds, CAUTION make sure entityExpiration >= retentionDuration and -1 means never expire
gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
gracefulStopTimeout: 30 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
# Default value: auto
# Valid values: [auto, avx512, avx2, avx, sse4_2]
# This configuration is only used by querynode and indexnode, it selects CPU instruction set for Searching and Index-building.
simdType: auto
proxySubNamePrefix: proxy
rootCoordSubNamePrefix: rootCoord
queryNodeSubNamePrefix: queryNode
dataCoordSubNamePrefix: dataCoord
dataNodeSubNamePrefix: dataNode
defaultPartitionName: _default # default partition name for a collection
defaultIndexName: _default_idx # default index name
retentionDuration: 0 # time travel reserved time, insert/delete will not be cleaned in this period. disable it by default
entityExpiration: -1 # Entity expiration in seconds, CAUTION make sure entityExpiration >= retentionDuration and -1 means never expire
indexSliceSize: 16 # MB
threadCoreCoefficient: 10 # This parameter specify how many times the number of threads is the number of cores
DiskIndex:
MaxDegree: 56
SearchListSize: 100
PQCodeBudgetGBRatio: 0.125
BuildNumThreadsRatio: 1.0
SearchCacheBudgetGBRatio: 0.10
LoadNumThreadRatio: 8.0
BeamWidthRatio: 4.0
# This parameter specify how many times the number of threads is the number of cores
threadCoreCoefficient : 10
# please adjust in embedded Milvus: local
storageType: minio
BuildNumThreadsRatio: 1
SearchCacheBudgetGBRatio: 0.1
LoadNumThreadRatio: 8
BeamWidthRatio: 4
gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
gracefulStopTimeout: 30 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
storageType: minio # please adjust in embedded Milvus: local
# Default value: auto
# Valid values: [auto, avx512, avx2, avx, sse4_2]
# This configuration is only used by querynode and indexnode, it selects CPU instruction set for Searching and Index-building.
simdType: auto
security:
authorizationEnabled: false
# The superusers will ignore some system check processes,
# like the old password verification when updating the credential
superUsers:
- "root"
# tls mode values [0, 1, 2]
# 0 is close, 1 is one-way authentication, 2 is two-way authentication.
superUsers: root
tlsMode: 0
session:
ttl: 60 # ttl value when session granting a lease to register service
retryTimes: 30 # retry times when session sending etcd requests
@ -448,16 +405,14 @@ common:
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
enabled: true # `true` to enable quota and limits, `false` to disable.
# quotaCenterCollectInterval is the time interval that quotaCenter
# collects metrics from Proxies, Query cluster and Data cluster.
quotaCenterCollectInterval: 3 # seconds, (0 ~ 65536)
ddl: # ddl limit rates, default no limit.
# seconds, (0 ~ 65536)
quotaCenterCollectInterval: 3
ddl:
enabled: false
collectionRate: -1 # qps, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
partitionRate: -1 # qps, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
indexRate:
enabled: false
max: -1 # qps, default no limit, rate for CreateIndex, DropIndex
@ -467,87 +422,82 @@ quotaAndLimits:
compactionRate:
enabled: false
max: -1 # qps, default no limit, rate for manualCompaction
# dml limit rates, default no limit.
# The maximum rate will not be greater than `max`.
dml:
# dml limit rates, default no limit.
# The maximum rate will not be greater than max.
enabled: false
insertRate:
max: -1 # MB/s, default no limit
deleteRate:
max: -1 # MB/s, default no limit
bulkLoadRate: # not support yet. TODO: limit bulkLoad rate
max: -1 # MB/s, default no limit
# dql limit rates, default no limit.
# The maximum rate will not be greater than `max`.
bulkLoadRate:
max: -1 # MB/s, default no limit, not support yet. TODO: limit bulkLoad rate
dql:
# dql limit rates, default no limit.
# The maximum rate will not be greater than max.
enabled: false
searchRate:
max: -1 # vps (vectors per second), default no limit
queryRate:
max: -1 # qps, default no limit
# limitWriting decides whether dml requests are allowed.
limitWriting:
# forceDeny `false` means dml requests are allowed (except for some
# specific conditions, such as memory of nodes to water marker), `true` means always reject all dml requests.
# forceDeny false means dml requests are allowed (except for some
# specific conditions, such as memory of nodes to water marker), true means always reject all dml requests.
forceDeny: false
ttProtection:
enabled: false
# maxTimeTickDelay indicates the backpressure for DML Operations.
# DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay,
# if time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
maxTimeTickDelay: 300 # in seconds
# seconds
maxTimeTickDelay: 300
memProtection:
enabled: true
# When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
# When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
# When memory usage < memoryLowWaterLevel, no action.
# memoryLowWaterLevel should be less than memoryHighWaterLevel.
enabled: true
dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
diskProtection:
# When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
enabled: true
enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
diskQuota: -1 # MB, (0, +inf), default no limit
# limitReading decides whether dql requests are allowed.
limitReading:
# forceDeny `false` means dql requests are allowed (except for some
# specific conditions, such as collection has been dropped), `true` means always reject all dql requests.
# forceDeny false means dql requests are allowed (except for some
# specific conditions, such as collection has been dropped), true means always reject all dql requests.
forceDeny: false
queueProtection:
enabled: false
# nqInQueueThreshold indicated that the system was under backpressure for Search/Query path.
# If NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off
# until the NQ in queue no longer exceeds nqInQueueThreshold. We think of the NQ of query request as 1.
nqInQueueThreshold: -1 # int, default no limit
# int, default no limit
nqInQueueThreshold: -1
# queueLatencyThreshold indicated that the system was under backpressure for Search/Query path.
# If dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off
# until the latency of queuing no longer exceeds queueLatencyThreshold.
# The latency here refers to the averaged latency over a period of time.
queueLatencyThreshold: -1 # milliseconds, default no limit
# milliseconds, default no limit
queueLatencyThreshold: -1
resultProtection:
enabled: false
# maxReadResultRate indicated that the system was under backpressure for Search/Query path.
# If dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off
# until the read result rate no longer exceeds maxReadResultRate.
maxReadResultRate: -1 # MB/s, default no limit
# coolOffSpeed is the speed of search&query rates cool off.
coolOffSpeed: 0.9 # (0, 1]
# MB/s, default no limit
maxReadResultRate: -1
# coolOffSpeed is the speed of search&query rates cool off.
# (0, 1]
coolOffSpeed: 0.9
trace:
# trace exporter type, default is empty,
# trace exporter type, default is stdout,
# optional values: ['stdout', 'jaeger']
exporter: 'stdout'
exporter: stdout
# fraction of traceID based sampler,
# optional values: [0, 1]
# Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
sampleFraction: 0
# when exporter is jaeger should set the jaeger's URL
jaeger:
url: ''
url: # when exporter is jaeger should set the jaeger's URL

View File

@ -132,10 +132,11 @@ func TestConfigFromRemote(t *testing.T) {
mgr.Close()
client.KV.Put(ctx, "test/config/test/etcd", "value2")
time.Sleep(100)
assert.Eventually(t, func() bool {
_, err = mgr.GetConfig("test.etcd")
return err.Error() == "key not found: test.etcd"
}, 300*time.Millisecond, 10*time.Millisecond)
_, err = mgr.GetConfig("test.etcd")
assert.EqualError(t, err, "key not found: test.etcd")
})
}

View File

@ -23,7 +23,7 @@ func Connect(cfg *paramtable.MetaDBConfig) error {
cfg.Username.GetValue(), cfg.Password.GetValue(), cfg.Address.GetValue(), cfg.Port.GetAsInt(), cfg.DBName.GetValue())
var ormLogger logger.Interface
if cfg.LogLevel.GetValue() == "debug" {
if log.Level().String() == "debug" {
ormLogger = logger.Default.LogMode(logger.Info)
} else {
ormLogger = logger.Default

View File

@ -35,8 +35,6 @@ import (
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/datacoord"
"github.com/milvus-io/milvus/internal/datanode"
grpcdatacoordclient "github.com/milvus-io/milvus/internal/distributed/datacoord"
grpcdatacoordclient2 "github.com/milvus-io/milvus/internal/distributed/datacoord/client"
grpcdatanode "github.com/milvus-io/milvus/internal/distributed/datanode"
@ -46,16 +44,12 @@ import (
grpcquerynode "github.com/milvus-io/milvus/internal/distributed/querynode"
grpcrootcoord "github.com/milvus-io/milvus/internal/distributed/rootcoord"
rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
querycoord "github.com/milvus-io/milvus/internal/querycoordv2"
"github.com/milvus-io/milvus/internal/querynode"
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/tracer"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/componentutil"
@ -65,7 +59,6 @@ import (
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/importutil"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
@ -97,11 +90,6 @@ func runRootCoord(ctx context.Context, localMsg bool) *grpcrootcoord.Server {
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&rootcoord.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
var err error
rc, err = grpcrootcoord.NewServer(ctx, factory)
@ -126,11 +114,6 @@ func runQueryCoord(ctx context.Context, localMsg bool) *grpcquerycoord.Server {
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&querycoord.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
var err error
qs, err = grpcquerycoord.NewServer(ctx, factory)
@ -155,11 +138,6 @@ func runQueryNode(ctx context.Context, localMsg bool, alias string) *grpcqueryno
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&querynode.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
var err error
qn, err = grpcquerynode.NewServer(ctx, factory)
@ -184,11 +162,6 @@ func runDataCoord(ctx context.Context, localMsg bool) *grpcdatacoordclient.Serve
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&datacoord.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
ds = grpcdatacoordclient.NewServer(ctx, factory)
wg.Done()
@ -209,11 +182,6 @@ func runDataNode(ctx context.Context, localMsg bool, alias string) *grpcdatanode
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&datanode.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
var err error
dn, err = grpcdatanode.NewServer(ctx, factory)
@ -238,11 +206,6 @@ func runIndexNode(ctx context.Context, localMsg bool, alias string) *grpcindexno
wg.Add(1)
go func() {
if !localMsg {
logutil.SetupLogger(&indexnode.Params.Log)
defer log.Sync()
}
factory := dependency.NewDefaultFactory(localMsg)
var err error
in, err = grpcindexnode.NewServer(ctx, factory)

View File

@ -15,7 +15,6 @@ import (
"os"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
@ -63,8 +62,6 @@ type BaseTable struct {
configDir string
YamlFile string
Log log.Config
}
// NewBaseTableFromYamlOnly only used in migration tool.
@ -109,7 +106,6 @@ func (gp *BaseTable) init(refreshInterval int) {
}
gp.initConfigsFromLocal(refreshInterval)
gp.initConfigsFromRemote(refreshInterval)
gp.initLog()
}
func (gp *BaseTable) initConfigsFromLocal(refreshInterval int) {
@ -214,30 +210,3 @@ func (gp *BaseTable) Reset(key string) error {
gp.mgr.ResetConfig(key)
return nil
}
// initLog init log of the base table
func (gp *BaseTable) initLog() {
gp.Log = log.Config{}
format := gp.GetWithDefault("log.format", DefaultLogFormat)
gp.Log.Format = format
level := gp.GetWithDefault("log.level", DefaultLogLevelForBase)
gp.Log.Level = level
gp.Log.File.MaxSize, _ = strconv.Atoi(gp.GetWithDefault("log.file.maxSize", "300"))
gp.Log.File.MaxBackups, _ = strconv.Atoi(gp.GetWithDefault("log.file.maxBackups", "10"))
gp.Log.File.MaxDays, _ = strconv.Atoi(gp.GetWithDefault("log.file.maxAge", "20"))
gp.Log.File.RootPath = gp.GetWithDefault("log.file.rootPath", DefaultRootPath)
stdout, err := strconv.ParseBool(gp.GetWithDefault("log.stdout", "true"))
if err != nil {
gp.Log.Stdout = true
} else {
gp.Log.Stdout = stdout
}
grpclog, err := gp.Load("grpc.log.level")
if err != nil {
gp.Log.GrpcLevel = DefaultLogLevel
} else {
gp.Log.GrpcLevel = strings.ToUpper(grpclog)
}
}
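With initLog gone, log settings are presumably read straight from the typed ParamItems, as the new setupLogger hunk at the top of this diff does; roughly:

params := paramtable.Get()
level := params.LogCfg.Level.GetValue()    // replaces gp.GetWithDefault("log.level", ...)
stdout := params.LogCfg.Stdout.GetAsBool() // replaces strconv.ParseBool on the raw string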

File diff suppressed because it is too large

View File

@ -30,16 +30,6 @@ func TestComponentParam(t *testing.T) {
Init()
params := Get()
t.Run("test kafkaConfig", func(t *testing.T) {
params := params.ServiceParam.KafkaCfg
producerConfig := params.ProducerExtraConfig.GetValue()
assert.Equal(t, "dc", producerConfig["client.id"])
consumerConfig := params.ConsumerExtraConfig.GetValue()
assert.Equal(t, "dc1", consumerConfig["client.id"])
})
t.Run("test commonConfig", func(t *testing.T) {
Params := params.CommonCfg

View File

@ -74,6 +74,7 @@ func (p *grpcConfig) init(domain string, base *BaseTable) {
Key: p.Domain + ".port",
Version: "2.0.0",
DefaultValue: strconv.FormatInt(ProxyExternalPort, 10),
Export: true,
}
p.Port.Init(base.mgr)
@ -88,24 +89,28 @@ func (p *grpcConfig) init(domain string, base *BaseTable) {
Key: "common.security.tlsMode",
Version: "2.0.0",
DefaultValue: "0",
Export: true,
}
p.TLSMode.Init(base.mgr)
p.ServerPemPath = ParamItem{
Key: "tls.serverPemPath",
Version: "2.0.0",
Export: true,
}
p.ServerPemPath.Init(base.mgr)
p.ServerKeyPath = ParamItem{
Key: "tls.serverKeyPath",
Version: "2.0.0",
Export: true,
}
p.ServerKeyPath.Init(base.mgr)
p.CaPemPath = ParamItem{
Key: "tls.caPemPath",
Version: "2.0.0",
Export: true,
}
p.CaPemPath.Init(base.mgr)
}
@ -148,6 +153,7 @@ func (p *GrpcServerConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.ServerMaxSendSize.Init(base.mgr)
@ -169,6 +175,7 @@ func (p *GrpcServerConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.ServerMaxRecvSize.Init(base.mgr)
}
@ -213,6 +220,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.ClientMaxSendSize.Init(base.mgr)
@ -234,6 +242,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.ClientMaxRecvSize.Init(base.mgr)
@ -254,6 +263,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.DialTimeout.Init(base.mgr)
@ -274,6 +284,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.KeepAliveTimeout.Init(base.mgr)
@ -294,6 +305,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.KeepAliveTime.Init(base.mgr)
@ -320,6 +332,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.MaxAttempts.Init(base.mgr)
@ -340,6 +353,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.InitialBackoff.Init(base.mgr)
@ -360,6 +374,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.MaxBackoff.Init(base.mgr)
@ -380,6 +395,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.BackoffMultiplier.Init(base.mgr)
@ -400,6 +416,7 @@ func (p *GrpcClientConfig) Init(domain string, base *BaseTable) {
}
return v
},
Export: true,
}
p.CompressionEnabled.Init(base.mgr)
}

View File

@ -10,6 +10,8 @@ func (p *httpConfig) init(base *BaseTable) {
Key: "proxy.http.enabled",
DefaultValue: "true",
Version: "2.1.0",
Doc: "Whether to enable the http server",
Export: true,
}
p.Enabled.Init(base.mgr)
@ -17,6 +19,8 @@ func (p *httpConfig) init(base *BaseTable) {
Key: "proxy.http.debug_mode",
DefaultValue: "false",
Version: "2.1.0",
Doc: "Whether to enable http server debug mode",
Export: true,
}
p.DebugMode.Init(base.mgr)
}

View File

@ -27,6 +27,7 @@ type ParamItem struct {
DefaultValue string
FallbackKeys []string
PanicIfEmpty bool
Export bool
Formatter func(originValue string) string
Forbidden bool
@ -121,6 +122,7 @@ type ParamGroup struct {
KeyPrefix string // which should be named as "A.B."
Version string
Doc string
Export bool
GetFunc func() map[string]string

View File

@ -106,6 +106,8 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.enabled",
Version: "2.2.0",
DefaultValue: "false",
Doc: "`true` to enable quota and limits, `false` to disable.",
Export: true,
}
p.QuotaAndLimitsEnabled.Init(base.mgr)
@ -121,6 +123,10 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: `quotaCenterCollectInterval is the time interval that quotaCenter
collects metrics from Proxies, Query cluster and Data cluster.
seconds, (0 ~ 65536)`,
Export: true,
}
p.QuotaCenterCollectInterval.Init(base.mgr)
@ -131,6 +137,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.ddl.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.DDLLimitEnabled.Init(base.mgr)
@ -148,6 +155,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection",
Export: true,
}
p.DDLCollectionRate.Init(base.mgr)
@ -165,6 +174,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition",
Export: true,
}
p.DDLPartitionRate.Init(base.mgr)
@ -172,6 +183,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.indexRate.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.IndexLimitEnabled.Init(base.mgr)
@ -189,6 +201,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit, rate for CreateIndex, DropIndex",
Export: true,
}
p.MaxIndexRate.Init(base.mgr)
@ -196,6 +210,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.flushRate.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.FlushLimitEnabled.Init(base.mgr)
@ -213,6 +228,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit, rate for flush",
Export: true,
}
p.MaxFlushRate.Init(base.mgr)
@ -220,6 +237,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.compactionRate.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.CompactionLimitEnabled.Init(base.mgr)
@ -237,6 +255,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit, rate for manualCompaction",
Export: true,
}
p.MaxCompactionRate.Init(base.mgr)
@ -245,6 +265,9 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.dml.enabled",
Version: "2.2.0",
DefaultValue: "false",
Doc: `dml limit rates, default no limit.
The maximum rate will not be greater than ` + "max" + `.`,
Export: true,
}
p.DMLLimitEnabled.Init(base.mgr)
@ -265,6 +288,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "MB/s, default no limit",
Export: true,
}
p.DMLMaxInsertRate.Init(base.mgr)
@ -307,6 +332,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "MB/s, default no limit",
Export: true,
}
p.DMLMaxDeleteRate.Init(base.mgr)
@ -349,6 +376,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "MB/s, default no limit, not support yet. TODO: limit bulkLoad rate",
Export: true,
}
p.DMLMaxBulkLoadRate.Init(base.mgr)
@ -378,6 +407,9 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.dql.enabled",
Version: "2.2.0",
DefaultValue: "false",
Doc: `dql limit rates, default no limit.
The maximum rate will not be greater than ` + "max" + `.`,
Export: true,
}
p.DQLLimitEnabled.Init(base.mgr)
@ -395,6 +427,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "vps (vectors per second), default no limit",
Export: true,
}
p.DQLMaxSearchRate.Init(base.mgr)
@ -433,6 +467,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "qps, default no limit",
Export: true,
}
p.DQLMaxQueryRate.Init(base.mgr)
@ -470,13 +506,17 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitWriting.forceDeny",
Version: "2.2.0",
DefaultValue: "false",
Doc: `forceDeny ` + "false" + ` means dml requests are allowed (except for some
specific conditions, such as memory of nodes to water marker), ` + "true" + ` means always reject all dml requests.`,
Export: true,
}
p.ForceDenyWriting.Init(base.mgr)
p.TtProtectionEnabled = ParamItem{
Key: "quotaAndLimits.limitWriting.ttProtection.enabled",
Version: "2.2.0",
DefaultValue: "true",
DefaultValue: "false",
Export: true,
}
p.TtProtectionEnabled.Init(base.mgr)
@ -496,6 +536,11 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return fmt.Sprintf("%f", delay)
},
Doc: `maxTimeTickDelay indicates the backpressure for DML Operations.
DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay,
if time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
seconds`,
Export: true,
}
p.MaxTimeTickDelay.Init(base.mgr)
@ -503,6 +548,10 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitWriting.memProtection.enabled",
Version: "2.2.0",
DefaultValue: "true",
Doc: `When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
When memory usage < memoryLowWaterLevel, no action.`,
Export: true,
}
p.MemProtectionEnabled.Init(base.mgr)
@ -524,6 +573,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "(0, 1], memoryLowWaterLevel in DataNodes",
Export: true,
}
p.DataNodeMemoryLowWaterLevel.Init(base.mgr)
@ -546,6 +597,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "(0, 1], memoryHighWaterLevel in DataNodes",
Export: true,
}
p.DataNodeMemoryHighWaterLevel.Init(base.mgr)
@ -565,6 +618,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "(0, 1], memoryLowWaterLevel in QueryNodes",
Export: true,
}
p.QueryNodeMemoryLowWaterLevel.Init(base.mgr)
@ -587,6 +642,8 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: "(0, 1], memoryHighWaterLevel in QueryNodes",
Export: true,
}
p.QueryNodeMemoryHighWaterLevel.Init(base.mgr)
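The memProtection Doc above (no action below memoryLowWaterLevel, rejection above memoryHighWaterLevel, reduced rates in between) can be read as a single scaling factor; a hedged sketch, not the real implementation:
// memBackpressureFactor is illustrative only: usage, low and high are fractions
// of total memory in (0, 1]. Below low nothing happens, at or above high all
// DML is rejected, and in between the rate is reduced linearly.
func memBackpressureFactor(usage, low, high float64) float64 {
	switch {
	case usage < low:
		return 1
	case usage >= high:
		return 0
	default:
		return 1 - (usage-low)/(high-low)
	}
}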
@ -594,6 +651,8 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitWriting.diskProtection.enabled",
Version: "2.2.0",
DefaultValue: "true",
Doc: "When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;",
Export: true,
}
p.DiskProtectionEnabled.Init(base.mgr)
@ -614,6 +673,8 @@ func (p *quotaConfig) init(base *BaseTable) {
// megabytes to bytes
return fmt.Sprintf("%f", megaBytes2Bytes(level))
},
Doc: "MB, (0, +inf), default no limit",
Export: true,
}
p.DiskQuota.Init(base.mgr)
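The diskQuota Formatter simply rescales the configured megabytes into bytes before the value is cached (assuming megaBytes2Bytes multiplies by 1024*1024); as a worked example:
// illustrative only: 1024 configured MB become "1073741824.000000" bytes,
// which is the string form the Formatter above would produce.
func megaBytesToBytesString(mb float64) string {
	return fmt.Sprintf("%f", mb*1024*1024)
}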
@ -622,6 +683,9 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitReading.forceDeny",
Version: "2.2.0",
DefaultValue: "false",
Doc: `forceDeny ` + "false" + ` means dql requests are allowed (except for some
specific conditions, such as the collection having been dropped), ` + "true" + ` means always reject all dql requests.`,
Export: true,
}
p.ForceDenyReading.Init(base.mgr)
@ -629,6 +693,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitReading.queueProtection.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.QueueProtectionEnabled.Init(base.mgr)
@ -647,6 +712,11 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: `nqInQueueThreshold indicates that the system is under backpressure for the Search/Query path.
If NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off
until the NQ in queue no longer exceeds nqInQueueThreshold. The NQ of a query request is counted as 1.
int, default no limit`,
Export: true,
}
p.NQInQueueThreshold.Init(base.mgr)
@ -665,6 +735,12 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: `queueLatencyThreshold indicates that the system is under backpressure for the Search/Query path.
If dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off
until the latency of queuing no longer exceeds queueLatencyThreshold.
The latency here refers to the averaged latency over a period of time.
milliseconds, default no limit`,
Export: true,
}
p.QueueLatencyThreshold.Init(base.mgr)
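Together, the two queueProtection thresholds above describe when the read path is considered backpressured; a minimal sketch of that check (illustrative names, not the real quota center):
// readQueueBackpressured is a hypothetical predicate combining the two
// queueProtection items: too many NQ waiting in a QueryNode queue, or the
// averaged queuing latency (in ms) above its threshold. A non-positive
// threshold means "no limit" and is ignored.
func readQueueBackpressured(nqInQueue, nqThreshold int64, avgQueueLatencyMs, latencyThresholdMs float64) bool {
	if nqThreshold > 0 && nqInQueue > nqThreshold {
		return true
	}
	if latencyThresholdMs > 0 && avgQueueLatencyMs > latencyThresholdMs {
		return true
	}
	return false
}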
@ -672,6 +748,7 @@ func (p *quotaConfig) init(base *BaseTable) {
Key: "quotaAndLimits.limitReading.resultProtection.enabled",
Version: "2.2.0",
DefaultValue: "false",
Export: true,
}
p.ResultProtectionEnabled.Init(base.mgr)
@ -693,6 +770,11 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: `maxReadResultRate indicates that the system is under backpressure for the Search/Query path.
If dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off
until the read result rate no longer exceeds maxReadResultRate.
MB/s, default no limit`,
Export: true,
}
p.MaxReadResultRate.Init(base.mgr)
@ -710,6 +792,9 @@ func (p *quotaConfig) init(base *BaseTable) {
}
return v
},
Doc: `coolOffSpeed is the speed at which search&query rates cool off.
(0, 1]`,
Export: true,
}
p.CoolOffSpeed.Init(base.mgr)
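coolOffSpeed then determines how fast search&query rates shrink while any of the protections above stays triggered; one cool-off step could be sketched as follows (illustrative, assuming some minimum rate floor):
// coolOffStep is illustrative only: while backpressure is detected the current
// rate is multiplied by coolOffSpeed (a value in (0, 1]); once the trigger
// clears, a caller would gradually restore the configured maximum instead.
func coolOffStep(currentRate, coolOffSpeed, minRate float64) float64 {
	next := currentRate * coolOffSpeed
	if next < minRate {
		return minRate
	}
	return next
}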

View File

@ -106,7 +106,9 @@ func (p *EtcdConfig) Init(base *BaseTable) {
p.Endpoints = ParamItem{
Key: "etcd.endpoints",
Version: "2.0.0",
DefaultValue: "localhost:2379",
PanicIfEmpty: true,
Export: true,
}
p.Endpoints.Init(base.mgr)
@ -114,6 +116,8 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.use.embed",
DefaultValue: "false",
Version: "2.1.0",
Doc: "Whether to enable embedded Etcd (an in-process EtcdServer).",
Export: true,
}
p.UseEmbedEtcd.Init(base.mgr)
@ -122,9 +126,9 @@ func (p *EtcdConfig) Init(base *BaseTable) {
}
p.ConfigPath = ParamItem{
Key: "etcd.config.path",
DefaultValue: "",
Version: "2.1.0",
Key: "etcd.config.path",
Version: "2.1.0",
Export: false,
}
p.ConfigPath.Init(base.mgr)
@ -132,20 +136,28 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.data.dir",
DefaultValue: "default.etcd",
Version: "2.1.0",
Doc: `Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/`,
Export: true,
}
p.DataDir.Init(base.mgr)
p.RootPath = ParamItem{
Key: "etcd.rootPath",
Version: "2.0.0",
DefaultValue: "by-dev",
PanicIfEmpty: true,
Doc: "The root path where data is stored in etcd",
Export: true,
}
p.RootPath.Init(base.mgr)
p.MetaSubPath = ParamItem{
Key: "etcd.metaSubPath",
Version: "2.0.0",
DefaultValue: "meta",
PanicIfEmpty: true,
Doc: "metaRootPath = rootPath + '/' + metaSubPath",
Export: true,
}
p.MetaSubPath.Init(base.mgr)
@ -159,7 +171,10 @@ func (p *EtcdConfig) Init(base *BaseTable) {
p.KvSubPath = ParamItem{
Key: "etcd.kvSubPath",
Version: "2.0.0",
DefaultValue: "kv",
PanicIfEmpty: true,
Doc: "kvRootPath = rootPath + '/' + kvSubPath",
Export: true,
}
p.KvSubPath.Init(base.mgr)
@ -174,6 +189,8 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.log.level",
DefaultValue: defaultEtcdLogLevel,
Version: "2.0.0",
Doc: "Only supports debug, info, warn, error, panic, or fatal. Default 'info'.",
Export: true,
}
p.EtcdLogLevel.Init(base.mgr)
@ -181,6 +198,13 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.log.path",
DefaultValue: defaultEtcdLogPath,
Version: "2.0.0",
Doc: `path is one of:
- "default" as os.Stderr,
- "stderr" as os.Stderr,
- "stdout" as os.Stdout,
- file path to append server logs to.
please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log`,
Export: true,
}
p.EtcdLogPath.Init(base.mgr)
@ -188,24 +212,32 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.ssl.enabled",
DefaultValue: "false",
Version: "2.0.0",
Doc: "Whether to support ETCD secure connection mode",
Export: true,
}
p.EtcdUseSSL.Init(base.mgr)
p.EtcdTLSCert = ParamItem{
Key: "etcd.ssl.tlsCert",
Version: "2.0.0",
Doc: "path to your cert file",
Export: true,
}
p.EtcdTLSCert.Init(base.mgr)
p.EtcdTLSKey = ParamItem{
Key: "etcd.ssl.tlsKey",
Version: "2.0.0",
Doc: "path to your key file",
Export: true,
}
p.EtcdTLSKey.Init(base.mgr)
p.EtcdTLSCACert = ParamItem{
Key: "etcd.ssl.tlsCACert",
Version: "2.0.0",
Doc: "path to your CACert file",
Export: true,
}
p.EtcdTLSCACert.Init(base.mgr)
@ -213,6 +245,10 @@ func (p *EtcdConfig) Init(base *BaseTable) {
Key: "etcd.ssl.tlsMinVersion",
DefaultValue: "1.3",
Version: "2.0.0",
Doc: `TLS min version
Optional values: 1.0, 1.1, 1.2, 1.3
We recommend using version 1.2 and above.`,
Export: true,
}
p.EtcdTLSMinVersion.Init(base.mgr)
}
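As a hedged illustration of how the etcd.ssl.* items above are typically consumed (this is not the Milvus etcd client code; only the standard crypto/tls constants are used), the tlsMinVersion string maps onto a tls.Config minimum version like this:
// minTLSVersion is an illustrative mapping from etcd.ssl.tlsMinVersion to the
// crypto/tls constants; unknown values fall back to TLS 1.3 here, in line with
// the recommendation in the Doc above.
func minTLSVersion(v string) uint16 {
	switch v {
	case "1.0":
		return tls.VersionTLS10
	case "1.1":
		return tls.VersionTLS11
	case "1.2":
		return tls.VersionTLS12
	default:
		return tls.VersionTLS13
	}
}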
@ -226,6 +262,8 @@ func (p *LocalStorageConfig) Init(base *BaseTable) {
Key: "localStorage.path",
Version: "2.0.0",
DefaultValue: "/var/lib/milvus/data",
Doc: "please adjust in embedded Milvus: /tmp/milvus/data/",
Export: true,
}
p.Path.Init(base.mgr)
}
@ -239,6 +277,9 @@ func (p *MetaStoreConfig) Init(base *BaseTable) {
Key: "metastore.type",
Version: "2.2.0",
DefaultValue: util.MetaStoreTypeEtcd,
Doc: `Default value: etcd
Valid values: [etcd, mysql]`,
Export: true,
}
p.MetaStoreType.Init(base.mgr)
}
@ -253,7 +294,6 @@ type MetaDBConfig struct {
DBName ParamItem `refreshable:"false"`
MaxOpenConns ParamItem `refreshable:"false"`
MaxIdleConns ParamItem `refreshable:"false"`
LogLevel ParamItem `refreshable:"false"`
}
func (p *MetaDBConfig) Init(base *BaseTable) {
@ -261,6 +301,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.username",
Version: "2.2.0",
PanicIfEmpty: true,
Export: true,
}
p.Username.Init(base.mgr)
@ -268,6 +309,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.password",
Version: "2.2.0",
PanicIfEmpty: true,
Export: true,
}
p.Password.Init(base.mgr)
@ -275,6 +317,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.address",
Version: "2.2.0",
PanicIfEmpty: true,
Export: true,
}
p.Address.Init(base.mgr)
@ -282,6 +325,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.port",
Version: "2.2.0",
DefaultValue: "3306",
Export: true,
}
p.Port.Init(base.mgr)
@ -289,6 +333,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.dbName",
Version: "2.2.0",
PanicIfEmpty: true,
Export: true,
}
p.DBName.Init(base.mgr)
@ -296,6 +341,7 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.maxOpenConns",
Version: "2.2.0",
DefaultValue: "20",
Export: true,
}
p.MaxOpenConns.Init(base.mgr)
@ -303,15 +349,10 @@ func (p *MetaDBConfig) Init(base *BaseTable) {
Key: "mysql.maxIdleConns",
Version: "2.2.0",
DefaultValue: "5",
Export: true,
}
p.MaxIdleConns.Init(base.mgr)
p.LogLevel = ParamItem{
Key: "log.level",
Version: "2.0.0",
DefaultValue: "debug",
}
p.LogLevel.Init(base.mgr)
}
// /////////////////////////////////////////////////////////////////////////////
@ -337,6 +378,8 @@ func (p *PulsarConfig) Init(base *BaseTable) {
Key: "pulsar.port",
Version: "2.0.0",
DefaultValue: "6650",
Doc: "Port of Pulsar",
Export: true,
}
p.Port.Init(base.mgr)
@ -355,6 +398,8 @@ func (p *PulsarConfig) Init(base *BaseTable) {
port, _ := p.Port.get()
return "pulsar://" + addr + ":" + port
},
Doc: "Address of pulsar",
Export: true,
}
p.Address.Init(base.mgr)
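The Formatter above is the general pattern for derived values: the raw configured value is rewritten before it is used. A standalone sketch of such a closure (illustrative, outside of paramtable):
// newURIFormatter is an illustrative stand-in for the pulsar.address Formatter:
// whatever raw host is configured, the stored value becomes a full
// "scheme://host:port" URI.
func newURIFormatter(scheme, port string) func(string) string {
	return func(host string) string {
		return scheme + "://" + host + ":" + port
	}
}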
@ -362,6 +407,8 @@ func (p *PulsarConfig) Init(base *BaseTable) {
Key: "pulsar.webport",
Version: "2.0.0",
DefaultValue: "80",
Doc: "Web port of pulsar, if you connect directly without proxy, should use 8080",
Export: true,
}
p.WebPort.Init(base.mgr)
@ -384,6 +431,8 @@ func (p *PulsarConfig) Init(base *BaseTable) {
Key: "pulsar.maxMessageSize",
Version: "2.0.0",
DefaultValue: strconv.Itoa(SuggestPulsarMaxMessageSize),
Doc: "5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.",
Export: true,
}
p.MaxMessageSize.Init(base.mgr)
@ -391,6 +440,7 @@ func (p *PulsarConfig) Init(base *BaseTable) {
Key: "pulsar.tenant",
Version: "2.2.0",
DefaultValue: "public",
Export: true,
}
p.Tenant.Init(base.mgr)
@ -398,6 +448,7 @@ func (p *PulsarConfig) Init(base *BaseTable) {
Key: "pulsar.namespace",
Version: "2.2.0",
DefaultValue: "default",
Export: true,
}
p.Namespace.Init(base.mgr)
@ -445,6 +496,7 @@ func (k *KafkaConfig) Init(base *BaseTable) {
Key: "kafka.brokerList",
DefaultValue: "",
Version: "2.1.0",
Export: true,
}
k.Address.Init(base.mgr)
@ -452,6 +504,7 @@ func (k *KafkaConfig) Init(base *BaseTable) {
Key: "kafka.saslUsername",
DefaultValue: "",
Version: "2.1.0",
Export: true,
}
k.SaslUsername.Init(base.mgr)
@ -459,6 +512,7 @@ func (k *KafkaConfig) Init(base *BaseTable) {
Key: "kafka.saslPassword",
DefaultValue: "",
Version: "2.1.0",
Export: true,
}
k.SaslPassword.Init(base.mgr)
@ -466,6 +520,7 @@ func (k *KafkaConfig) Init(base *BaseTable) {
Key: "kafka.saslMechanisms",
DefaultValue: "PLAIN",
Version: "2.1.0",
Export: true,
}
k.SaslMechanisms.Init(base.mgr)
@ -473,6 +528,7 @@ func (k *KafkaConfig) Init(base *BaseTable) {
Key: "kafka.securityProtocol",
DefaultValue: "SASL_SSL",
Version: "2.1.0",
Export: true,
}
k.SecurityProtocol.Init(base.mgr)
@ -509,6 +565,9 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
r.Path = ParamItem{
Key: "rocksmq.path",
Version: "2.0.0",
Doc: `The path where the message is stored in rocksmq
please adjust in embedded Milvus: /tmp/milvus/rdb_data`,
Export: true,
}
r.Path.Init(base.mgr)
@ -516,6 +575,8 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
Key: "rocksmq.lrucacheratio",
DefaultValue: "0.0.6",
Version: "2.0.0",
Doc: "rocksdb cache memory ratio",
Export: true,
}
r.LRUCacheRatio.Init(base.mgr)
@ -523,6 +584,8 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
Key: "rocksmq.rocksmqPageSize",
DefaultValue: strconv.FormatInt(256<<20, 10),
Version: "2.0.0",
Doc: "256 MB, 256 * 1024 * 1024 bytes, The size of each page of messages in rocksmq",
Export: true,
}
r.PageSize.Init(base.mgr)
@ -530,6 +593,8 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
Key: "rocksmq.retentionTimeInMinutes",
DefaultValue: "7200",
Version: "2.0.0",
Doc: "5 days, 5 * 24 * 60 minutes, The retention time of the message in rocksmq.",
Export: true,
}
r.RetentionTimeInMinutes.Init(base.mgr)
@ -537,6 +602,8 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
Key: "rocksmq.retentionSizeInMB",
DefaultValue: "7200",
Version: "2.0.0",
Doc: "7200 MB, The retention size of the message in rocksmq.",
Export: true,
}
r.RetentionSizeInMB.Init(base.mgr)
@ -544,6 +611,8 @@ func (r *RocksmqConfig) Init(base *BaseTable) {
Key: "rocksmq.compactionInterval",
DefaultValue: "86400",
Version: "2.0.0",
Doc: "1 day, trigger rocksdb compaction every day to remove deleted data",
Export: true,
}
r.CompactionInterval.Init(base.mgr)
@ -575,6 +644,8 @@ func (p *MinioConfig) Init(base *BaseTable) {
Key: "minio.port",
DefaultValue: "9000",
Version: "2.0.0",
Doc: "Port of MinIO/S3",
Export: true,
}
p.Port.Init(base.mgr)
@ -592,34 +663,48 @@ func (p *MinioConfig) Init(base *BaseTable) {
port, _ := p.Port.get()
return addr + ":" + port
},
Doc: "Address of MinIO/S3",
Export: true,
}
p.Address.Init(base.mgr)
p.AccessKeyID = ParamItem{
Key: "minio.accessKeyID",
Version: "2.0.0",
DefaultValue: "minioadmin",
PanicIfEmpty: false, // tmp fix, need to be conditional
Doc: "accessKeyID of MinIO/S3",
Export: true,
}
p.AccessKeyID.Init(base.mgr)
p.SecretAccessKey = ParamItem{
Key: "minio.secretAccessKey",
Version: "2.0.0",
DefaultValue: "minioadmin",
PanicIfEmpty: false, // tmp fix, need to be conditional
Doc: "MinIO/S3 encryption string",
Export: true,
}
p.SecretAccessKey.Init(base.mgr)
p.UseSSL = ParamItem{
Key: "minio.useSSL",
Version: "2.0.0",
DefaultValue: "false",
PanicIfEmpty: true,
Doc: "Access to MinIO/S3 with SSL",
Export: true,
}
p.UseSSL.Init(base.mgr)
p.BucketName = ParamItem{
Key: "minio.bucketName",
Version: "2.0.0",
DefaultValue: "a-bucket",
PanicIfEmpty: true,
Doc: "Bucket name in MinIO/S3",
Export: true,
}
p.BucketName.Init(base.mgr)
@ -627,6 +712,8 @@ func (p *MinioConfig) Init(base *BaseTable) {
Key: "minio.rootPath",
Version: "2.0.0",
PanicIfEmpty: true,
Doc: "The root path where the message is stored in MinIO/S3",
Export: true,
}
p.RootPath.Init(base.mgr)
@ -634,6 +721,11 @@ func (p *MinioConfig) Init(base *BaseTable) {
Key: "minio.useIAM",
DefaultValue: DefaultMinioUseIAM,
Version: "2.0.0",
Doc: `Whether to ` + "useIAM" + ` role to access S3/GCS instead of access/secret keys
For more information, refer to
aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
gcp: https://cloud.google.com/storage/docs/access-control/iam`,
Export: true,
}
p.UseIAM.Init(base.mgr)
@ -641,6 +733,11 @@ func (p *MinioConfig) Init(base *BaseTable) {
Key: "minio.cloudProvider",
DefaultValue: DefaultMinioCloudProvider,
Version: "2.2.0",
Doc: `Cloud Provider of S3. Supports: "aws", "gcp".
You can use "aws" for other cloud providers that support the S3 API with signature v4, e.g.: minio
You can use "gcp" for other cloud providers that support the S3 API with signature v2
When ` + "useIAM" + ` is enabled, only "aws" & "gcp" are supported for now`,
Export: true,
}
p.CloudProvider.Init(base.mgr)
@ -648,6 +745,9 @@ func (p *MinioConfig) Init(base *BaseTable) {
Key: "minio.iamEndpoint",
DefaultValue: DefaultMinioIAMEndpoint,
Version: "2.0.0",
Doc: `Custom endpoint for fetching IAM role credentials, when useIAM is true & cloudProvider is "aws".
Leave it empty if you want to use the AWS default endpoint`,
Export: true,
}
p.IAMEndpoint.Init(base.mgr)
}
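A hedged sketch of how the useIAM, iamEndpoint and access-key items above interact (purely illustrative; the real object-storage client is not shown): with IAM enabled the static keys are ignored, and for "aws" the optional iamEndpoint overrides the default credential endpoint.
// storageCredentials is a hypothetical summary of the decision described in
// the Docs above; it is not a type from the Milvus storage layer.
type storageCredentials struct {
	useIAM      bool
	iamEndpoint string // only meaningful when useIAM is true and cloudProvider is "aws"
	accessKey   string
	secretKey   string
}

func pickCredentials(useIAM bool, iamEndpoint, accessKey, secretKey string) storageCredentials {
	if useIAM {
		// with IAM roles the static keys are not used at all
		return storageCredentials{useIAM: true, iamEndpoint: iamEndpoint}
	}
	return storageCredentials{accessKey: accessKey, secretKey: secretKey}
}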

View File

@ -0,0 +1,52 @@
package typeutil
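// OrderedMap is a map that additionally remembers the order in which keys were
// first inserted, so Keys() iterates deterministically.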
type OrderedMap[K comparable, V any] struct {
keys []K
values map[K]V
}
func NewOrderedMap[K comparable, V any]() *OrderedMap[K, V] {
o := OrderedMap[K, V]{}
o.keys = []K{}
o.values = map[K]V{}
return &o
}
func (o *OrderedMap[K, V]) Get(key K) (V, bool) {
val, exists := o.values[key]
return val, exists
}
func (o *OrderedMap[K, V]) Set(key K, value V) {
_, exists := o.values[key]
if !exists {
o.keys = append(o.keys, key)
}
o.values[key] = value
}
func (o *OrderedMap[K, V]) Delete(key K) {
// check key is in use
_, ok := o.values[key]
if !ok {
return
}
// remove from keys
for i, k := range o.keys {
if k == key {
o.keys = append(o.keys[:i], o.keys[i+1:]...)
break
}
}
// remove from values
delete(o.values, key)
}
func (o *OrderedMap[K, V]) Keys() []K {
return o.keys
}
// SortKeys sorts the map keys in place using the provided sort func
func (o *OrderedMap[K, V]) SortKeys(sortFunc func(keys []K)) {
sortFunc(o.keys)
}
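A short usage sketch (illustrative, not part of the package; assumes the fmt and sort imports): Keys preserves insertion order, and SortKeys reorders the keys in place with a caller-provided sort.
func exampleOrderedMapUsage() {
	m := NewOrderedMap[string, int]()
	m.Set("b", 2)
	m.Set("a", 1)
	fmt.Println(m.Keys()) // [b a], insertion order is kept
	m.SortKeys(sort.Strings)
	fmt.Println(m.Keys()) // [a b]
}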

View File

@ -0,0 +1,46 @@
package typeutil
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestOrderedMap(t *testing.T) {
o := NewOrderedMap[string, int]()
// number
o.Set("number", 3)
v, _ := o.Get("number")
assert.Equal(t, 3, v)
// overriding existing key
o.Set("number", 4)
v, _ = o.Get("number")
assert.Equal(t, 4, v)
o.Set("number2", 2)
o.Set("number3", 3)
o.Set("number4", 4)
// Keys method
keys := o.Keys()
expectedKeys := []string{
"number",
"number2",
"number3",
"number4",
}
for i, key := range keys {
assert.Equal(t, expectedKeys[i], key, "Keys method %s != %s", key, expectedKeys[i])
}
for i, key := range expectedKeys {
assert.Equal(t, keys[i], key, "Keys method %s != %s", keys[i], key)
}
// delete
o.Delete("number2")
o.Delete("not a key being used")
assert.Equal(t, 3, len(o.Keys()))
_, ok := o.Get("number2")
assert.False(t, ok, "Delete did not remove 'number2' key")
}