mirror of https://github.com/milvus-io/milvus.git
Reduce MQ buffer length and flowgraph wait queue length to 16 (#26179)
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Branch: pull/26224/head
parent 21eeb37ce9
commit 767955ec6b
@@ -232,7 +232,7 @@ queryCoord:
 queryNode:
   dataSync:
     flowGraph:
-      maxQueueLength: 1024 # Maximum length of task queue in flowgraph
+      maxQueueLength: 16 # Maximum length of task queue in flowgraph
       maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
   stats:
     publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
@@ -369,7 +369,7 @@ dataCoord:
 dataNode:
   dataSync:
     flowGraph:
-      maxQueueLength: 1024 # Maximum length of task queue in flowgraph
+      maxQueueLength: 16 # Maximum length of task queue in flowgraph
       maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
       maxParallelSyncTaskNum: 2 # Maximum number of sync tasks executed in parallel in each flush manager
   segment:
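Note on the two config hunks above: only maxQueueLength drops from 1024 to 16; maxParallelism stays at 1024, so the change shrinks how many tasks may sit waiting in a flowgraph node, not how many can execute at once. A minimal, self-contained sketch of the idea follows; it is not Milvus's flowgraph implementation, and every name in it is illustrative.

package main

import "fmt"

// task stands in for a flowgraph message; the real type is richer.
type task int

func main() {
    const maxQueueLength = 16 // the new default; was 1024

    // A buffered channel models the flowgraph wait queue: up to
    // maxQueueLength tasks may sit in it before producers block.
    queue := make(chan task, maxQueueLength)

    done := make(chan struct{})
    go func() { // consumer node draining the queue
        for t := range queue {
            _ = t // process the task
        }
        close(done)
    }()

    for i := 0; i < 100; i++ {
        // Once the buffer holds 16 tasks this send blocks, pushing
        // backpressure onto the producer instead of letting ~1024
        // in-flight messages accumulate in memory per queue.
        queue <- task(i)
    }
    close(queue)
    <-done
    fmt.Println("all tasks drained")
}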
@@ -5,11 +5,12 @@ import (
     "github.com/milvus-io/milvus/internal/mq/msgstream/mqwrapper/rmq"
     "github.com/milvus-io/milvus/pkg/log"
     "github.com/milvus-io/milvus/pkg/mq/msgstream"
+    "github.com/milvus-io/milvus/pkg/util/paramtable"
     "go.uber.org/zap"
 )
 
 // NewRocksmqFactory creates a new message stream factory based on rocksmq.
-func NewRocksmqFactory(path string) msgstream.Factory {
+func NewRocksmqFactory(path string, cfg *paramtable.ServiceParam) msgstream.Factory {
     if err := server.InitRocksMQ(path); err != nil {
         log.Fatal("fail to init rocksmq", zap.Error(err))
     }
@@ -18,7 +19,7 @@ func NewRocksmqFactory(path string) msgstream.Factory {
     return &msgstream.CommonFactory{
         Newer:             rmq.NewClientWithDefaultOptions,
         DispatcherFactory: msgstream.ProtoUDFactory{},
-        ReceiveBufSize:    1024,
-        MQBufSize:         1024,
+        ReceiveBufSize:    cfg.MQCfg.ReceiveBufSize.GetAsInt64(),
+        MQBufSize:         cfg.MQCfg.MQBufSize.GetAsInt64(),
     }
 }
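The rocksmq factory above stops hard-coding 1024 and reads both buffer lengths from the injected *paramtable.ServiceParam. A self-contained sketch of the same injection pattern; ServiceParam and CommonFactory here are simplified stand-ins, not the real milvus definitions.

package main

import "fmt"

// ServiceParam is a stand-in for paramtable.ServiceParam.
type ServiceParam struct {
    MQCfg struct {
        ReceiveBufSize int64
        MQBufSize      int64
    }
}

// CommonFactory mirrors the shape of msgstream.CommonFactory: the
// buffer sizes are data supplied by the caller, not baked-in literals.
type CommonFactory struct {
    ReceiveBufSize int64
    MQBufSize      int64
}

// A NewRocksmqFactory-style constructor: configuration is injected,
// so one pair of config keys controls every factory uniformly.
func newFactory(cfg *ServiceParam) *CommonFactory {
    return &CommonFactory{
        ReceiveBufSize: cfg.MQCfg.ReceiveBufSize,
        MQBufSize:      cfg.MQCfg.MQBufSize,
    }
}

func main() {
    var cfg ServiceParam
    cfg.MQCfg.ReceiveBufSize = 16 // the new defaults
    cfg.MQCfg.MQBufSize = 16
    fmt.Printf("%+v\n", *newFactory(&cfg))
}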
@@ -32,7 +32,7 @@ func TestRmsFactory(t *testing.T) {
 
     dir := t.TempDir()
 
-    rmsFactory := NewRocksmqFactory(dir)
+    rmsFactory := NewRocksmqFactory(dir, &paramtable.Get().ServiceParam)
 
     ctx := context.Background()
     _, err := rmsFactory.NewMsgStream(ctx)
@@ -38,7 +38,7 @@ type DefaultFactory struct {
 func NewDefaultFactory(standAlone bool) *DefaultFactory {
     return &DefaultFactory{
         standAlone:       standAlone,
-        msgStreamFactory: smsgstream.NewRocksmqFactory("/tmp/milvus/rocksmq/"),
+        msgStreamFactory: smsgstream.NewRocksmqFactory("/tmp/milvus/rocksmq/", &paramtable.Get().ServiceParam),
         chunkManagerFactory: storage.NewChunkManagerFactory("local",
             storage.RootPath("/tmp/milvus")),
     }
@@ -48,7 +48,7 @@ func NewDefaultFactory(standAlone bool) *DefaultFactory {
 func MockDefaultFactory(standAlone bool, params *paramtable.ComponentParam) *DefaultFactory {
     return &DefaultFactory{
         standAlone:          standAlone,
-        msgStreamFactory:    smsgstream.NewRocksmqFactory("/tmp/milvus/rocksmq/"),
+        msgStreamFactory:    smsgstream.NewRocksmqFactory("/tmp/milvus/rocksmq/", &paramtable.Get().ServiceParam),
         chunkManagerFactory: storage.NewChunkManagerFactoryWithParam(params),
     }
 }
@@ -86,11 +86,11 @@ func (f *DefaultFactory) initMQ(standalone bool, params *paramtable.ComponentPar
     case mqTypeNatsmq:
         f.msgStreamFactory = msgstream.NewNatsmqFactory()
     case mqTypeRocksmq:
-        f.msgStreamFactory = smsgstream.NewRocksmqFactory(params.RocksmqCfg.Path.GetValue())
+        f.msgStreamFactory = smsgstream.NewRocksmqFactory(params.RocksmqCfg.Path.GetValue(), &params.ServiceParam)
     case mqTypePulsar:
-        f.msgStreamFactory = msgstream.NewPmsFactory(&params.PulsarCfg)
+        f.msgStreamFactory = msgstream.NewPmsFactory(&params.ServiceParam)
     case mqTypeKafka:
-        f.msgStreamFactory = msgstream.NewKmsFactory(&params.KafkaCfg)
+        f.msgStreamFactory = msgstream.NewKmsFactory(&params.ServiceParam)
     }
     if f.msgStreamFactory == nil {
         return errors.New("failed to create MQ: check the milvus log for initialization failures")
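After this hunk every MQ factory constructor takes *paramtable.ServiceParam, so initMQ hands each backend the same object and each factory picks out its own sub-config plus the shared MQCfg knobs. A rough, self-contained sketch of that design; all types and addresses below are illustrative stand-ins.

package main

import "fmt"

// Stand-ins for the paramtable types; the real ServiceParam holds
// PulsarCfg, KafkaCfg, RocksmqCfg and MQCfg side by side.
type PulsarConfig struct{ Address string }
type KafkaConfig struct{ Address string }
type MQConfig struct{ MQBufSize, ReceiveBufSize int64 }

type ServiceParam struct {
    PulsarCfg PulsarConfig
    KafkaCfg  KafkaConfig
    MQCfg     MQConfig
}

// Each factory receives the whole ServiceParam and selects its own
// backend section, keeping the shared MQCfg in reach (this mirrors
// the NewPmsFactory/NewKmsFactory signatures in the hunks below).
func newPmsFactory(sp *ServiceParam) string {
    cfg := &sp.PulsarCfg
    return fmt.Sprintf("pulsar@%s buf=%d", cfg.Address, sp.MQCfg.MQBufSize)
}

func newKmsFactory(sp *ServiceParam) string {
    cfg := &sp.KafkaCfg
    return fmt.Sprintf("kafka@%s buf=%d", cfg.Address, sp.MQCfg.MQBufSize)
}

func main() {
    sp := &ServiceParam{
        PulsarCfg: PulsarConfig{Address: "pulsar://localhost:6650"},
        KafkaCfg:  KafkaConfig{Address: "localhost:9092"},
        MQCfg:     MQConfig{MQBufSize: 16, ReceiveBufSize: 16},
    }
    fmt.Println(newPmsFactory(sp))
    fmt.Println(newKmsFactory(sp))
}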
@@ -47,7 +47,7 @@ func TestMain(m *testing.M) {
 }
 
 func newMockFactory() msgstream.Factory {
-    return msgstream.NewPmsFactory(&Params.PulsarCfg)
+    return msgstream.NewPmsFactory(&Params.ServiceParam)
 }
 
 func newMockProducer(factory msgstream.Factory, pchannel string) (msgstream.MsgStream, error) {
@@ -42,17 +42,18 @@ type PmsFactory struct {
     PulsarAddress    string
     PulsarWebAddress string
     ReceiveBufSize   int64
-    PulsarBufSize    int64
+    MQBufSize        int64
     PulsarAuthPlugin string
     PulsarAuthParams string
     PulsarTenant     string
     PulsarNameSpace  string
 }
 
-func NewPmsFactory(config *paramtable.PulsarConfig) *PmsFactory {
+func NewPmsFactory(serviceParam *paramtable.ServiceParam) *PmsFactory {
+    config := &serviceParam.PulsarCfg
     return &PmsFactory{
-        PulsarBufSize:    1024,
-        ReceiveBufSize:   1024,
+        MQBufSize:        serviceParam.MQCfg.MQBufSize.GetAsInt64(),
+        ReceiveBufSize:   serviceParam.MQCfg.ReceiveBufSize.GetAsInt64(),
         PulsarAddress:    config.Address.GetValue(),
         PulsarWebAddress: config.WebAddress.GetValue(),
         PulsarAuthPlugin: config.AuthPlugin.GetValue(),
@@ -77,7 +78,7 @@ func (f *PmsFactory) NewMsgStream(ctx context.Context) (MsgStream, error) {
     if err != nil {
         return nil, err
     }
-    return NewMqMsgStream(ctx, f.ReceiveBufSize, f.PulsarBufSize, pulsarClient, f.dispatcherFactory.NewUnmarshalDispatcher())
+    return NewMqMsgStream(ctx, f.ReceiveBufSize, f.MQBufSize, pulsarClient, f.dispatcherFactory.NewUnmarshalDispatcher())
 }
 
 // NewTtMsgStream is used to generate a new TtMsgstream object
@@ -95,7 +96,7 @@ func (f *PmsFactory) NewTtMsgStream(ctx context.Context) (MsgStream, error) {
     if err != nil {
         return nil, err
     }
-    return NewMqTtMsgStream(ctx, f.ReceiveBufSize, f.PulsarBufSize, pulsarClient, f.dispatcherFactory.NewUnmarshalDispatcher())
+    return NewMqTtMsgStream(ctx, f.ReceiveBufSize, f.MQBufSize, pulsarClient, f.dispatcherFactory.NewUnmarshalDispatcher())
 }
 
 func (f *PmsFactory) getAuthentication() (pulsar.Authentication, error) {
@@ -152,16 +153,17 @@ type KmsFactory struct {
     dispatcherFactory ProtoUDFactory
     config            *paramtable.KafkaConfig
     ReceiveBufSize    int64
+    MQBufSize         int64
 }
 
 func (f *KmsFactory) NewMsgStream(ctx context.Context) (MsgStream, error) {
     kafkaClient := kafkawrapper.NewKafkaClientInstanceWithConfig(f.config)
-    return NewMqMsgStream(ctx, f.ReceiveBufSize, -1, kafkaClient, f.dispatcherFactory.NewUnmarshalDispatcher())
+    return NewMqMsgStream(ctx, f.ReceiveBufSize, f.MQBufSize, kafkaClient, f.dispatcherFactory.NewUnmarshalDispatcher())
 }
 
 func (f *KmsFactory) NewTtMsgStream(ctx context.Context) (MsgStream, error) {
     kafkaClient := kafkawrapper.NewKafkaClientInstanceWithConfig(f.config)
-    return NewMqTtMsgStream(ctx, f.ReceiveBufSize, -1, kafkaClient, f.dispatcherFactory.NewUnmarshalDispatcher())
+    return NewMqTtMsgStream(ctx, f.ReceiveBufSize, f.MQBufSize, kafkaClient, f.dispatcherFactory.NewUnmarshalDispatcher())
 }
 
 func (f *KmsFactory) NewQueryMsgStream(ctx context.Context) (MsgStream, error) {
@@ -180,11 +182,12 @@ func (f *KmsFactory) NewMsgStreamDisposer(ctx context.Context) func([]string, st
     }
 }
 
-func NewKmsFactory(config *paramtable.KafkaConfig) Factory {
+func NewKmsFactory(config *paramtable.ServiceParam) Factory {
     f := &KmsFactory{
         dispatcherFactory: ProtoUDFactory{},
-        ReceiveBufSize:    1024,
-        config:            config,
+        ReceiveBufSize:    config.MQCfg.ReceiveBufSize.GetAsInt64(),
+        MQBufSize:         config.MQCfg.MQBufSize.GetAsInt64(),
+        config:            &config.KafkaCfg,
     }
     return f
 }
@@ -192,11 +195,12 @@ func NewKmsFactory(config *paramtable.KafkaConfig) Factory {
 
 // NewNatsmqFactory create a new nats-mq factory.
 func NewNatsmqFactory() Factory {
     paramtable.Init()
-    nmq.MustInitNatsMQ(nmq.ParseServerOption(paramtable.Get()))
+    paramtable := paramtable.Get()
+    nmq.MustInitNatsMQ(nmq.ParseServerOption(paramtable))
     return &CommonFactory{
         Newer:             nmq.NewClientWithDefaultOptions,
         DispatcherFactory: ProtoUDFactory{},
-        ReceiveBufSize:    1024,
-        MQBufSize:         1024,
+        ReceiveBufSize:    paramtable.MQCfg.ReceiveBufSize.GetAsInt64(),
+        MQBufSize:         paramtable.MQCfg.MQBufSize.GetAsInt64(),
     }
 }
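A small Go subtlety in the natsmq hunk: the local `paramtable := paramtable.Get()` shadows the imported `paramtable` package for the remainder of the function. This is legal, but the package name becomes unusable below that line. A self-contained illustration of the scoping rule:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // The identifier "strings" names the package here...
    upper := strings.ToUpper("shadow")

    // ...until a local variable reuses it; from this point on,
    // "strings" refers to the slice, and a call like
    // strings.ToUpper would no longer compile in this scope.
    strings := []string{upper, "demo"}
    fmt.Println(strings)
}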
@@ -25,7 +25,7 @@ import (
 
 func TestPmsFactory(t *testing.T) {
     Params.Init()
-    pmsFactory := NewPmsFactory(&Params.PulsarCfg)
+    pmsFactory := NewPmsFactory(&Params.ServiceParam)
 
     ctx := context.Background()
     _, err := pmsFactory.NewMsgStream(ctx)
@@ -42,7 +42,7 @@ func TestPmsFactory(t *testing.T) {
 }
 
 func TestPmsFactoryWithAuth(t *testing.T) {
-    config := &Params.PulsarCfg
+    config := &Params.ServiceParam
     Params.Save(Params.PulsarCfg.AuthPlugin.Key, "token")
     Params.Save(Params.PulsarCfg.AuthParams.Key, "token:fake_token")
     defer func() {
@@ -77,7 +77,7 @@ func TestPmsFactoryWithAuth(t *testing.T) {
 }
 
 func TestKafkaFactory(t *testing.T) {
-    kmsFactory := NewKmsFactory(&Params.KafkaCfg)
+    kmsFactory := NewKmsFactory(&Params.ServiceParam)
 
     ctx := context.Background()
     _, err := kmsFactory.NewMsgStream(ctx)
@@ -166,7 +166,7 @@ func (kc *kafkaClient) Subscribe(options mqwrapper.ConsumerOptions) (mqwrapper.C
     metrics.MsgStreamOpCounter.WithLabelValues(metrics.CreateConsumerLabel, metrics.TotalLabel).Inc()
 
     config := kc.newConsumerConfig(options.SubscriptionName, options.SubscriptionInitialPosition)
-    consumer, err := newKafkaConsumer(config, options.Topic, options.SubscriptionName, options.SubscriptionInitialPosition)
+    consumer, err := newKafkaConsumer(config, options.BufSize, options.Topic, options.SubscriptionName, options.SubscriptionInitialPosition)
     if err != nil {
         metrics.MsgStreamOpCounter.WithLabelValues(metrics.CreateConsumerLabel, metrics.FailLabel).Inc()
         return nil, err
@@ -29,8 +29,8 @@ type Consumer struct {
 
 const timeout = 3000
 
-func newKafkaConsumer(config *kafka.ConfigMap, topic string, groupID string, position mqwrapper.SubscriptionInitialPosition) (*Consumer, error) {
-    msgChannel := make(chan mqwrapper.Message, 256)
+func newKafkaConsumer(config *kafka.ConfigMap, bufSize int64, topic string, groupID string, position mqwrapper.SubscriptionInitialPosition) (*Consumer, error) {
+    msgChannel := make(chan mqwrapper.Message, bufSize)
     kc := &Consumer{
         config:     config,
         msgChannel: msgChannel,
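The Kafka consumer's message channel capacity, previously a literal 256, is now the caller-supplied bufSize, which arrives as options.BufSize from the Subscribe hunk above (16 under the new defaults). Capacity only bounds received-but-unprocessed messages per consumer; a minimal sketch, with message and consumer as stand-ins for the real types:

package main

import "fmt"

// message stands in for mqwrapper.Message.
type message []byte

// consumer mirrors the shape of the patched Consumer: the channel
// capacity is a constructor argument instead of a hard-coded 256.
type consumer struct {
    msgChannel chan message
}

func newConsumer(bufSize int64) *consumer {
    return &consumer{msgChannel: make(chan message, bufSize)}
}

func main() {
    c := newConsumer(16) // the new default buffer length

    // Up to 16 messages can be staged without a reader; a 17th
    // send would block, bounding the memory held per consumer.
    for i := 0; i < 16; i++ {
        c.msgChannel <- message(fmt.Sprintf("msg-%d", i))
    }
    fmt.Println("buffered:", len(c.msgChannel), "capacity:", cap(c.msgChannel))
}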
@@ -20,7 +20,7 @@ func TestKafkaConsumer_Subscription(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    kc, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    kc, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer kc.Close()
     assert.Equal(t, kc.Subscription(), groupID)
@@ -32,7 +32,7 @@ func TestKafkaConsumer_SeekExclusive(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -58,7 +58,7 @@ func TestKafkaConsumer_SeekInclusive(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -84,7 +84,7 @@ func TestKafkaConsumer_GetSeek(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -101,7 +101,7 @@ func TestKafkaConsumer_ChanWithNoAssign(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -138,7 +138,7 @@ func TestKafkaConsumer_SeekAfterChan(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -159,7 +159,7 @@ func TestKafkaConsumer_GetLatestMsgID(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionUnknown)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -186,7 +186,7 @@ func TestKafkaConsumer_ConsumeFromLatest(t *testing.T) {
     testKafkaConsumerProduceData(t, topic, data1, data2)
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionLatest)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionLatest)
     assert.NoError(t, err)
     defer consumer.Close()
     data1 = []int{444, 555}
@@ -211,7 +211,7 @@ func TestKafkaConsumer_ConsumeFromEarliest(t *testing.T) {
     testKafkaConsumerProduceData(t, topic, data1, data2)
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
     assert.NoError(t, err)
     msg := <-consumer.Chan()
     assert.Equal(t, 111, BytesToInt(msg.Payload()))
@@ -220,7 +220,7 @@ func TestKafkaConsumer_ConsumeFromEarliest(t *testing.T) {
     defer consumer.Close()
 
     config = createConfig(groupID)
-    consumer2, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
+    consumer2, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
     assert.NoError(t, err)
     msg = <-consumer2.Chan()
     assert.Equal(t, 111, BytesToInt(msg.Payload()))
@@ -262,7 +262,7 @@ func TestKafkaConsumer_CheckPreTopicValid(t *testing.T) {
     topic := fmt.Sprintf("test-topicName-%d", rand.Int())
 
     config := createConfig(groupID)
-    consumer, err := newKafkaConsumer(config, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
+    consumer, err := newKafkaConsumer(config, 16, topic, groupID, mqwrapper.SubscriptionPositionEarliest)
     assert.NoError(t, err)
     defer consumer.Close()
 
@@ -24,7 +24,7 @@ import (
 )
 
 func TestPulsarMsgUtil(t *testing.T) {
-    pmsFactory := NewPmsFactory(&Params.PulsarCfg)
+    pmsFactory := NewPmsFactory(&Params.ServiceParam)
 
     ctx := context.Background()
     msgStream, err := pmsFactory.NewMsgStream(ctx)
@@ -1576,7 +1576,7 @@ func (p *queryNodeConfig) init(base *BaseTable) {
     p.FlowGraphMaxQueueLength = ParamItem{
         Key:          "queryNode.dataSync.flowGraph.maxQueueLength",
         Version:      "2.0.0",
-        DefaultValue: "1024",
+        DefaultValue: "16",
         Doc:          "Maximum length of task queue in flowgraph",
         Export:       true,
     }
@@ -2370,7 +2370,7 @@ func (p *dataNodeConfig) init(base *BaseTable) {
     p.FlowGraphMaxQueueLength = ParamItem{
         Key:          "dataNode.dataSync.flowGraph.maxQueueLength",
         Version:      "2.0.0",
-        DefaultValue: "1024",
+        DefaultValue: "16",
         Doc:          "Maximum length of task queue in flowgraph",
         Export:       true,
     }
@@ -289,7 +289,7 @@ func TestComponentParam(t *testing.T) {
     assert.Equal(t, 1000, interval)
 
     length := Params.FlowGraphMaxQueueLength.GetAsInt32()
-    assert.Equal(t, int32(1024), length)
+    assert.Equal(t, int32(16), length)
 
     maxParallelism := Params.FlowGraphMaxParallelism.GetAsInt32()
     assert.Equal(t, int32(1024), maxParallelism)
@@ -373,6 +373,9 @@ type MQConfig struct {
     EnablePursuitMode ParamItem `refreshable:"true"`
     PursuitLag        ParamItem `refreshable:"true"`
     PursuitBufferSize ParamItem `refreshable:"true"`
+
+    MQBufSize      ParamItem `refreshable:"false"`
+    ReceiveBufSize ParamItem `refreshable:"false"`
 }
 
 // Init initializes the MQConfig object with a BaseTable.
@@ -413,6 +416,23 @@ Valid values: [default, pulsar, kafka, rocksmq, natsmq]`,
         Export: true,
     }
     p.PursuitBufferSize.Init(base.mgr)
+
+    p.MQBufSize = ParamItem{
+        Key:          "mq.mqBufSize",
+        Version:      "2.3.0",
+        DefaultValue: "16",
+        Doc:          `MQ client consumer buffer length`,
+        Export:       true,
+    }
+    p.MQBufSize.Init(base.mgr)
+
+    p.ReceiveBufSize = ParamItem{
+        Key:          "mq.receiveBufSize",
+        Version:      "2.3.0",
+        DefaultValue: "16",
+        Doc:          "MQ consumer chan buffer length",
+    }
+    p.ReceiveBufSize.Init(base.mgr)
 }
 
 // /////////////////////////////////////////////////////////////////////////////
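The two new ParamItems register mq.mqBufSize and mq.receiveBufSize with string defaults of "16", read elsewhere through GetAsInt64(). A rough sketch of how a string-backed default plus a typed accessor can behave; this is an illustration of the pattern, not the actual paramtable implementation.

package main

import (
    "fmt"
    "strconv"
)

// paramItem is a stand-in for paramtable.ParamItem: values are kept
// as strings so one mechanism serves ints, bools and durations.
type paramItem struct {
    Key          string
    DefaultValue string
    overrides    map[string]string // loaded from milvus.yaml/env in reality
}

// GetAsInt64 resolves a user override if present, else the default.
func (p paramItem) GetAsInt64() int64 {
    raw := p.DefaultValue
    if v, ok := p.overrides[p.Key]; ok {
        raw = v
    }
    n, err := strconv.ParseInt(raw, 10, 64)
    if err != nil {
        panic(fmt.Sprintf("bad value for %s: %q", p.Key, raw))
    }
    return n
}

func main() {
    cfg := map[string]string{"mq.mqBufSize": "64"} // user override
    mqBufSize := paramItem{Key: "mq.mqBufSize", DefaultValue: "16", overrides: cfg}
    receiveBufSize := paramItem{Key: "mq.receiveBufSize", DefaultValue: "16", overrides: cfg}

    fmt.Println(mqBufSize.GetAsInt64())      // 64: the override wins
    fmt.Println(receiveBufSize.GetAsInt64()) // 16: the declared default
}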