Fix copylocks linter errors (#26217)

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
congqixia 2023-08-08 20:41:15 +08:00 committed by GitHub
parent 99e0651c54
commit b9850ce5c0
9 changed files with 37 additions and 37 deletions
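
The changes below all address the same go vet copylocks warning: the paramtable items and config sub-structs evidently embed lock state (which is what the copylocks analyzer flags), so assigning one to a local variable, or restoring it by reassignment, copies a lock by value. The fix is either to snapshot the parameter as a string with GetValue() and restore it through Save(), or, for config sub-structs that are only read, to take a pointer instead of a value copy. A minimal sketch of the snapshot-and-restore pattern, assuming the Milvus test environment where Params aliases paramtable.Get() (the test name here is hypothetical):

func TestWithOverriddenBinLogMaxSize(t *testing.T) {
	// Copying the ParamItem (`bak := Params.DataNodeCfg.BinLogMaxSize`) would
	// copy the lock it carries and trip the copylocks check; snapshot the
	// string value instead.
	bak := Params.DataNodeCfg.BinLogMaxSize.GetValue()
	paramtable.Get().Save(Params.DataNodeCfg.BinLogMaxSize.Key, "128")
	defer func() {
		// Restore through the param table rather than reassigning the item.
		Params.Save(Params.DataNodeCfg.BinLogMaxSize.Key, bak)
	}()
	// ... test body that depends on the overridden value ...
}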

@@ -327,9 +327,9 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
t.Run("Merge without expiration2", func(t *testing.T) {
mockbIO := &binlogIO{cm, alloc}
paramtable.Get().Save(Params.CommonCfg.EntityExpirationTTL.Key, "0")
- BinLogMaxSize := Params.DataNodeCfg.BinLogMaxSize
+ BinLogMaxSize := Params.DataNodeCfg.BinLogMaxSize.GetValue()
defer func() {
- Params.DataNodeCfg.BinLogMaxSize = BinLogMaxSize
+ Params.Save(Params.DataNodeCfg.BinLogMaxSize.Key, BinLogMaxSize)
}()
paramtable.Get().Save(Params.DataNodeCfg.BinLogMaxSize.Key, "128")
iData := genInsertDataWithExpiredTS()

@@ -401,10 +401,10 @@ func TestFlowGraphInsertBufferNode_AutoFlush(t *testing.T) {
t.Run("Pure auto flush", func(t *testing.T) {
// iBNode.insertBuffer.maxSize = 2
- tmp := Params.DataNodeCfg.FlushInsertBufferSize
+ tmp := Params.DataNodeCfg.FlushInsertBufferSize.GetValue()
paramtable.Get().Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, "200")
defer func() {
- Params.DataNodeCfg.FlushInsertBufferSize = tmp
+ Params.Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, tmp)
}()
for i := range inMsg.insertMessages {
@@ -493,10 +493,10 @@ func TestFlowGraphInsertBufferNode_AutoFlush(t *testing.T) {
})
t.Run("Auto with manual flush", func(t *testing.T) {
- tmp := Params.DataNodeCfg.FlushInsertBufferSize
+ tmp := Params.DataNodeCfg.FlushInsertBufferSize.GetValue()
paramtable.Get().Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, "200")
defer func() {
- Params.DataNodeCfg.FlushInsertBufferSize = tmp
+ Params.Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, tmp)
}()
fpMut.Lock()
@@ -648,10 +648,10 @@ func TestInsertBufferNodeRollBF(t *testing.T) {
var iMsg flowgraph.Msg = &inMsg
t.Run("Pure roll BF", func(t *testing.T) {
- tmp := Params.DataNodeCfg.FlushInsertBufferSize
+ tmp := Params.DataNodeCfg.FlushInsertBufferSize.GetValue()
paramtable.Get().Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, "200")
defer func() {
- Params.DataNodeCfg.FlushInsertBufferSize = tmp
+ Params.Save(Params.DataNodeCfg.FlushInsertBufferSize.Key, tmp)
}()
inMsg.startPositions = []*msgpb.MsgPosition{{Timestamp: 100}}

@@ -11,9 +11,9 @@ import (
)
func Test_getOrCreateIOPool(t *testing.T) {
- ioConcurrency := Params.DataNodeCfg.IOConcurrency
+ ioConcurrency := Params.DataNodeCfg.IOConcurrency.GetValue()
paramtable.Get().Save(Params.DataNodeCfg.IOConcurrency.Key, "64")
- defer func() { Params.DataNodeCfg.IOConcurrency = ioConcurrency }()
+ defer func() { Params.Save(Params.DataNodeCfg.IOConcurrency.Key, ioConcurrency) }()
nP := 10
nTask := 10
wg := sync.WaitGroup{}

@@ -367,7 +367,7 @@ func (s *Server) init() error {
etcdConfig := &paramtable.Get().EtcdCfg
Params := &paramtable.Get().ProxyGrpcServerCfg
log.Debug("Proxy init service's parameter table done")
- HTTPParams := paramtable.Get().HTTPCfg
+ HTTPParams := &paramtable.Get().HTTPCfg
log.Debug("Proxy init http server's parameter table done")
if !funcutil.CheckPortAvailable(Params.Port.GetAsInt()) {

@@ -45,7 +45,7 @@ func NewClient(ctx context.Context, addr string) (*Client, error) {
if addr == "" {
return nil, fmt.Errorf("addr is empty")
}
- clientParams := paramtable.Get().QueryNodeGrpcClientCfg
+ clientParams := &paramtable.Get().QueryNodeGrpcClientCfg
client := &Client{
addr: addr,
grpcClient: &grpcclient.ClientBase[querypb.QueryNodeClient]{

@@ -110,7 +110,7 @@ func (i *IndexNode) waitTaskFinish() {
return
}
- gracefulTimeout := Params.IndexNodeCfg.GracefulStopTimeout
+ gracefulTimeout := &Params.IndexNodeCfg.GracefulStopTimeout
ticker := time.NewTicker(time.Second)
defer ticker.Stop()

@@ -37,7 +37,7 @@ import (
func TestMultiRateLimiter(t *testing.T) {
collectionID := int64(1)
t.Run("test multiRateLimiter", func(t *testing.T) {
- bak := Params.QuotaConfig.QuotaAndLimitsEnabled
+ bak := Params.QuotaConfig.QuotaAndLimitsEnabled.GetValue()
paramtable.Get().Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, "true")
multiLimiter := NewMultiRateLimiter()
multiLimiter.collectionLimiters[collectionID] = newRateLimiter(false)
@@ -66,11 +66,11 @@ func TestMultiRateLimiter(t *testing.T) {
}
}
- Params.QuotaConfig.QuotaAndLimitsEnabled = bak
+ Params.Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, bak)
})
t.Run("test global static limit", func(t *testing.T) {
- bak := Params.QuotaConfig.QuotaAndLimitsEnabled
+ bak := Params.QuotaConfig.QuotaAndLimitsEnabled.GetValue()
paramtable.Get().Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, "true")
multiLimiter := NewMultiRateLimiter()
multiLimiter.collectionLimiters[1] = newRateLimiter(false)
@@ -103,32 +103,32 @@ func TestMultiRateLimiter(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_RateLimit, errCode)
}
}
- Params.QuotaConfig.QuotaAndLimitsEnabled = bak
+ Params.Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, bak)
})
t.Run("not enable quotaAndLimit", func(t *testing.T) {
multiLimiter := NewMultiRateLimiter()
multiLimiter.collectionLimiters[collectionID] = newRateLimiter(false)
- bak := Params.QuotaConfig.QuotaAndLimitsEnabled
+ bak := Params.QuotaConfig.QuotaAndLimitsEnabled.GetValue()
paramtable.Get().Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, "false")
for _, rt := range internalpb.RateType_value {
errCode := multiLimiter.Check(collectionID, internalpb.RateType(rt), 1)
assert.Equal(t, commonpb.ErrorCode_Success, errCode)
}
- Params.QuotaConfig.QuotaAndLimitsEnabled = bak
+ Params.Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, bak)
})
t.Run("test limit", func(t *testing.T) {
run := func(insertRate float64) {
- bakInsertRate := Params.QuotaConfig.DMLMaxInsertRate
+ bakInsertRate := Params.QuotaConfig.DMLMaxInsertRate.GetValue()
paramtable.Get().Save(Params.QuotaConfig.DMLMaxInsertRate.Key, fmt.Sprintf("%f", insertRate))
multiLimiter := NewMultiRateLimiter()
- bak := Params.QuotaConfig.QuotaAndLimitsEnabled
+ bak := Params.QuotaConfig.QuotaAndLimitsEnabled.GetValue()
paramtable.Get().Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, "true")
errCode := multiLimiter.Check(collectionID, internalpb.RateType_DMLInsert, 1*1024*1024)
assert.Equal(t, commonpb.ErrorCode_Success, errCode)
- Params.QuotaConfig.QuotaAndLimitsEnabled = bak
- Params.QuotaConfig.DMLMaxInsertRate = bakInsertRate
+ Params.Save(Params.QuotaConfig.QuotaAndLimitsEnabled.Key, bak)
+ Params.Save(Params.QuotaConfig.DMLMaxInsertRate.Key, bakInsertRate)
}
run(math.MaxFloat64)
run(math.MaxFloat64 / 1.2)

@@ -865,9 +865,9 @@ func TestPasswordVerify(t *testing.T) {
}
func TestValidateTravelTimestamp(t *testing.T) {
- originalRetentionDuration := Params.CommonCfg.RetentionDuration
+ originalRetentionDuration := Params.CommonCfg.RetentionDuration.GetValue()
defer func() {
- Params.CommonCfg.RetentionDuration = originalRetentionDuration
+ Params.Save(Params.CommonCfg.RetentionDuration.Key, originalRetentionDuration)
}()
travelTs := tsoutil.GetCurrentTime()

@@ -182,7 +182,7 @@ func TestQuotaCenter(t *testing.T) {
{10 * time.Second, t0.Add(100 * time.Second), t0, 0},
}
- backup := Params.QuotaConfig.MaxTimeTickDelay
+ backup := Params.QuotaConfig.MaxTimeTickDelay.GetValue()
for _, c := range ttCases {
paramtable.Get().Save(Params.QuotaConfig.MaxTimeTickDelay.Key, fmt.Sprintf("%f", c.maxTtDelay.Seconds()))
@@ -203,7 +203,7 @@ func TestQuotaCenter(t *testing.T) {
}
}
- Params.QuotaConfig.MaxTimeTickDelay = backup
+ Params.Save(Params.QuotaConfig.MaxTimeTickDelay.Key, backup)
})
t.Run("test TimeTickDelayFactor factors", func(t *testing.T) {
@@ -226,7 +226,7 @@ func TestQuotaCenter(t *testing.T) {
{100 * time.Second, 0},
}
- backup := Params.QuotaConfig.MaxTimeTickDelay
+ backup := Params.QuotaConfig.MaxTimeTickDelay.GetValue()
paramtable.Get().Save(Params.QuotaConfig.DMLLimitEnabled.Key, "true")
paramtable.Get().Save(Params.QuotaConfig.TtProtectionEnabled.Key, "true")
paramtable.Get().Save(Params.QuotaConfig.MaxTimeTickDelay.Key, "10.0")
@@ -276,7 +276,7 @@ func TestQuotaCenter(t *testing.T) {
deleteFactor := float64(quotaCenter.currentRates[1][internalpb.RateType_DMLDelete]) / Params.QuotaConfig.DMLMaxInsertRatePerCollection.GetAsFloat()
assert.Equal(t, c.expectedFactor, deleteFactor)
}
- Params.QuotaConfig.MaxTimeTickDelay = backup
+ Params.Save(Params.QuotaConfig.MaxTimeTickDelay.Key, backup)
})
t.Run("test calculateReadRates", func(t *testing.T) {
@@ -496,7 +496,7 @@ func TestQuotaCenter(t *testing.T) {
quotaCenter.checkDiskQuota()
// total DiskQuota exceeded
- quotaBackup := Params.QuotaConfig.DiskQuota
+ quotaBackup := Params.QuotaConfig.DiskQuota.GetValue()
paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, "99")
quotaCenter.dataCoordMetrics = &metricsinfo.DataCoordQuotaMetrics{
TotalBinlogSize: 200 * 1024 * 1024,
@@ -509,10 +509,10 @@ func TestQuotaCenter(t *testing.T) {
assert.Equal(t, Limit(0), quotaCenter.currentRates[collection][internalpb.RateType_DMLUpsert])
assert.Equal(t, Limit(0), quotaCenter.currentRates[collection][internalpb.RateType_DMLDelete])
}
- paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, quotaBackup.GetValue())
+ paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, quotaBackup)
// collection DiskQuota exceeded
- colQuotaBackup := Params.QuotaConfig.DiskQuotaPerCollection
+ colQuotaBackup := Params.QuotaConfig.DiskQuotaPerCollection.GetValue()
paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, "30")
quotaCenter.dataCoordMetrics = &metricsinfo.DataCoordQuotaMetrics{CollectionBinlogSize: map[int64]int64{
1: 20 * 1024 * 1024, 2: 30 * 1024 * 1024, 3: 60 * 1024 * 1024}}
@@ -528,7 +528,7 @@ func TestQuotaCenter(t *testing.T) {
assert.Equal(t, Limit(0), quotaCenter.currentRates[3][internalpb.RateType_DMLInsert])
assert.Equal(t, Limit(0), quotaCenter.currentRates[3][internalpb.RateType_DMLUpsert])
assert.Equal(t, Limit(0), quotaCenter.currentRates[3][internalpb.RateType_DMLDelete])
- paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, colQuotaBackup.GetValue())
+ paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, colQuotaBackup)
})
t.Run("test setRates", func(t *testing.T) {
@@ -599,8 +599,8 @@ func TestQuotaCenter(t *testing.T) {
meta.EXPECT().GetCollectionByID(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, merr.ErrCollectionNotFound).Maybe()
quotaCenter := NewQuotaCenter(pcm, nil, &dataCoordMockForQuota{}, core.tsoAllocator, meta)
quotaCenter.resetAllCurrentRates()
- quotaBackup := Params.QuotaConfig.DiskQuota
- colQuotaBackup := Params.QuotaConfig.DiskQuotaPerCollection
+ quotaBackup := Params.QuotaConfig.DiskQuota.GetValue()
+ colQuotaBackup := Params.QuotaConfig.DiskQuotaPerCollection.GetValue()
paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, test.totalDiskQuota)
paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, test.collDiskQuota)
quotaCenter.diskMu.Lock()
@@ -610,8 +610,8 @@ func TestQuotaCenter(t *testing.T) {
quotaCenter.diskMu.Unlock()
allowance := quotaCenter.diskAllowance(collection)
assert.Equal(t, test.expectAllowance, allowance)
- paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, quotaBackup.GetValue())
- paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, colQuotaBackup.GetValue())
+ paramtable.Get().Save(Params.QuotaConfig.DiskQuota.Key, quotaBackup)
+ paramtable.Get().Save(Params.QuotaConfig.DiskQuotaPerCollection.Key, colQuotaBackup)
})
}
})