mirror of https://github.com/milvus-io/milvus.git
enhance: cleanup lint check exclusions (#40829)
See: #40828

Signed-off-by: Ted Xu <ted.xu@zilliz.com>

parent 901308df1e
commit 688505ab1c
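This commit trims entries from the issues exclude lists in the repository's .golangci.yml files and fixes the findings those entries had been masking: Ids→IDs renames for revive's var-naming (whose allow list gains "IDS"), defers registered before os.Exit in TestMain helpers (gocritic's exitAfterDefer), pass-through lambdas around paramtable.GetNodeID (unlambda), a duplicated switch case in merr (dupCase), a self-comparing Equal call in a channel test (dupArg), and repeated type assertions replaced by bound type switches.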
@@ -49,7 +49,7 @@ linters-settings:
         severity: warning
         disabled: false
         arguments:
-          - ["ID"] # Allow list
+          - ["ID", "IDS"] # Allow list
       - name: context-as-argument
         severity: warning
         disabled: false

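For context, this block configures revive's var-naming rule; the inline comment marks the argument array as its allow list of initialisms, and extending it with "IDS" lines up with the Ids→IDs renames in the rest of the commit. A minimal sketch of the naming the rule enforces (identifiers are illustrative):

```go
package example

// Flagged by revive's var-naming: the ID initialism is not fully capitalized.
var queryFieldIds []int64

// Accepted spelling, matching this commit's renames.
var queryFieldIDs []int64
```
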
@@ -182,7 +182,6 @@ issues:
     - ifElseChain
     - elseif
     - commentFormatting
-    - var-naming
     - exitAfterDefer
     - captLocal
     - singleCaseSwitch
@@ -193,9 +192,6 @@ issues:
     - SA9009
     - SA1006
     - S1009
-    - unlambda
-    - dupCase
-    - dupArg
     - offBy1
     - unslice
     # Integer overflow conversion

@@ -167,7 +167,6 @@ issues:
     - ifElseChain
     - elseif
     - commentFormatting
-    - var-naming
     - exitAfterDefer
     - captLocal
     - singleCaseSwitch
@@ -178,9 +177,6 @@ issues:
     - SA9009
     - SA1006
     - S1009
-    - unlambda
-    - dupCase
-    - dupArg
     - offBy1
     - unslice
     # Integer overflow conversion

@@ -355,7 +355,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
 	// fetch search_growing from search param
 	t.SearchRequest.SubReqs = make([]*internalpb.SubSearchRequest, len(t.request.GetSubReqs()))
 	t.queryInfos = make([]*planpb.QueryInfo, len(t.request.GetSubReqs()))
-	queryFieldIds := []int64{}
+	queryFieldIDs := []int64{}
 	for index, subReq := range t.request.GetSubReqs() {
 		plan, queryInfo, offset, _, err := t.tryGeneratePlan(subReq.GetSearchParams(), subReq.GetDsl(), subReq.GetExprTemplateValues())
 		if err != nil {
@@ -386,7 +386,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
 		}
 
 		internalSubReq.FieldId = queryInfo.GetQueryFieldId()
-		queryFieldIds = append(queryFieldIds, internalSubReq.FieldId)
+		queryFieldIDs = append(queryFieldIDs, internalSubReq.FieldId)
 		// set PartitionIDs for sub search
 		if t.partitionKeyMode {
 			// isolation has tighter constraint, check first
@@ -429,7 +429,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
 	}
 
 	var err error
-	if function.HasNonBM25Functions(t.schema.CollectionSchema.Functions, queryFieldIds) {
+	if function.HasNonBM25Functions(t.schema.CollectionSchema.Functions, queryFieldIDs) {
 		ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-AdvancedSearch-call-function-udf")
 		defer sp.End()
 		exec, err := function.NewFunctionExecutor(t.schema.CollectionSchema)

@@ -65,8 +65,8 @@ type upsertTask struct {
 	partitionKeyMode bool
 	partitionKeys    *schemapb.FieldData
 	// automatic generate pk as new pk wehen autoID == true
-	// delete task need use the oldIds
-	oldIds          *schemapb.IDs
+	// delete task need use the oldIDs
+	oldIDs          *schemapb.IDs
 	schemaTimestamp uint64
 }
 
@@ -208,7 +208,7 @@ func (it *upsertTask) insertPreExecute(ctx context.Context) error {
 	// use the passed pk as new pk when autoID == false
 	// automatic generate pk as new pk wehen autoID == true
 	var err error
-	it.result.IDs, it.oldIds, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
+	it.result.IDs, it.oldIDs, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
 	log := log.Ctx(ctx).With(zap.String("collectionName", it.upsertMsg.InsertMsg.CollectionName))
 	if err != nil {
 		log.Warn("check primary field data and hash primary key failed when upsert",
@@ -507,7 +507,7 @@ func (it *upsertTask) deleteExecute(ctx context.Context, msgPack *msgstream.MsgP
 		it.result.Status = merr.Status(err)
 		return err
 	}
-	it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
+	it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
 	it.upsertMsg.DeleteMsg.HashValues = typeutil.HashPK2Channels(it.upsertMsg.DeleteMsg.PrimaryKeys, channelNames)
 
 	// repack delete msg by dmChannel

@@ -96,7 +96,7 @@ func (ut *upsertTaskByStreamingService) packInsertMessage(ctx context.Context) (
 func (it *upsertTaskByStreamingService) packDeleteMessage(ctx context.Context) ([]message.MutableMessage, error) {
 	tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy deleteExecute upsert %d", it.ID()))
 	collID := it.upsertMsg.DeleteMsg.CollectionID
-	it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
+	it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
 	log := log.Ctx(ctx).With(
 		zap.Int64("collectionID", collID))
 	// hash primary keys to channels

@@ -1709,12 +1709,12 @@ func checkUpsertPrimaryFieldData(schema *schemapb.CollectionSchema, insertMsg *m
 	if !primaryFieldSchema.GetAutoID() {
 		return ids, ids, nil
 	}
-	newIds, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
+	newIDs, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
 	if err != nil {
 		log.Warn("parse primary field data to IDs failed", zap.Error(err))
 		return nil, nil, err
 	}
-	return newIds, ids, nil
+	return newIDs, ids, nil
 }
 
 func getPartitionKeyFieldData(fieldSchema *schemapb.FieldSchema, insertMsg *msgstream.InsertMsg) (*schemapb.FieldData, error) {

@@ -779,7 +779,7 @@ var _ RecordWriter = (*multiFieldRecordWriter)(nil)
 
 type multiFieldRecordWriter struct {
 	fw       *pqarrow.FileWriter
-	fieldIds []FieldID
+	fieldIDs []FieldID
 	schema   *arrow.Schema
 
 	numRows int
@@ -788,8 +788,8 @@ type multiFieldRecordWriter struct {
 
 func (mfw *multiFieldRecordWriter) Write(r Record) error {
 	mfw.numRows += r.Len()
-	columns := make([]arrow.Array, len(mfw.fieldIds))
-	for i, fieldId := range mfw.fieldIds {
+	columns := make([]arrow.Array, len(mfw.fieldIDs))
+	for i, fieldId := range mfw.fieldIDs {
 		columns[i] = r.Column(fieldId)
 		mfw.writtenUncompressed += uint64(CalculateArraySize(columns[i]))
 	}
@@ -806,7 +806,7 @@ func (mfw *multiFieldRecordWriter) Close() error {
 	return mfw.fw.Close()
 }
 
-func newMultiFieldRecordWriter(fieldIds []FieldID, fields []arrow.Field, writer io.Writer) (*multiFieldRecordWriter, error) {
+func newMultiFieldRecordWriter(fieldIDs []FieldID, fields []arrow.Field, writer io.Writer) (*multiFieldRecordWriter, error) {
 	schema := arrow.NewSchema(fields, nil)
 	fw, err := pqarrow.NewFileWriter(schema, writer,
 		parquet.NewWriterProperties(parquet.WithMaxRowGroupLength(math.MaxInt64)), // No additional grouping for now.
@@ -816,7 +816,7 @@ func newMultiFieldRecordWriter(fieldIds []FieldID, fields []arrow.Field, writer
 	}
 	return &multiFieldRecordWriter{
 		fw:       fw,
-		fieldIds: fieldIds,
+		fieldIDs: fieldIDs,
 		schema:   schema,
 	}, nil
 }

@@ -1079,7 +1079,7 @@ func (dsw *MultiFieldDeltalogStreamWriter) GetRecordWriter() (RecordWriter, erro
 		return dsw.rw, nil
 	}
 
-	fieldIds := []FieldID{common.RowIDField, common.TimeStampField} // Not used.
+	fieldIDs := []FieldID{common.RowIDField, common.TimeStampField} // Not used.
 	fields := []arrow.Field{
 		{
 			Name: "pk",
@@ -1093,7 +1093,7 @@ func (dsw *MultiFieldDeltalogStreamWriter) GetRecordWriter() (RecordWriter, erro
 		},
 	}
 
-	rw, err := newMultiFieldRecordWriter(fieldIds, fields, &dsw.buf)
+	rw, err := newMultiFieldRecordWriter(fieldIDs, fields, &dsw.buf)
 	if err != nil {
 		return nil, err
 	}

@@ -66,7 +66,7 @@ func (s *ResultCacheServerSuite) TestSend() {
 	s.Equal(6, len(msg.GetIds().GetIntId().GetData()))
 }
 
-func generateIntIds(num int) *schemapb.IDs {
+func generateIntIDs(num int) *schemapb.IDs {
 	data := make([]int64, num)
 	for i := 0; i < num; i++ {
 		data[i] = int64(i)
@@ -77,7 +77,7 @@ func generateIntIds(num int) *schemapb.IDs {
 	}
 }
 
-func generateStrIds(num int) *schemapb.IDs {
+func generateStrIDs(num int) *schemapb.IDs {
 	data := make([]string, num)
 	for i := 0; i < num; i++ {
 		data[i] = strconv.FormatInt(int64(i), 10)
@@ -98,7 +98,7 @@ func (s *ResultCacheServerSuite) TestSplit() {
 	cacheSrv := NewResultCacheServer(srv, 1024, 1024)
 
 	err := cacheSrv.Send(&internalpb.RetrieveResults{
-		Ids: generateIntIds(1024),
+		Ids: generateIntIDs(1024),
 	})
 	s.NoError(err)
 
@@ -130,7 +130,7 @@ func (s *ResultCacheServerSuite) TestSplit() {
 	cacheSrv := NewResultCacheServer(srv, 1024, 1024)
 
 	err := cacheSrv.Send(&internalpb.RetrieveResults{
-		Ids: generateStrIds(2048),
+		Ids: generateStrIDs(2048),
 	})
 	s.NoError(err)

@@ -56,12 +56,12 @@ func TestMain(m *testing.M) {
 	paramtable.Init()
 	Params = paramtable.Get()
 	mockKafkaCluster, err := kafka.NewMockCluster(1)
-	defer mockKafkaCluster.Close()
 	if err != nil {
 		// nolint
 		fmt.Printf("Failed to create MockCluster: %s\n", err)
 		os.Exit(1)
 	}
+	defer mockKafkaCluster.Close()
 	broker := mockKafkaCluster.BootstrapServers()
 	Params.Save("kafka.brokerList", broker)
 	// Disable pursuit mode for unit test by default

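Moving the defer below the error check addresses gocritic's exitAfterDefer warning: os.Exit terminates the process without running pending defers, so a Close deferred before a possible os.Exit(1) would be silently skipped, and it would also fire on a half-initialized cluster. A minimal, self-contained illustration:

```go
package main

import (
	"fmt"
	"os"
)

type resource struct{}

func (resource) Close() { fmt.Println("closed") }

func main() {
	r := resource{}
	defer r.Close() // exitAfterDefer: skipped entirely when os.Exit runs
	os.Exit(1)      // exits immediately; "closed" is never printed
}
```
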
@@ -27,12 +27,12 @@ var Params = paramtable.Get()
 func TestMain(m *testing.M) {
 	paramtable.Init()
 	mockCluster, err := kafka.NewMockCluster(1)
-	defer mockCluster.Close()
 	if err != nil {
 		// nolint
 		fmt.Printf("Failed to create MockCluster: %s\n", err)
 		os.Exit(1)
 	}
+	defer mockCluster.Close()
 
 	broker := mockCluster.BootstrapServers()
 	Params.Save("kafka.brokerList", broker)

@@ -31,19 +31,21 @@ import (
 var natsServerAddress string
 
 func TestMain(m *testing.M) {
-	paramtable.Init()
-
-	storeDir, _ := os.MkdirTemp("", "milvus_mq_nmq")
-	defer os.RemoveAll(storeDir)
-
-	cfg := ParseServerOption(paramtable.Get())
-	cfg.Opts.Port = server.RANDOM_PORT
-	cfg.Opts.StoreDir = storeDir
-	MustInitNatsMQ(cfg)
-	defer CloseNatsMQ()
-
-	natsServerAddress = Nmq.ClientURL()
-	exitCode := m.Run()
+	exitCode := func() int {
+		paramtable.Init()
+		storeDir, _ := os.MkdirTemp("", "milvus_mq_nmq")
+		defer os.RemoveAll(storeDir)
+
+		cfg := ParseServerOption(paramtable.Get())
+		cfg.Opts.Port = server.RANDOM_PORT
+		cfg.Opts.StoreDir = storeDir
+		MustInitNatsMQ(cfg)
+		defer CloseNatsMQ()
+		natsServerAddress = Nmq.ClientURL()
+		return m.Run()
+	}()
+
 	os.Exit(exitCode)
 }

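Wrapping the body in a closure is the standard fix when TestMain both defers cleanup and must call os.Exit: the defers run when the closure returns, and only then does os.Exit fire with the captured code. A sketch of the pattern (the setup/cleanup pair is a placeholder):

```go
package example_test

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	exitCode := func() int {
		cleanup := func() {} // placeholder for MkdirTemp/MustInitNatsMQ-style setup
		defer cleanup()      // runs when the closure returns, before os.Exit
		return m.Run()
	}()
	os.Exit(exitCode) // safe: all deferred cleanup has already executed
}
```
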
@@ -35,33 +35,36 @@ import (
 )
 
 func TestMain(m *testing.M) {
-	paramtable.Init()
-	pt := paramtable.Get()
-	pt.Save(pt.ServiceParam.MQCfg.EnablePursuitMode.Key, "false")
-
-	rand.Seed(time.Now().UnixNano())
-	path := "/tmp/milvus/rdb_data"
-	defer os.RemoveAll(path)
-	paramtable.Get().Save("rocksmq.compressionTypes", "0,0,0,0,0")
-	_ = server2.InitRocksMQ(path)
-	exitCode := m.Run()
-	defer server2.CloseRocksMQ()
+	exitCode := func() int {
+		paramtable.Init()
+		pt := paramtable.Get()
+		pt.Save(pt.ServiceParam.MQCfg.EnablePursuitMode.Key, "false")
+
+		rand.Seed(time.Now().UnixNano())
+		path := "/tmp/milvus/rdb_data"
+		defer os.RemoveAll(path)
+		paramtable.Get().Save("rocksmq.compressionTypes", "0,0,0,0,0")
+		_ = server2.InitRocksMQ(path)
+		defer server2.CloseRocksMQ()
+		return m.Run()
+	}()
+
 	os.Exit(exitCode)
 }
 
 func Test_NewRmqClient(t *testing.T) {
 	client, err := createRmqClient()
-	defer client.Close()
 	assert.NoError(t, err)
 	assert.NotNil(t, client)
+	client.Close()
 }
 
 func TestRmqClient_CreateProducer(t *testing.T) {
 	opts := client3.Options{}
 	client, err := NewClient(opts)
-	defer client.Close()
 	assert.NoError(t, err)
 	assert.NotNil(t, client)
+	defer client.Close()
 
 	topic := "TestRmqClient_CreateProducer"
 	proOpts := common.ProducerOptions{Topic: topic}
@@ -143,9 +146,9 @@ func TestRmqClient_GetLatestMsg(t *testing.T) {
 
 func TestRmqClient_Subscribe(t *testing.T) {
 	client, err := createRmqClient()
-	defer client.Close()
 	assert.NoError(t, err)
 	assert.NotNil(t, client)
+	defer client.Close()
 
 	topic := "TestRmqClient_Subscribe"
 	proOpts := common.ProducerOptions{Topic: topic}
@@ -178,9 +181,9 @@ func TestRmqClient_Subscribe(t *testing.T) {
 
 	consumerOpts.Topic = topic
 	consumer, err = client.Subscribe(context.TODO(), consumerOpts)
-	defer consumer.Close()
 	assert.NoError(t, err)
 	assert.NotNil(t, consumer)
+	defer consumer.Close()
 	assert.Equal(t, consumer.Subscription(), subName)
 
 	msg := &common.ProducerMessage{

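These test changes also stop deferring Close before the constructor's error is checked; deferring on a value that may be nil when err != nil risks a panic during unwinding. A sketch of the corrected shape (types and constructor are stand-ins):

```go
package example_test

import (
	"errors"
	"testing"
)

type client struct{}

func (*client) Close() {}

// newClient stands in for createRmqClient/NewClient.
func newClient(fail bool) (*client, error) {
	if fail {
		return nil, errors.New("boom")
	}
	return &client{}, nil
}

func TestClientLifecycle(t *testing.T) {
	c, err := newClient(false)
	if err != nil || c == nil {
		t.Fatal(err)
	}
	defer c.Close() // deferred only once c is known to be valid
}
```
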
@@ -43,13 +43,13 @@ func Test_SliceContain(t *testing.T) {
 	}
 
 	for _, test := range cases {
-		switch test.item.(type) {
+		switch v := test.item.(type) {
 		case string:
-			if got := SliceContain(test.s.([]string), test.item.(string)); got != test.want {
+			if got := SliceContain(test.s.([]string), v); got != test.want {
 				t.Errorf("SliceContain(%v, %v) = %v", test.s, test.item, test.want)
 			}
 		case int:
-			if got := SliceContain(test.s.([]int), test.item.(int)); got != test.want {
+			if got := SliceContain(test.s.([]int), v); got != test.want {
 				t.Errorf("SliceContain(%v, %v) = %v", test.s, test.item, test.want)
 			}
 		}

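Binding the switch expression (switch v := x.(type)) gives each case a v already narrowed to that case's type, removing the repeated assertions such as test.item.(string); this is the shape gocritic's typeSwitchVar check suggests. For example:

```go
package main

import "fmt"

func describe(x interface{}) string {
	switch v := x.(type) { // v is re-typed per case; no second assertion needed
	case string:
		return "string: " + v
	case int:
		return fmt.Sprintf("int: %d", v)
	default:
		return fmt.Sprintf("other: %T", v)
	}
}

func main() {
	fmt.Println(describe("pk"), describe(42))
}
```
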
@@ -78,9 +78,7 @@ func TestServerIDInterceptor(t *testing.T) {
 		return nil, nil
 	}
 	serverInfo := &grpc.UnaryServerInfo{FullMethod: method}
-	interceptor := ServerIDValidationUnaryServerInterceptor(func() int64 {
-		return paramtable.GetNodeID()
-	})
+	interceptor := ServerIDValidationUnaryServerInterceptor(paramtable.GetNodeID)
 
 	// no md in context
 	_, err := interceptor(context.Background(), req, serverInfo, handler)
@@ -114,9 +112,7 @@ func TestServerIDInterceptor(t *testing.T) {
 	handler := func(srv interface{}, stream grpc.ServerStream) error {
 		return nil
 	}
-	interceptor := ServerIDValidationStreamServerInterceptor(func() int64 {
-		return paramtable.GetNodeID()
-	})
+	interceptor := ServerIDValidationStreamServerInterceptor(paramtable.GetNodeID)
 
 	// no md in context
 	err := interceptor(nil, newMockSS(context.Background()), nil, handler)

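This is gocritic's unlambda simplification: a function literal that only forwards a call can be replaced by the function value itself, as long as the signatures match (paramtable.GetNodeID already has the required func() int64 shape). Illustrated on a stand-in:

```go
package main

import "fmt"

func nodeID() int64 { return 42 } // stand-in for paramtable.GetNodeID

func main() {
	// unlambda: the literal adds nothing over the function value.
	get := func() int64 { return nodeID() }
	// Equivalent, simpler form used by the change above.
	get = nodeID
	fmt.Println(get())
}
```
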
@@ -154,7 +154,7 @@ func oldCode(code int32) commonpb.ErrorCode {
 	case ErrNodeNotMatch.code():
 		return commonpb.ErrorCode_NodeIDNotMatch
 
-	case ErrCollectionNotFound.code(), ErrPartitionNotFound.code(), ErrReplicaNotFound.code():
+	case ErrPartitionNotFound.code(), ErrReplicaNotFound.code():
 		return commonpb.ErrorCode_MetaFailed
 
 	case ErrReplicaNotAvailable.code(), ErrChannelNotAvailable.code(), ErrNodeNotAvailable.code():

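Dropping ErrCollectionNotFound.code() here points at gocritic's dupCase check, which flags a case expression that already appears elsewhere in the same switch (presumably ErrCollectionNotFound.code() was matched by an earlier case). Go itself only rejects duplicate constant cases at compile time; duplicates among non-constant expressions like these method calls compile silently, which is what the linter catches. A sketch with hypothetical error codes:

```go
package main

import "fmt"

func errNotFound() int32   { return 1 }
func errCollection() int32 { return 1 } // happens to share errNotFound's code

func class(code int32) string {
	switch code {
	case errNotFound():
		return "not found"
	case errCollection(), errNotFound(): // dupCase: errNotFound() already has a case
		return "meta failed"
	}
	return "other"
}

func main() { fmt.Println(class(1)) }
```
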
@@ -100,7 +100,6 @@ func (s *ChannelSuite) TestCompare() {
 	channel3, err := ParseChannel(virtualName1, mapper)
 	s.Require().NoError(err)
 
-	s.True(channel1.Equal(channel1))
 	s.False(channel1.Equal(channel2))
 	s.False(channel2.Equal(channel1))
 	s.True(channel1.Equal(channel3))

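Dropping s.True(channel1.Equal(channel1)) is the sort of self-comparison gocritic's dupArg check flags: when the same value is passed where distinct operands are expected, the call can only exercise the trivial path. A minimal sketch:

```go
package main

import "fmt"

type channel struct{ name string }

func (c channel) Equal(other channel) bool { return c.name == other.name }

func main() {
	a := channel{"dml_0"}
	b := channel{"dml_1"}
	fmt.Println(a.Equal(a)) // dupArg-style self-comparison: always true
	fmt.Println(a.Equal(b)) // meaningful comparison
}
```
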
@@ -1558,11 +1558,11 @@ func ComparePKInSlice(data *schemapb.IDs, i, j int) bool {
 
 // ComparePK returns if i-th PK of dataA > j-th PK of dataB
 func ComparePK(pkA, pkB interface{}) bool {
-	switch pkA.(type) {
+	switch v := pkA.(type) {
 	case int64:
-		return pkA.(int64) < pkB.(int64)
+		return v < pkB.(int64)
 	case string:
-		return pkA.(string) < pkB.(string)
+		return v < pkB.(string)
 	}
 	return false
 }

@@ -167,7 +167,6 @@ issues:
     - ifElseChain
    - elseif
     - commentFormatting
-    - var-naming
     - exitAfterDefer
     - captLocal
     - singleCaseSwitch
@@ -178,9 +177,6 @@ issues:
     - SA9009
     - SA1006
     - S1009
-    - unlambda
-    - dupCase
-    - dupArg
     - offBy1
     - unslice
     # Integer overflow conversion