enhance: Add nilness linter and fix some small issues (#34049)

Add the `nilness` analyzer to the govet linter settings and fix the issues it reported.
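For context, `nilness` is a go/analysis pass from golang.org/x/tools: it walks each function's control-flow graph and reports nil-pointer dereferences plus nil comparisons that are provably always true or always false. A minimal, hypothetical sketch (not code from this repository) of the kind of finding fixed below:

package sketch

// demo shows a degenerate comparison that nilness reports.
func demo(ret error) error {
	var err error // declared, never reassigned

	// nilness: impossible condition. err is provably nil on every
	// path reaching this if, so the branch is dead code.
	if err != nil {
		return err
	}

	// This check is fine: ret is a parameter, so nilness cannot
	// prove it nil or non-nil.
	if ret != nil {
		return ret
	}
	return nil
}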

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
congqixia 2024-06-24 14:52:03 +08:00 committed by GitHub
parent a1248a19f8
commit fd922d921a
6 changed files with 10 additions and 26 deletions


@@ -41,6 +41,9 @@ linters-settings:
       - default
       - prefix(github.com/milvus-io)
     custom-order: true
+  govet:
+    enable: # add extra linters
+      - nilness
   gofumpt:
     lang-version: "1.18"
     module-path: github.com/milvus-io
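Once listed under govet's enable list, golangci-lint runs nilness through its govet wrapper. The same analyzer can also be run standalone to reproduce findings locally; a minimal sketch using x/tools' singlechecker (the file location and module wiring are assumptions, not part of this change):

// Command nilness runs only the nilness analyzer on the packages
// named on the command line; requires golang.org/x/tools in go.mod.
package main

import (
	"golang.org/x/tools/go/analysis/passes/nilness"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() { singlechecker.Main(nilness.Analyzer) }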


@@ -74,10 +74,6 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
         zap.Int64("partitionID", at.req.GetPartitionID()), zap.Int64("fieldID", at.req.GetFieldID()))
     log.Info("Begin to build analyze task")
-    if err != nil {
-        log.Warn("create analyze info failed", zap.Error(err))
-        return err
-    }
     storageConfig := &clusteringpb.StorageConfig{
         Address: at.req.GetStorageConfig().GetAddress(),
@@ -103,19 +99,11 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
         numRows := stats.GetNumRows()
         numRowsMap[segID] = numRows
         log.Info("append segment rows", zap.Int64("segment id", segID), zap.Int64("rows", numRows))
-        if err != nil {
-            log.Warn("append segment num rows failed", zap.Error(err))
-            return err
-        }
         insertFiles := make([]string, 0, len(stats.GetLogIDs()))
         for _, id := range stats.GetLogIDs() {
             path := metautil.BuildInsertLogPath(at.req.GetStorageConfig().RootPath,
                 at.req.GetCollectionID(), at.req.GetPartitionID(), segID, at.req.GetFieldID(), id)
             insertFiles = append(insertFiles, path)
-            if err != nil {
-                log.Warn("append insert binlog path failed", zap.Error(err))
-                return err
-            }
         }
         segmentInsertFilesMap[segID] = &clusteringpb.InsertFiles{InsertFiles: insertFiles}
     }
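Both deletions above are nilness findings: each removed `if err != nil` re-checked an err that the analyzer can prove is nil on every path reaching it, so the log-and-return branches were unreachable dead code.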


@@ -92,7 +92,7 @@ func (m *SimpleLimiter) Check(dbID int64, collectionIDToPartIDs map[int64][]int64
     }
     // 2. check database level rate limits
-    if ret == nil && dbID != util.InvalidDBID {
+    if dbID != util.InvalidDBID {
         dbRateLimiters := m.rateLimiter.GetOrCreateDatabaseLimiters(dbID, newDatabaseLimiter)
         ret = dbRateLimiters.Check(rt, n)
         if ret != nil {
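This one is the mirror image of the dead err checks: nilness can prove ret is nil on every path reaching the condition, so the `ret == nil &&` clause is always true and dropping it does not change behavior. A self-contained sketch of the pattern (the names are hypothetical stand-ins, not the Milvus types):

package sketch

const invalidDBID int64 = -1 // stand-in for util.InvalidDBID

func checkDB(dbID int64) error { return nil }

func check(dbID int64) error {
	var ret error
	// Before the fix: ret still holds its zero value (nil) on every
	// path reaching this if, so nilness flags "ret == nil" as a
	// tautological condition; the clause can simply be removed.
	if ret == nil && dbID != invalidDBID {
		ret = checkDB(dbID)
	}
	return ret
}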


@@ -465,7 +465,7 @@ func SaveBinLog(ctx context.Context,
     k := JoinIDPath(collectionID, partitionID, segmentID, fieldID)
     key := path.Join(chunkManager.RootPath(), "stats-log", k)
-    kvs[key] = blob.Value[:]
+    kvs[key] = blob.Value
     statsBinlog = append(statsBinlog, &datapb.FieldBinlog{
         FieldID: fieldID,
         Binlogs: []*datapb.Binlog{{LogPath: key}},
@@ -653,7 +653,7 @@ func SaveDeltaLog(collectionID int64,
     key := JoinIDPath(collectionID, partitionID, segmentID, pkFieldID)
     // keyPath := path.Join(defaultLocalStorage, "delta-log", key)
     keyPath := path.Join(cm.RootPath(), "delta-log", key)
-    kvs[keyPath] = blob.Value[:]
+    kvs[keyPath] = blob.Value
     fieldBinlog = append(fieldBinlog, &datapb.FieldBinlog{
         FieldID: pkFieldID,
         Binlogs: []*datapb.Binlog{{
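The `[:]` removed here is a no-op: blob.Value is already a []byte, and a full-slice expression on a slice yields an identical slice header (same pointer, length, and capacity). It only does real work on an array, where a[:] converts the array to a slice. The same cleanup appears in the TestPrintDDFiles hunk below, where string(b[:]) becomes string(b). A quick illustration:

package sketch

func demo() []byte {
	b := []byte("binlog bytes")
	c := b[:] // identical header: c aliases b completely
	c[0] = 'B'
	// b[0] is now 'B' as well, confirming the [:] bought nothing.

	var a [4]byte
	s := a[:] // on an array, [:] is meaningful: it creates a slice
	_ = s
	return b
}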


@@ -174,13 +174,6 @@ func (b *BlobReader) Seek(offset int64, whence int) (int64, error) {
 }
 func (AzureObjectStorage *AzureObjectStorage) GetObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) {
-    opts := azblob.DownloadStreamOptions{}
-    if offset > 0 {
-        opts.Range = azblob.HTTPRange{
-            Offset: offset,
-            Count:  size,
-        }
-    }
     return NewBlobReader(AzureObjectStorage.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName), offset)
 }
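The deleted block built an azblob.DownloadStreamOptions and conditionally set its Range, but opts was never passed to any call; the remaining statement hands offset straight to NewBlobReader, so the whole construction was dead code and goes away as part of this cleanup.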


@@ -441,10 +441,10 @@ func TestPrintDDFiles(t *testing.T) {
     dropPartitionString, err := proto.Marshal(&dropPartitionReq)
     assert.NoError(t, err)
     ddRequests := []string{
-        string(createCollString[:]),
-        string(dropCollString[:]),
-        string(createPartitionString[:]),
-        string(dropPartitionString[:]),
+        string(createCollString),
+        string(dropCollString),
+        string(createPartitionString),
+        string(dropPartitionString),
     }
     eventTypeCodes := []EventTypeCode{
         CreateCollectionEventType,