Check errors with Error() and NoError() for better report messages (#24736)

Signed-off-by: yah01 <yang.cen@zilliz.com>
pull/24623/head
yah01 2023-06-08 15:36:36 +08:00 committed by GitHub
parent bc2112354d
commit ebd0279d3f
146 changed files with 2987 additions and 2987 deletions
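For context on the motivation: these tests use testify, and the point of replacing `assert.Nil`/`assert.NotNil` on error values with `assert.NoError`/`assert.Error` is the failure report. Below is a minimal, intentionally failing sketch of the difference; `doWork` is a hypothetical stand-in for any call under test, and the quoted failure messages are approximate.

```go
package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// doWork is a hypothetical stand-in for any call under test that returns an error.
func doWork() error {
	return fmt.Errorf("load collection: %w", errors.New("segment not found"))
}

func TestErrorReport(t *testing.T) {
	err := doWork()

	// Old style: on failure testify reports the raw value, roughly
	//   Expected nil, but got: &fmt.wrapError{...}
	assert.Nil(t, err)

	// New style: on failure testify renders the error itself, roughly
	//   Received unexpected error:
	//   load collection: segment not found
	// which reads much better in CI logs. assert.Error is the analogous
	// replacement for assert.NotNil when an error is expected.
	assert.NoError(t, err)
}
```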

@ -83,32 +83,32 @@ func TestGlobalTSOAllocator_All(t *testing.T) {
t.Run("Initialize", func(t *testing.T) {
err := gTestIDAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("AllocOne", func(t *testing.T) {
one, err := gTestIDAllocator.AllocOne()
assert.Nil(t, err)
assert.NoError(t, err)
ano, err := gTestIDAllocator.AllocOne()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotEqual(t, one, ano)
})
t.Run("Alloc", func(t *testing.T) {
count := uint32(2 << 10)
idStart, idEnd, err := gTestIDAllocator.Alloc(count)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, count, uint32(idEnd-idStart))
})
t.Run("Alloc2", func(t *testing.T) {
count1 := uint32(2 << 18)
id1, err := gTestIDAllocator.allocator.GenerateTSO(count1)
assert.Nil(t, err)
assert.NoError(t, err)
count2 := uint32(2 << 8)
id2, err := gTestIDAllocator.allocator.GenerateTSO(count2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id2-id1, uint64(count2))
})
}

@ -49,21 +49,21 @@ func TestIDAllocator(t *testing.T) {
mockIDAllocator := newMockIDAllocator()
idAllocator, err := NewIDAllocator(ctx, mockIDAllocator, int64(1))
assert.Nil(t, err)
assert.NoError(t, err)
err = idAllocator.Start()
assert.Nil(t, err)
assert.NoError(t, err)
idStart, idEnd, err := idAllocator.Alloc(20000)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, idStart, int64(1))
assert.Equal(t, idEnd, int64(20001))
id, err := idAllocator.AllocOne()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, int64(20001))
id, err = idAllocator.AllocOne()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, int64(20002))
}

@ -44,7 +44,7 @@ func TestAllocator_Basic(t *testing.T) {
ms := newMockRootCoordService()
allocator := newRootCoordAllocator(ms)
err := ms.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
_, err = allocator.allocTimestamp(ctx)
assert.Error(t, err)

@ -913,7 +913,7 @@ func TestChannelManager_Reload(t *testing.T) {
defer cancel()
cm, err := NewChannelManager(metakv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Nil(t, cm.AddNode(1))
assert.Nil(t, cm.AddNode(2))
cm.store = &ChannelStore{
@ -934,7 +934,7 @@ func TestChannelManager_Reload(t *testing.T) {
require.NoError(t, err)
cm2, err := NewChannelManager(metakv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Nil(t, cm2.Startup(ctx, []int64{3}))
waitAndCheckState(t, metakv, datapb.ChannelWatchState_ToWatch, 3, "channel1", 1)

@ -55,7 +55,7 @@ func TestClusterCreate(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -65,7 +65,7 @@ func TestClusterCreate(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
dataNodes := sessionManager.GetSessions()
assert.EqualValues(t, 1, len(dataNodes))
assert.EqualValues(t, "localhost:8080", dataNodes[0].info.Address)
@ -85,18 +85,18 @@ func TestClusterCreate(t *testing.T) {
},
}
info1Data, err := proto.Marshal(info1)
assert.Nil(t, err)
assert.NoError(t, err)
err = kv.Save(Params.CommonCfg.DataCoordWatchSubPath.GetValue()+"/1/channel1", string(info1Data))
assert.Nil(t, err)
assert.NoError(t, err)
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
err = cluster.Startup(ctx, []*NodeInfo{{NodeID: 1, Address: "localhost:9999"}})
assert.Nil(t, err)
assert.NoError(t, err)
channels := channelManager.GetChannels()
assert.EqualValues(t, []*NodeChannelInfo{{1, []*channel{{Name: "channel1", CollectionID: 1}}}}, channels)
@ -110,7 +110,7 @@ func TestClusterCreate(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
addr := "localhost:8080"
@ -120,10 +120,10 @@ func TestClusterCreate(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.UnRegister(info)
assert.Nil(t, err)
assert.NoError(t, err)
sessions := sessionManager.GetSessions()
assert.Empty(t, sessions)
@ -131,7 +131,7 @@ func TestClusterCreate(t *testing.T) {
sessionManager2 := NewSessionManager()
channelManager2, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
clusterReload := NewCluster(sessionManager2, channelManager2)
defer clusterReload.Close()
@ -142,7 +142,7 @@ func TestClusterCreate(t *testing.T) {
}
nodes = []*NodeInfo{info}
err = clusterReload.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
sessions = sessionManager2.GetSessions()
assert.EqualValues(t, 1, len(sessions))
assert.EqualValues(t, 2, sessions[0].info.NodeID)
@ -157,7 +157,7 @@ func TestClusterCreate(t *testing.T) {
fkv := &loadPrefixFailKV{MetaKv: kv}
_, err := NewChannelManager(fkv, newMockHandler())
assert.NotNil(t, err)
assert.Error(t, err)
})
}
@ -186,18 +186,18 @@ func TestRegister(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
err = cluster.Startup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
info := &NodeInfo{
NodeID: 1,
Address: addr,
}
err = cluster.Register(info)
assert.Nil(t, err)
assert.NoError(t, err)
sessions := sessionManager.GetSessions()
assert.EqualValues(t, 1, len(sessions))
assert.EqualValues(t, "localhost:8080", sessions[0].info.Address)
@ -211,23 +211,23 @@ func TestRegister(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
err = channelManager.Watch(&channel{
Name: "ch1",
CollectionID: 0,
})
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
err = cluster.Startup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
info := &NodeInfo{
NodeID: 1,
Address: addr,
}
err = cluster.Register(info)
assert.Nil(t, err)
assert.NoError(t, err)
bufferChannels := channelManager.GetBufferChannels()
assert.Empty(t, bufferChannels.Channels)
nodeChannels := channelManager.GetChannels()
@ -244,22 +244,22 @@ func TestRegister(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
addr := "localhost:8080"
err = cluster.Startup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
info := &NodeInfo{
NodeID: 1,
Address: addr,
}
err = cluster.Register(info)
assert.Nil(t, err)
assert.NoError(t, err)
cluster.Close()
sessionManager2 := NewSessionManager()
channelManager2, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
restartCluster := NewCluster(sessionManager2, channelManager2)
defer restartCluster.Close()
channels := channelManager2.GetChannels()
@ -282,7 +282,7 @@ func TestUnregister(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -292,9 +292,9 @@ func TestUnregister(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.UnRegister(nodes[0])
assert.Nil(t, err)
assert.NoError(t, err)
sessions := sessionManager.GetSessions()
assert.Empty(t, sessions)
})
@ -307,7 +307,7 @@ func TestUnregister(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
@ -321,11 +321,11 @@ func TestUnregister(t *testing.T) {
}
nodes := []*NodeInfo{nodeInfo1, nodeInfo2}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("ch1", 1)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.UnRegister(nodeInfo1)
assert.Nil(t, err)
assert.NoError(t, err)
channels := channelManager.GetChannels()
assert.EqualValues(t, 1, len(channels))
@ -345,7 +345,7 @@ func TestUnregister(t *testing.T) {
}
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
@ -354,11 +354,11 @@ func TestUnregister(t *testing.T) {
NodeID: 1,
}
err = cluster.Startup(ctx, []*NodeInfo{nodeInfo})
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("ch_1", 1)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.UnRegister(nodeInfo)
assert.Nil(t, err)
assert.NoError(t, err)
channels := channelManager.GetChannels()
assert.Empty(t, channels)
channel := channelManager.GetBufferChannels()
@ -386,7 +386,7 @@ func TestWatchIfNeeded(t *testing.T) {
}
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
@ -397,9 +397,9 @@ func TestWatchIfNeeded(t *testing.T) {
}
err = cluster.Startup(ctx, []*NodeInfo{info})
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("ch1", 1)
assert.Nil(t, err)
assert.NoError(t, err)
channels := channelManager.GetChannels()
assert.EqualValues(t, 1, len(channels))
assert.EqualValues(t, "ch1", channels[0].Channels[0].Name)
@ -410,12 +410,12 @@ func TestWatchIfNeeded(t *testing.T) {
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
err = cluster.Watch("ch1", 1)
assert.Nil(t, err)
assert.NoError(t, err)
channels := channelManager.GetChannels()
assert.Empty(t, channels)
@ -436,7 +436,7 @@ func TestConsistentHashPolicy(t *testing.T) {
chash := consistent.New()
factory := NewConsistentHashChannelPolicyFactory(chash)
channelManager, err := NewChannelManager(kv, newMockHandler(), withFactory(factory))
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
@ -458,51 +458,51 @@ func TestConsistentHashPolicy(t *testing.T) {
Address: "localhost:3333",
}
err = cluster.Register(nodeInfo1)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Register(nodeInfo2)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Register(nodeInfo3)
assert.Nil(t, err)
assert.NoError(t, err)
channels := []string{"ch1", "ch2", "ch3"}
for _, c := range channels {
err = cluster.Watch(c, 1)
assert.Nil(t, err)
assert.NoError(t, err)
idstr, err := hash.Get(c)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := deformatNodeID(idstr)
assert.Nil(t, err)
assert.NoError(t, err)
match := channelManager.Match(id, c)
assert.True(t, match)
}
hash.Remove("1")
err = cluster.UnRegister(nodeInfo1)
assert.Nil(t, err)
assert.NoError(t, err)
for _, c := range channels {
idstr, err := hash.Get(c)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := deformatNodeID(idstr)
assert.Nil(t, err)
assert.NoError(t, err)
match := channelManager.Match(id, c)
assert.True(t, match)
}
hash.Remove("2")
err = cluster.UnRegister(nodeInfo2)
assert.Nil(t, err)
assert.NoError(t, err)
for _, c := range channels {
idstr, err := hash.Get(c)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := deformatNodeID(idstr)
assert.Nil(t, err)
assert.NoError(t, err)
match := channelManager.Match(id, c)
assert.True(t, match)
}
hash.Remove("3")
err = cluster.UnRegister(nodeInfo3)
assert.Nil(t, err)
assert.NoError(t, err)
bufferChannels := channelManager.GetBufferChannels()
assert.EqualValues(t, 3, len(bufferChannels.Channels))
}
@ -518,7 +518,7 @@ func TestCluster_Flush(t *testing.T) {
defer cancel()
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -528,7 +528,7 @@ func TestCluster_Flush(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("chan-1", 1)
assert.NoError(t, err)
@ -565,7 +565,7 @@ func TestCluster_Import(t *testing.T) {
defer cancel()
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -575,7 +575,7 @@ func TestCluster_Import(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("chan-1", 1)
assert.NoError(t, err)
@ -601,7 +601,7 @@ func TestCluster_ReCollectSegmentStats(t *testing.T) {
}
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -611,7 +611,7 @@ func TestCluster_ReCollectSegmentStats(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("chan-1", 1)
assert.NoError(t, err)
@ -627,7 +627,7 @@ func TestCluster_ReCollectSegmentStats(t *testing.T) {
defer cancel()
sessionManager := NewSessionManager()
channelManager, err := NewChannelManager(kv, newMockHandler())
assert.Nil(t, err)
assert.NoError(t, err)
cluster := NewCluster(sessionManager, channelManager)
defer cluster.Close()
addr := "localhost:8080"
@ -637,7 +637,7 @@ func TestCluster_ReCollectSegmentStats(t *testing.T) {
}
nodes := []*NodeInfo{info}
err = cluster.Startup(ctx, nodes)
assert.Nil(t, err)
assert.NoError(t, err)
err = cluster.Watch("chan-1", 1)
assert.NoError(t, err)

@ -56,7 +56,7 @@ func Test_garbageCollector_basic(t *testing.T) {
require.NoError(t, err)
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("normal gc", func(t *testing.T) {
gc := newGarbageCollector(meta, newMockHandler(), GcOption{
@ -109,7 +109,7 @@ func Test_garbageCollector_scan(t *testing.T) {
require.NoError(t, err)
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("key is reference", func(t *testing.T) {
gc := newGarbageCollector(meta, newMockHandler(), GcOption{

@ -39,7 +39,7 @@ func TestIndexNodeManager_AddNode(t *testing.T) {
t.Run("success", func(t *testing.T) {
err := nm.AddNode(1, "indexnode-1")
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("fail", func(t *testing.T) {

@ -150,7 +150,7 @@ func TestMeta_Basic(t *testing.T) {
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
testSchema := newTestSchema()
@ -186,22 +186,22 @@ func TestMeta_Basic(t *testing.T) {
meta.AddCollection(collInfoWoPartition)
// create seg0 for partition0, seg0/seg1 for partition1
segID0_0, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo0_0 := buildSegment(collID, partID0, segID0_0, channelName, true)
segID1_0, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo1_0 := buildSegment(collID, partID1, segID1_0, channelName, false)
segID1_1, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo1_1 := buildSegment(collID, partID1, segID1_1, channelName, false)
// check AddSegment
err = meta.AddSegment(segInfo0_0)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.AddSegment(segInfo1_0)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.AddSegment(segInfo1_1)
assert.Nil(t, err)
assert.NoError(t, err)
// check GetSegment
info0_0 := meta.GetHealthySegment(segID0_0)
@ -229,15 +229,15 @@ func TestMeta_Basic(t *testing.T) {
// check DropSegment
err = meta.DropSegment(segID1_0)
assert.Nil(t, err)
assert.NoError(t, err)
segIDs = meta.GetSegmentsIDOfPartition(collID, partID1)
assert.EqualValues(t, 1, len(segIDs))
assert.Contains(t, segIDs, segID1_1)
err = meta.SetState(segID0_0, commonpb.SegmentState_Sealed)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.SetState(segID0_0, commonpb.SegmentState_Flushed)
assert.Nil(t, err)
assert.NoError(t, err)
info0_0 = meta.GetHealthySegment(segID0_0)
assert.NotNil(t, info0_0)
@ -273,28 +273,28 @@ func TestMeta_Basic(t *testing.T) {
fkv := &saveFailKV{MetaKv: memoryKV}
catalog := datacoord.NewCatalog(fkv, "", "")
meta, err := newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.AddSegment(NewSegmentInfo(&datapb.SegmentInfo{}))
assert.NotNil(t, err)
assert.Error(t, err)
fkv2 := &removeFailKV{MetaKv: memoryKV}
catalog = datacoord.NewCatalog(fkv2, "", "")
meta, err = newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
// nil, since no segment yet
err = meta.DropSegment(0)
assert.Nil(t, err)
assert.NoError(t, err)
// nil, since Save error not injected
err = meta.AddSegment(NewSegmentInfo(&datapb.SegmentInfo{}))
assert.Nil(t, err)
assert.NoError(t, err)
// error injected
err = meta.DropSegment(0)
assert.NotNil(t, err)
assert.Error(t, err)
catalog = datacoord.NewCatalog(fkv, "", "")
meta, err = newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Test GetCount", func(t *testing.T) {
@ -307,19 +307,19 @@ func TestMeta_Basic(t *testing.T) {
// add seg1 with 100 rows
segID0, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo0 := buildSegment(collID, partID0, segID0, channelName, false)
segInfo0.NumOfRows = rowCount0
err = meta.AddSegment(segInfo0)
assert.Nil(t, err)
assert.NoError(t, err)
// add seg2 with 300 rows
segID1, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo1 := buildSegment(collID, partID0, segID1, channelName, false)
segInfo1.NumOfRows = rowCount1
err = meta.AddSegment(segInfo1)
assert.Nil(t, err)
assert.NoError(t, err)
// check partition/collection statistics
nums = meta.GetNumRowsOfPartition(collID, partID0)
@ -373,19 +373,19 @@ func TestMeta_Basic(t *testing.T) {
// add seg0 with size0
segID0, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo0 := buildSegment(collID, partID0, segID0, channelName, false)
segInfo0.size.Store(size0)
err = meta.AddSegment(segInfo0)
assert.Nil(t, err)
assert.NoError(t, err)
// add seg1 with size1
segID1, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
segInfo1 := buildSegment(collID, partID0, segID1, channelName, false)
segInfo1.size.Store(size1)
err = meta.AddSegment(segInfo1)
assert.Nil(t, err)
assert.NoError(t, err)
// check TotalBinlogSize
total, collectionBinlogSize := meta.GetCollectionBinlogSize()
@ -397,7 +397,7 @@ func TestMeta_Basic(t *testing.T) {
func TestGetUnFlushedSegments(t *testing.T) {
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
s1 := &datapb.SegmentInfo{
ID: 0,
CollectionID: 0,
@ -405,7 +405,7 @@ func TestGetUnFlushedSegments(t *testing.T) {
State: commonpb.SegmentState_Growing,
}
err = meta.AddSegment(NewSegmentInfo(s1))
assert.Nil(t, err)
assert.NoError(t, err)
s2 := &datapb.SegmentInfo{
ID: 1,
CollectionID: 0,
@ -413,10 +413,10 @@ func TestGetUnFlushedSegments(t *testing.T) {
State: commonpb.SegmentState_Flushed,
}
err = meta.AddSegment(NewSegmentInfo(s2))
assert.Nil(t, err)
assert.NoError(t, err)
segments := meta.GetUnFlushedSegments()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(segments))
assert.EqualValues(t, 0, segments[0].ID)
@ -426,18 +426,18 @@ func TestGetUnFlushedSegments(t *testing.T) {
func TestUpdateFlushSegmentsInfo(t *testing.T) {
t.Run("normal", func(t *testing.T) {
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
segment1 := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing, Binlogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getInsertLogPath("binlog0", 1))},
Statslogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getStatsLogPath("statslog0", 1))}}}
err = meta.AddSegment(segment1)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.UpdateFlushSegmentsInfo(1, true, false, true, []*datapb.FieldBinlog{getFieldBinlogPathsWithEntry(1, 10, getInsertLogPath("binlog1", 1))},
[]*datapb.FieldBinlog{getFieldBinlogPaths(1, getStatsLogPath("statslog1", 1))},
[]*datapb.FieldBinlog{{Binlogs: []*datapb.Binlog{{EntriesNum: 1, TimestampFrom: 100, TimestampTo: 200, LogSize: 1000, LogPath: getDeltaLogPath("deltalog1", 1)}}}},
[]*datapb.CheckPoint{{SegmentID: 1, NumOfRows: 10}}, []*datapb.SegmentStartPosition{{SegmentID: 1, StartPosition: &msgpb.MsgPosition{MsgID: []byte{1, 2, 3}}}})
assert.Nil(t, err)
assert.NoError(t, err)
updated := meta.GetHealthySegment(1)
expected := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
@ -462,24 +462,24 @@ func TestUpdateFlushSegmentsInfo(t *testing.T) {
t.Run("update non-existed segment", func(t *testing.T) {
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.UpdateFlushSegmentsInfo(1, false, false, false, nil, nil, nil, nil, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("update checkpoints and start position of non existed segment", func(t *testing.T) {
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
segment1 := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing}}
err = meta.AddSegment(segment1)
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.UpdateFlushSegmentsInfo(1, false, false, false, nil, nil, nil, []*datapb.CheckPoint{{SegmentID: 2, NumOfRows: 10}},
[]*datapb.SegmentStartPosition{{SegmentID: 2, StartPosition: &msgpb.MsgPosition{MsgID: []byte{1, 2, 3}}}})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Nil(t, meta.GetHealthySegment(2))
})
@ -488,7 +488,7 @@ func TestUpdateFlushSegmentsInfo(t *testing.T) {
failedKv := &saveFailKV{kv}
catalog := datacoord.NewCatalog(failedKv, "", "")
meta, err := newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
segmentInfo := &SegmentInfo{
SegmentInfo: &datapb.SegmentInfo{
@ -503,7 +503,7 @@ func TestUpdateFlushSegmentsInfo(t *testing.T) {
[]*datapb.FieldBinlog{getFieldBinlogPaths(1, getInsertLogPath("statslog", 1))},
[]*datapb.FieldBinlog{{Binlogs: []*datapb.Binlog{{EntriesNum: 1, TimestampFrom: 100, TimestampTo: 200, LogSize: 1000, LogPath: getDeltaLogPath("deltalog", 1)}}}},
[]*datapb.CheckPoint{{SegmentID: 1, NumOfRows: 10}}, []*datapb.SegmentStartPosition{{SegmentID: 1, StartPosition: &msgpb.MsgPosition{MsgID: []byte{1, 2, 3}}}})
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, "mocked fail", err.Error())
segmentInfo = meta.GetHealthySegment(1)
assert.EqualValues(t, 0, segmentInfo.NumOfRows)
@ -614,7 +614,7 @@ func TestMeta_PrepareCompleteCompactionMutation(t *testing.T) {
NumOfRows: 2,
}
beforeCompact, afterCompact, newSegment, metricMutation, err := m.PrepareCompleteCompactionMutation(inCompactionLogs, inCompactionResult)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, beforeCompact)
assert.NotNil(t, afterCompact)
assert.NotNil(t, newSegment)

@ -62,11 +62,11 @@ func TestGetDataNodeMetrics(t *testing.T) {
req := &milvuspb.GetMetricsRequest{}
// nil node
_, err := svr.getDataNodeMetrics(ctx, req, nil)
assert.NotNil(t, err)
assert.Error(t, err)
// nil client node
_, err = svr.getDataNodeMetrics(ctx, req, NewSession(&NodeInfo{}, nil))
assert.NotNil(t, err)
assert.Error(t, err)
creator := func(ctx context.Context, addr string) (types.DataNode, error) {
return newMockDataNodeClient(100, nil)
@ -75,14 +75,14 @@ func TestGetDataNodeMetrics(t *testing.T) {
// mock datanode client
session := NewSession(&NodeInfo{}, creator)
info, err := svr.getDataNodeMetrics(ctx, req, session)
assert.Nil(t, err)
assert.NoError(t, err)
assert.False(t, info.HasError)
assert.Equal(t, metricsinfo.ConstructComponentName(typeutil.DataNodeRole, 100), info.BaseComponentInfos.Name)
getMockFailedClientCreator := func(mockFunc func() (*milvuspb.GetMetricsResponse, error)) dataNodeCreatorFunc {
return func(ctx context.Context, addr string) (types.DataNode, error) {
cli, err := creator(ctx, addr)
assert.Nil(t, err)
assert.NoError(t, err)
return &mockMetricDataNodeClient{DataNode: cli, mock: mockFunc}, nil
}
}
@ -92,7 +92,7 @@ func TestGetDataNodeMetrics(t *testing.T) {
})
info, err = svr.getDataNodeMetrics(ctx, req, NewSession(&NodeInfo{}, mockFailClientCreator))
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
// mock status not success
@ -106,7 +106,7 @@ func TestGetDataNodeMetrics(t *testing.T) {
})
info, err = svr.getDataNodeMetrics(ctx, req, NewSession(&NodeInfo{}, mockFailClientCreator))
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
assert.Equal(t, "mocked error", info.ErrorReason)
@ -121,7 +121,7 @@ func TestGetDataNodeMetrics(t *testing.T) {
})
info, err = svr.getDataNodeMetrics(ctx, req, NewSession(&NodeInfo{}, mockFailClientCreator))
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
}
@ -134,13 +134,13 @@ func TestGetIndexNodeMetrics(t *testing.T) {
req := &milvuspb.GetMetricsRequest{}
// nil node
_, err := svr.getIndexNodeMetrics(ctx, req, nil)
assert.NotNil(t, err)
assert.Error(t, err)
// return error
info, err := svr.getIndexNodeMetrics(ctx, req, &mockMetricIndexNodeClient{mock: func() (*milvuspb.GetMetricsResponse, error) {
return nil, errors.New("mock error")
}})
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
// failed
@ -156,7 +156,7 @@ func TestGetIndexNodeMetrics(t *testing.T) {
}, nil
},
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
assert.Equal(t, metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, 100), info.BaseComponentInfos.Name)
@ -173,7 +173,7 @@ func TestGetIndexNodeMetrics(t *testing.T) {
}, nil
},
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, info.HasError)
assert.Equal(t, metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, 100), info.BaseComponentInfos.Name)
@ -211,7 +211,7 @@ func TestGetIndexNodeMetrics(t *testing.T) {
},
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.False(t, info.HasError)
assert.Equal(t, metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, 100), info.BaseComponentInfos.Name)
}

@ -87,7 +87,7 @@ func TestUpperLimitCalBySchema(t *testing.T) {
for _, c := range testCases {
result, err := calBySchemaPolicy(c.schema)
if c.expectErr {
assert.NotNil(t, err)
assert.Error(t, err)
} else {
assert.Equal(t, c.expected, result)
}

@ -37,7 +37,7 @@ func TestManagerOptions(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
segmentManager := newSegmentManager(meta, mockAllocator, nil)
t.Run("test with alloc helper", func(t *testing.T) {
@ -98,17 +98,17 @@ func TestAllocSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
segmentManager := newSegmentManager(meta, mockAllocator, nil)
schema := newTestSchema()
collID, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
t.Run("normal allocation", func(t *testing.T) {
allocations, err := segmentManager.AllocSegment(ctx, collID, 100, "c1", 100)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
assert.EqualValues(t, 100, allocations[0].NumOfRows)
assert.NotEqualValues(t, 0, allocations[0].SegmentID)
@ -121,7 +121,7 @@ func TestAllocSegment(t *testing.T) {
}
segmentManager := newSegmentManager(meta, failsAllocator, nil)
_, err := segmentManager.AllocSegment(ctx, collID, 100, "c2", 100)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("allocation fails 2", func(t *testing.T) {
@ -130,7 +130,7 @@ func TestAllocSegment(t *testing.T) {
}
segmentManager := newSegmentManager(meta, failsAllocator, nil)
_, err := segmentManager.AllocSegment(ctx, collID, 100, "c1", 100)
assert.NotNil(t, err)
assert.Error(t, err)
})
}
@ -139,18 +139,18 @@ func TestAllocSegmentForImport(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
ms := newMockRootCoordService()
segmentManager := newSegmentManager(meta, mockAllocator, ms)
schema := newTestSchema()
collID, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
t.Run("normal allocation", func(t *testing.T) {
allocation, err := segmentManager.allocSegmentForImport(ctx, collID, 100, "c1", 100, 0)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, allocation)
assert.EqualValues(t, 100, allocation.NumOfRows)
assert.NotEqualValues(t, 0, allocation.SegmentID)
@ -163,7 +163,7 @@ func TestAllocSegmentForImport(t *testing.T) {
}
segmentManager := newSegmentManager(meta, failsAllocator, ms)
_, err := segmentManager.allocSegmentForImport(ctx, collID, 100, "c1", 100, 0)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("allocation fails 2", func(t *testing.T) {
@ -172,13 +172,13 @@ func TestAllocSegmentForImport(t *testing.T) {
}
segmentManager := newSegmentManager(meta, failsAllocator, ms)
_, err := segmentManager.allocSegmentForImport(ctx, collID, 100, "c1", 100, 0)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("nil RootCoord", func(t *testing.T) {
segmentManager := newSegmentManager(meta, mockAllocator, nil)
_, err := segmentManager.allocSegmentForImport(ctx, collID, 100, "c1", 100, 0)
assert.NotNil(t, err)
assert.Error(t, err)
})
}
@ -187,11 +187,11 @@ func TestLoadSegmentsFromMeta(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
sealedSegment := &datapb.SegmentInfo{
@ -222,11 +222,11 @@ func TestLoadSegmentsFromMeta(t *testing.T) {
LastExpireTime: 1000,
}
err = meta.AddSegment(NewSegmentInfo(sealedSegment))
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.AddSegment(NewSegmentInfo(growingSegment))
assert.Nil(t, err)
assert.NoError(t, err)
err = meta.AddSegment(NewSegmentInfo(flushedSegment))
assert.Nil(t, err)
assert.NoError(t, err)
segmentManager := newSegmentManager(meta, mockAllocator, nil)
segments := segmentManager.segments
@ -237,18 +237,18 @@ func TestSaveSegmentsToMeta(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil)
allocations, err := segmentManager.AllocSegment(context.Background(), collID, 0, "c1", 1000)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
_, err = segmentManager.SealAllSegments(context.Background(), collID, nil, false)
assert.Nil(t, err)
assert.NoError(t, err)
segment := meta.GetHealthySegment(allocations[0].SegmentID)
assert.NotNil(t, segment)
assert.EqualValues(t, segment.LastExpireTime, allocations[0].ExpireTime)
@ -259,18 +259,18 @@ func TestSaveSegmentsToMetaWithSpecificSegments(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil)
allocations, err := segmentManager.AllocSegment(context.Background(), collID, 0, "c1", 1000)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
_, err = segmentManager.SealAllSegments(context.Background(), collID, []int64{allocations[0].SegmentID}, false)
assert.Nil(t, err)
assert.NoError(t, err)
segment := meta.GetHealthySegment(allocations[0].SegmentID)
assert.NotNil(t, segment)
assert.EqualValues(t, segment.LastExpireTime, allocations[0].ExpireTime)
@ -281,15 +281,15 @@ func TestDropSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil)
allocations, err := segmentManager.AllocSegment(context.Background(), collID, 0, "c1", 1000)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
segID := allocations[0].SegmentID
segment := meta.GetHealthySegment(segID)
@ -304,11 +304,11 @@ func TestAllocRowsLargerThanOneSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
var mockPolicy = func(schema *schemapb.CollectionSchema) (int, error) {
@ -316,7 +316,7 @@ func TestAllocRowsLargerThanOneSegment(t *testing.T) {
}
segmentManager := newSegmentManager(meta, mockAllocator, nil, withCalUpperLimitPolicy(mockPolicy))
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 2, len(allocations))
assert.EqualValues(t, 1, allocations[0].NumOfRows)
assert.EqualValues(t, 1, allocations[1].NumOfRows)
@ -326,11 +326,11 @@ func TestExpireAllocation(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
var mockPolicy = func(schema *schemapb.CollectionSchema) (int, error) {
@ -342,7 +342,7 @@ func TestExpireAllocation(t *testing.T) {
var id int64 = -1
for i := 0; i < 100; i++ {
allocs, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "ch1", 100)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocs))
if id == -1 {
id = allocs[0].SegmentID
@ -358,7 +358,7 @@ func TestExpireAllocation(t *testing.T) {
assert.NotNil(t, segment)
assert.EqualValues(t, 100, len(segment.allocations))
err = segmentManager.ExpireAllocations("ch1", maxts)
assert.Nil(t, err)
assert.NoError(t, err)
segment = meta.GetHealthySegment(id)
assert.NotNil(t, segment)
assert.EqualValues(t, 0, len(segment.allocations))
@ -369,25 +369,25 @@ func TestCleanExpiredBulkloadSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
ms := newMockRootCoordService()
segmentManager := newSegmentManager(meta, mockAllocator, ms)
allocation, err := segmentManager.allocSegmentForImport(context.TODO(), collID, 0, "c1", 2, 1)
assert.Nil(t, err)
assert.NoError(t, err)
ids, err := segmentManager.GetFlushableSegments(context.TODO(), "c1", allocation.ExpireTime)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, len(ids), 0)
assert.EqualValues(t, len(segmentManager.segments), 1)
ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocation.ExpireTime+1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Empty(t, ids)
assert.EqualValues(t, len(ids), 0)
@ -400,42 +400,42 @@ func TestGetFlushableSegments(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil)
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
ids, err := segmentManager.SealAllSegments(context.TODO(), collID, nil, false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(ids))
assert.EqualValues(t, allocations[0].SegmentID, ids[0])
meta.SetCurrentRows(allocations[0].SegmentID, 1)
ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocations[0].ExpireTime)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(ids))
assert.EqualValues(t, allocations[0].SegmentID, ids[0])
meta.SetLastFlushTime(allocations[0].SegmentID, time.Now())
ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocations[0].ExpireTime)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Empty(t, ids)
meta.SetLastFlushTime(allocations[0].SegmentID, time.Now().Local().Add(-flushInterval))
ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocations[0].ExpireTime)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(ids))
assert.EqualValues(t, allocations[0].SegmentID, ids[0])
meta.SetCurrentRows(allocations[0].SegmentID, 0)
ids, err = segmentManager.GetFlushableSegments(context.TODO(), "c1", allocations[0].ExpireTime)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Empty(t, ids)
assert.Nil(t, meta.GetHealthySegment(allocations[0].SegmentID))
})
@ -446,21 +446,21 @@ func TestTryToSealSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) //always seal
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
for _, seg := range segmentManager.meta.segments.segments {
assert.Equal(t, commonpb.SegmentState_Sealed, seg.GetState())
@ -471,21 +471,21 @@ func TestTryToSealSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
for _, seg := range segmentManager.meta.segments.segments {
assert.Equal(t, commonpb.SegmentState_Sealed, seg.GetState())
@ -496,23 +496,23 @@ func TestTryToSealSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil,
withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64)),
withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
for _, seg := range segmentManager.meta.segments.segments {
assert.Equal(t, commonpb.SegmentState_Sealed, seg.GetState())
@ -523,24 +523,24 @@ func TestTryToSealSegment(t *testing.T) {
Params.Init()
mockAllocator := newMockAllocator()
meta, err := newMemoryMeta()
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil)
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
// No seal polices
{
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
segments := segmentManager.meta.segments.segments
assert.Equal(t, 1, len(segments))
for _, seg := range segments {
@ -567,7 +567,7 @@ func TestTryToSealSegment(t *testing.T) {
},
}
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
seg = segmentManager.meta.segments.segments[seg.ID]
assert.Equal(t, commonpb.SegmentState_Growing, seg.GetState())
}
@ -597,7 +597,7 @@ func TestTryToSealSegment(t *testing.T) {
},
}
err = segmentManager.tryToSealSegment(ts, "c1")
assert.Nil(t, err)
assert.NoError(t, err)
seg = segmentManager.meta.segments.segments[seg.ID]
assert.Equal(t, commonpb.SegmentState_Sealed, seg.GetState())
}
@ -611,23 +611,23 @@ func TestTryToSealSegment(t *testing.T) {
fkv := &saveFailKV{MetaKv: memoryKV}
catalog := datacoord.NewCatalog(memoryKV, "", "")
meta, err := newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) //always seal
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
segmentManager.meta.catalog = &datacoord.Catalog{MetaKv: fkv}
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
err = segmentManager.tryToSealSegment(ts, "c1")
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("seal with channel policy with kv fails", func(t *testing.T) {
@ -637,23 +637,23 @@ func TestTryToSealSegment(t *testing.T) {
fkv := &saveFailKV{MetaKv: memoryKV}
catalog := datacoord.NewCatalog(memoryKV, "", "")
meta, err := newMeta(context.TODO(), catalog, nil)
assert.Nil(t, err)
assert.NoError(t, err)
schema := newTestSchema()
collID, err := mockAllocator.allocID(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
segmentManager := newSegmentManager(meta, mockAllocator, nil, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(allocations))
segmentManager.meta.catalog = &datacoord.Catalog{MetaKv: fkv}
ts, err := segmentManager.allocator.allocTimestamp(context.Background())
assert.Nil(t, err)
assert.NoError(t, err)
err = segmentManager.tryToSealSegment(ts, "c1")
assert.NotNil(t, err)
assert.Error(t, err)
})
}

File diff suppressed because it is too large.

@ -27,7 +27,7 @@ func TestBroadcastAlteredCollection(t *testing.T) {
ctx := context.Background()
resp, err := s.BroadcastAlteredCollection(ctx, nil)
assert.NotNil(t, resp.Reason)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test meta non exist", func(t *testing.T) {
@ -111,7 +111,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
@ -166,7 +166,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
IndexID: 0,
IndexName: "",
})
assert.Nil(t, err)
assert.NoError(t, err)
seg1 := createSegment(0, 0, 0, 100, 10, "vchan1", commonpb.SegmentState_Flushed)
seg1.Binlogs = []*datapb.FieldBinlog{
@ -205,35 +205,35 @@ func TestGetRecoveryInfoV2(t *testing.T) {
},
}
err = svr.meta.AddSegment(NewSegmentInfo(seg1))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg2))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
SegmentID: seg1.ID,
BuildID: seg1.ID,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: seg1.ID,
State: commonpb.IndexState_Finished,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
SegmentID: seg2.ID,
BuildID: seg2.ID,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: seg2.ID,
State: commonpb.IndexState_Finished,
})
assert.Nil(t, err)
assert.NoError(t, err)
req := &datapb.GetRecoveryInfoRequestV2{
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.EqualValues(t, 0, len(resp.GetChannels()[0].GetUnflushedSegmentIds()))
@ -300,15 +300,15 @@ func TestGetRecoveryInfoV2(t *testing.T) {
},
}
err = svr.meta.AddSegment(NewSegmentInfo(seg1))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg2))
assert.Nil(t, err)
assert.NoError(t, err)
req := &datapb.GetRecoveryInfoRequestV2{
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
@ -372,7 +372,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
segment := createSegment(0, 0, 1, 100, 10, "vchan1", commonpb.SegmentState_Flushed)
err := svr.meta.AddSegment(NewSegmentInfo(segment))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.CreateIndex(&model.Index{
TenantID: "",
@ -381,25 +381,25 @@ func TestGetRecoveryInfoV2(t *testing.T) {
IndexID: 0,
IndexName: "",
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
SegmentID: segment.ID,
BuildID: segment.ID,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
BuildID: segment.ID,
State: commonpb.IndexState_Finished,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.channelManager.AddNode(0)
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.channelManager.Watch(&channel{Name: "vchan1", CollectionID: 0})
assert.Nil(t, err)
assert.NoError(t, err)
sResp, err := svr.SaveBinlogPaths(context.TODO(), binlogReq)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, sResp.ErrorCode)
req := &datapb.GetRecoveryInfoRequestV2{
@ -407,7 +407,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
PartitionIDs: []int64{1},
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 1, len(resp.GetSegments()))
assert.EqualValues(t, 0, resp.GetSegments()[0].GetID())
@ -439,15 +439,15 @@ func TestGetRecoveryInfoV2(t *testing.T) {
seg1 := createSegment(7, 0, 0, 100, 30, "vchan1", commonpb.SegmentState_Growing)
seg2 := createSegment(8, 0, 0, 100, 40, "vchan1", commonpb.SegmentState_Dropped)
err = svr.meta.AddSegment(NewSegmentInfo(seg1))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg2))
assert.Nil(t, err)
assert.NoError(t, err)
req := &datapb.GetRecoveryInfoRequestV2{
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
@ -480,15 +480,15 @@ func TestGetRecoveryInfoV2(t *testing.T) {
seg2 := createSegment(8, 0, 0, 100, 40, "vchan1", commonpb.SegmentState_Flushed)
seg2.IsFake = true
err = svr.meta.AddSegment(NewSegmentInfo(seg1))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg2))
assert.Nil(t, err)
assert.NoError(t, err)
req := &datapb.GetRecoveryInfoRequestV2{
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
@ -523,15 +523,15 @@ func TestGetRecoveryInfoV2(t *testing.T) {
seg5 := createSegment(13, 0, 0, 100, 40, "vchan1", commonpb.SegmentState_Flushed)
seg5.CompactionFrom = []int64{11, 12}
err = svr.meta.AddSegment(NewSegmentInfo(seg1))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg2))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg3))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg4))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.AddSegment(NewSegmentInfo(seg5))
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.meta.CreateIndex(&model.Index{
TenantID: "",
CollectionID: 0,
@ -545,7 +545,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
IsAutoIndex: false,
UserIndexParams: nil,
})
assert.Nil(t, err)
assert.NoError(t, err)
svr.meta.segments.SetSegmentIndex(seg4.ID, &model.SegmentIndex{
SegmentID: seg4.ID,
CollectionID: 0,
@ -567,7 +567,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
CollectionID: 0,
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
assert.NotEqual(t, 0, resp.GetChannels()[0].GetSeekPosition().GetTimestamp())
@ -580,7 +580,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
svr := newTestServer(t, nil)
closeTestServer(t, svr)
resp, err := svr.GetRecoveryInfoV2(context.TODO(), &datapb.GetRecoveryInfoRequestV2{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
assert.Equal(t, serverNotServingErrMsg, resp.GetStatus().GetReason())
})

@ -234,7 +234,7 @@ func TestChannelMeta_segmentFlushed(t *testing.T) {
startPos: nil,
endPos: nil,
})
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Test segmentFlushed", func(t *testing.T) {
@ -532,7 +532,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
err = channel.addSegment(
addSegmentReq{
segType: datapb.SegmentType_Flushed,
@ -543,7 +543,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Test_addSegmentStatsError", func(t *testing.T) {
@ -561,7 +561,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
err = channel.addSegment(
addSegmentReq{
segType: datapb.SegmentType_Flushed,
@ -572,7 +572,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Test_addSegmentPkfilterError", func(t *testing.T) {
@ -590,7 +590,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
err = channel.addSegment(
addSegmentReq{
segType: datapb.SegmentType_Flushed,
@ -601,7 +601,7 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Test_mergeFlushedSegments", func(t *testing.T) {
@ -790,7 +790,7 @@ func TestChannelMeta_UpdatePKRange(t *testing.T) {
startPos: startPos,
endPos: endPos,
})
assert.Nil(t, err)
assert.NoError(t, err)
err = channel.addSegment(
addSegmentReq{
segType: datapb.SegmentType_Normal,
@ -801,7 +801,7 @@ func TestChannelMeta_UpdatePKRange(t *testing.T) {
statsBinLogs: []*datapb.FieldBinlog{getSimpleFieldBinlog()},
recoverTs: 0,
})
assert.Nil(t, err)
assert.NoError(t, err)
segNew := channel.segments[1]
segNormal := channel.segments[2]

@ -95,13 +95,13 @@ func TestDataNode(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
node.SetEtcdClient(etcdCli)
err = node.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = node.Start()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Empty(t, node.GetAddress())
node.SetAddress("address")
assert.Equal(t, "address", node.GetAddress())
@ -201,7 +201,7 @@ func TestDataNode(t *testing.T) {
for _, test := range testDataSyncs {
err = node.flowgraphManager.addAndStart(node, &datapb.VchannelInfo{CollectionID: 1, ChannelName: test.dmChannelName}, nil, genTestTickler())
assert.Nil(t, err)
assert.NoError(t, err)
vchanNameCh <- test.dmChannelName
}
@ -228,16 +228,16 @@ func TestWatchChannel(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
node.SetEtcdClient(etcdCli)
err = node.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = node.Start()
assert.Nil(t, err)
assert.NoError(t, err)
defer node.Stop()
err = node.Register()
assert.Nil(t, err)
assert.NoError(t, err)
defer cancel()
@ -261,9 +261,9 @@ func TestWatchChannel(t *testing.T) {
Vchan: vchan,
}
val, err := proto.Marshal(info)
assert.Nil(t, err)
assert.NoError(t, err)
err = kv.Save(path, string(val))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
exist := node.flowgraphManager.exist(ch)
@ -283,7 +283,7 @@ func TestWatchChannel(t *testing.T) {
}, 3*time.Second, 100*time.Millisecond)
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.CommonCfg.DataCoordWatchSubPath.GetValue(), paramtable.GetNodeID()))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
exist := node.flowgraphManager.exist(ch)
@ -331,9 +331,9 @@ func TestWatchChannel(t *testing.T) {
Vchan: vchan,
}
val, err := proto.Marshal(info)
assert.Nil(t, err)
assert.NoError(t, err)
err = kv.Save(path, string(val))
assert.Nil(t, err)
assert.NoError(t, err)
// wait for the check goroutine to receive 2 events
<-c
@ -341,7 +341,7 @@ func TestWatchChannel(t *testing.T) {
assert.False(t, exist)
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.CommonCfg.DataCoordWatchSubPath.GetValue(), paramtable.GetNodeID()))
assert.Nil(t, err)
assert.NoError(t, err)
//TODO there is no way to sync Release done, use sleep for now
time.Sleep(100 * time.Millisecond)


@ -287,7 +287,7 @@ func TestDataSyncService_Start(t *testing.T) {
}
sync, err := newDataSyncService(ctx, flushChan, resendTTChan, channel, alloc, dispClient, factory, vchan, signalCh, dataCoord, newCache(), cm, newCompactionExecutor(), genTestTickler(), 0)
assert.Nil(t, err)
assert.NoError(t, err)
sync.flushListener = make(chan *segmentFlushPack)
defer close(sync.flushListener)
@ -444,7 +444,7 @@ func TestDataSyncService_Close(t *testing.T) {
channel := newChannel(insertChannelName, collMeta.ID, collMeta.GetSchema(), mockRootCoord, cm)
sync, err := newDataSyncService(ctx, flushChan, resendTTChan, channel, alloc, dispClient, factory, vchan, signalCh, mockDataCoord, newCache(), cm, newCompactionExecutor(), genTestTickler(), 0)
assert.Nil(t, err)
assert.NoError(t, err)
sync.flushListener = make(chan *segmentFlushPack, 10)
defer close(sync.flushListener)
@ -600,22 +600,22 @@ func TestBytesReader(t *testing.T) {
var fvector = make([]float32, 2)
err := binary.Read(rawDataReader, common.Endian, &fvector)
assert.Nil(t, err)
assert.NoError(t, err)
assert.ElementsMatch(t, fvector, []float32{1, 2})
var bvector = make([]byte, 4)
err = binary.Read(rawDataReader, common.Endian, &bvector)
assert.Nil(t, err)
assert.NoError(t, err)
assert.ElementsMatch(t, bvector, []byte{255, 255, 255, 0})
var fieldBool bool
err = binary.Read(rawDataReader, common.Endian, &fieldBool)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, fieldBool)
var dataInt8 int8
err = binary.Read(rawDataReader, common.Endian, &dataInt8)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, int8(100), dataInt8)
}
@ -625,7 +625,7 @@ func TestGetSegmentInfos(t *testing.T) {
dataCoord: dataCoord,
}
segmentInfos, err := dsService.getSegmentInfos([]int64{1})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(segmentInfos))
dataCoord.GetSegmentInfosError = true


@ -52,7 +52,7 @@ func TestFlowGraphDeleteNode_newDeleteNode(te *testing.T) {
for _, test := range tests {
te.Run(test.description, func(t *testing.T) {
dn, err := newDeleteNode(test.ctx, nil, nil, make(chan string, 1), test.config)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, dn)
assert.Equal(t, "deleteNode-"+dn.channelName, dn.Name())
@ -194,7 +194,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
}
dn, err := newDeleteNode(context.Background(), fm, delBufManager, make(chan string, 1), c)
assert.Nil(t, err)
assert.NoError(t, err)
segID2Pks, _ := dn.filterSegmentByPK(0, varCharPks, tss)
expected := map[int64][]primaryKey{
@ -228,7 +228,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
}
dn, err := newDeleteNode(context.Background(), fm, delBufManager, make(chan string, 1), c)
assert.Nil(t, err)
assert.NoError(t, err)
segID2Pks, _ := dn.filterSegmentByPK(0, int64Pks, tss)
t.Log(segID2Pks)
@ -325,7 +325,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
}
sig := make(chan string, 1)
delNode, err := newDeleteNode(ctx, fm, delBufManager, sig, c)
assert.Nil(t, err)
assert.NoError(t, err)
msg := genFlowGraphDeleteMsg(int64Pks, chanName)
msg.segmentsToSync = segIDs
@ -364,7 +364,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
delBufHeap: &PriorityQueue{},
}
delNode, err := newDeleteNode(ctx, fm, delBufManager, make(chan string, 1), c)
assert.Nil(t, err)
assert.NoError(t, err)
compactedSegment := UniqueID(10020987)
seg := Segment{
@ -420,7 +420,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
recordFlushedSeg: true,
}
delNode, err := newDeleteNode(ctx, mockFlushManager, delBufManager, make(chan string, 1), c)
assert.Nil(t, err)
assert.NoError(t, err)
//2. here we set flushing segments inside fgmsg to empty
//in order to verify the validity of auto flush function


@ -110,5 +110,5 @@ func TestNewDmInputNode(t *testing.T) {
msFactory: &mockMsgStreamFactory{},
vChannelName: "mock_vchannel_0",
})
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -1027,14 +1027,14 @@ func TestInsertBufferNode_bufferInsertMsg(t *testing.T) {
for _, msg := range inMsg.insertMessages {
msg.EndTimestamp = 101 // ts valid
err = iBNode.bufferInsertMsg(msg, &msgpb.MsgPosition{}, &msgpb.MsgPosition{})
assert.Nil(t, err)
assert.NoError(t, err)
}
for _, msg := range inMsg.insertMessages {
msg.EndTimestamp = 101 // ts valid
msg.RowIDs = []int64{} //misaligned data
err = iBNode.bufferInsertMsg(msg, &msgpb.MsgPosition{}, &msgpb.MsgPosition{})
assert.NotNil(t, err)
assert.Error(t, err)
}
}
}


@ -42,7 +42,7 @@ func TestFlowGraphManager(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
node := newIDLEDataNodeMock(ctx, schemapb.DataType_Int64)


@ -92,7 +92,7 @@ func TestMetaServiceRootCoodFails(t *testing.T) {
ms := newMetaService(rc, collectionID0)
_, err := ms.getCollectionSchema(context.Background(), collectionID1, 0)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Test Describe wit nil response", func(t *testing.T) {
@ -102,6 +102,6 @@ func TestMetaServiceRootCoodFails(t *testing.T) {
ms := newMetaService(rc, collectionID0)
_, err := ms.getCollectionSchema(context.Background(), collectionID1, 0)
assert.NotNil(t, err)
assert.Error(t, err)
})
}


@ -77,7 +77,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("rootCoord", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
rootCoord := &testRootCoord{}
grpcServer := grpc.NewServer()
@ -94,7 +94,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("queryCoord", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
queryCoord := &testQueryCoord{}
grpcServer := grpc.NewServer()
@ -111,7 +111,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("dataCoord", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
dataCoord := &testDataCoord{}
grpcServer := grpc.NewServer()
@ -128,7 +128,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("queryNode", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
queryNode := &testQueryNode{}
grpcServer := grpc.NewServer()
@ -145,7 +145,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("dataNode", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
dataNode := &testDataNode{}
grpcServer := grpc.NewServer()
@ -162,7 +162,7 @@ func TestConnectionManager(t *testing.T) {
t.Run("indexNode", func(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:")
assert.Nil(t, err)
assert.NoError(t, err)
defer lis.Close()
indexNode := &testIndexNode{}
grpcServer := grpc.NewServer()


@ -66,28 +66,28 @@ func Test_NewClient(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
client, err := NewClient(ctx, proxy.Params.EtcdCfg.MetaRootPath.GetValue(), etcdCli)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret any, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -230,7 +230,7 @@ func Test_NewClient(t *testing.T) {
// special case since this method didn't use recall()
ret, err := client.SaveBinlogPaths(ctx, nil)
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
client.grpcClient = &mock.GRPCClientBase[datapb.DataCoordClient]{
GetGrpcClientErr: nil,
@ -244,7 +244,7 @@ func Test_NewClient(t *testing.T) {
// special case since this method didn't use recall()
ret, err = client.SaveBinlogPaths(ctx, nil)
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
client.grpcClient = &mock.GRPCClientBase[datapb.DataCoordClient]{
GetGrpcClientErr: nil,
@ -258,8 +258,8 @@ func Test_NewClient(t *testing.T) {
// special case since this method didn't use recall()
ret, err = client.SaveBinlogPaths(ctx, nil)
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -296,7 +296,7 @@ func Test_NewServer(t *testing.T) {
//server.indexCoord = indexCoord
err := server.Run()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetComponentStates", func(t *testing.T) {
@ -304,7 +304,7 @@ func Test_NewServer(t *testing.T) {
states: &milvuspb.ComponentStates{},
}
states, err := server.GetComponentStates(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, states)
})
@ -313,7 +313,7 @@ func Test_NewServer(t *testing.T) {
strResp: &milvuspb.StringResponse{},
}
resp, err := server.GetTimeTickChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -322,7 +322,7 @@ func Test_NewServer(t *testing.T) {
strResp: &milvuspb.StringResponse{},
}
resp, err := server.GetStatisticsChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -331,7 +331,7 @@ func Test_NewServer(t *testing.T) {
infoResp: &datapb.GetSegmentInfoResponse{},
}
resp, err := server.GetSegmentInfo(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -340,7 +340,7 @@ func Test_NewServer(t *testing.T) {
flushResp: &datapb.FlushResponse{},
}
resp, err := server.Flush(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -349,7 +349,7 @@ func Test_NewServer(t *testing.T) {
assignResp: &datapb.AssignSegmentIDResponse{},
}
resp, err := server.AssignSegmentID(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -358,7 +358,7 @@ func Test_NewServer(t *testing.T) {
segStateResp: &datapb.GetSegmentStatesResponse{},
}
resp, err := server.GetSegmentStates(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -367,7 +367,7 @@ func Test_NewServer(t *testing.T) {
binResp: &datapb.GetInsertBinlogPathsResponse{},
}
resp, err := server.GetInsertBinlogPaths(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -376,7 +376,7 @@ func Test_NewServer(t *testing.T) {
colStatResp: &datapb.GetCollectionStatisticsResponse{},
}
resp, err := server.GetCollectionStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -385,7 +385,7 @@ func Test_NewServer(t *testing.T) {
partStatResp: &datapb.GetPartitionStatisticsResponse{},
}
resp, err := server.GetPartitionStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -394,7 +394,7 @@ func Test_NewServer(t *testing.T) {
strResp: &milvuspb.StringResponse{},
}
resp, err := server.GetSegmentInfoChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -403,7 +403,7 @@ func Test_NewServer(t *testing.T) {
status: &commonpb.Status{},
}
resp, err := server.SaveBinlogPaths(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -412,7 +412,7 @@ func Test_NewServer(t *testing.T) {
recoverResp: &datapb.GetRecoveryInfoResponse{},
}
resp, err := server.GetRecoveryInfo(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -421,7 +421,7 @@ func Test_NewServer(t *testing.T) {
flushSegResp: &datapb.GetFlushedSegmentsResponse{},
}
resp, err := server.GetFlushedSegments(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -430,7 +430,7 @@ func Test_NewServer(t *testing.T) {
configResp: &internalpb.ShowConfigurationsResponse{},
}
resp, err := server.ShowConfigurations(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -439,7 +439,7 @@ func Test_NewServer(t *testing.T) {
metricResp: &milvuspb.GetMetricsResponse{},
}
resp, err := server.GetMetrics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -448,7 +448,7 @@ func Test_NewServer(t *testing.T) {
watchChannelsResp: &datapb.WatchChannelsResponse{},
}
resp, err := server.WatchChannels(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -457,7 +457,7 @@ func Test_NewServer(t *testing.T) {
getFlushStateResp: &milvuspb.GetFlushStateResponse{},
}
resp, err := server.GetFlushState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -466,7 +466,7 @@ func Test_NewServer(t *testing.T) {
getFlushAllStateResp: &milvuspb.GetFlushAllStateResponse{},
}
resp, err := server.GetFlushAllState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -475,7 +475,7 @@ func Test_NewServer(t *testing.T) {
dropVChanResp: &datapb.DropVirtualChannelResponse{},
}
resp, err := server.DropVirtualChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -484,7 +484,7 @@ func Test_NewServer(t *testing.T) {
manualCompactionResp: &milvuspb.ManualCompactionResponse{},
}
resp, err := server.ManualCompaction(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -493,7 +493,7 @@ func Test_NewServer(t *testing.T) {
compactionStateResp: &milvuspb.GetCompactionStateResponse{},
}
resp, err := server.GetCompactionState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -502,7 +502,7 @@ func Test_NewServer(t *testing.T) {
compactionPlansResp: &milvuspb.GetCompactionPlansResponse{},
}
resp, err := server.GetCompactionStateWithPlans(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -511,7 +511,7 @@ func Test_NewServer(t *testing.T) {
setSegmentStateResp: &datapb.SetSegmentStateResponse{},
}
resp, err := server.SetSegmentState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -522,7 +522,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.Import(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -533,7 +533,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.UpdateSegmentStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -544,7 +544,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.UpdateChannelCheckpoint(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -555,7 +555,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.SaveImportSegment(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -566,7 +566,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.UnsetIsImportingState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -577,7 +577,7 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.MarkSegmentsDropped(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -586,14 +586,14 @@ func Test_NewServer(t *testing.T) {
broadCastResp: &commonpb.Status{},
}
resp, err := server.BroadcastAlteredCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("CheckHealth", func(t *testing.T) {
server.dataCoord = &MockDataCoord{}
ret, err := server.CheckHealth(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, ret.IsHealthy)
})
@ -670,7 +670,7 @@ func Test_NewServer(t *testing.T) {
})
err := server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_Run(t *testing.T) {


@ -35,29 +35,29 @@ func Test_NewClient(t *testing.T) {
ctx := context.Background()
client, err := NewClient(ctx, "")
assert.Nil(t, client)
assert.NotNil(t, err)
assert.Error(t, err)
client, err = NewClient(ctx, "test")
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret interface{}, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -130,5 +130,5 @@ func Test_NewClient(t *testing.T) {
checkFunc(true)
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -213,7 +213,7 @@ func Test_NewServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.newRootCoordClient = func(string, *clientv3.Client) (types.RootCoord, error) {
@ -227,7 +227,7 @@ func Test_NewServer(t *testing.T) {
t.Run("Run", func(t *testing.T) {
server.datanode = &MockDataNode{}
err = server.Run()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetComponentStates", func(t *testing.T) {
@ -235,7 +235,7 @@ func Test_NewServer(t *testing.T) {
states: &milvuspb.ComponentStates{},
}
states, err := server.GetComponentStates(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, states)
})
@ -244,7 +244,7 @@ func Test_NewServer(t *testing.T) {
strResp: &milvuspb.StringResponse{},
}
states, err := server.GetStatisticsChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, states)
})
@ -253,7 +253,7 @@ func Test_NewServer(t *testing.T) {
status: &commonpb.Status{},
}
states, err := server.WatchDmChannels(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, states)
})
@ -262,7 +262,7 @@ func Test_NewServer(t *testing.T) {
status: &commonpb.Status{},
}
states, err := server.FlushSegments(ctx, nil)
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotNil(t, states)
})
@ -271,7 +271,7 @@ func Test_NewServer(t *testing.T) {
configResp: &internalpb.ShowConfigurationsResponse{},
}
resp, err := server.ShowConfigurations(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -280,7 +280,7 @@ func Test_NewServer(t *testing.T) {
metricResp: &milvuspb.GetMetricsResponse{},
}
resp, err := server.GetMetrics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -289,7 +289,7 @@ func Test_NewServer(t *testing.T) {
status: &commonpb.Status{},
}
resp, err := server.Compaction(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -298,7 +298,7 @@ func Test_NewServer(t *testing.T) {
status: &commonpb.Status{},
}
resp, err := server.Import(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -307,7 +307,7 @@ func Test_NewServer(t *testing.T) {
resendResp: &datapb.ResendSegmentStatsResponse{},
}
resp, err := server.ResendSegmentStats(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -321,18 +321,18 @@ func Test_NewServer(t *testing.T) {
},
}
resp, err := server.AddImportSegment(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_Run(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.datanode = &MockDataNode{


@ -42,29 +42,29 @@ func Test_NewClient(t *testing.T) {
ctx := context.Background()
client, err := NewClient(ctx, "", false)
assert.Nil(t, client)
assert.NotNil(t, err)
assert.Error(t, err)
client, err = NewClient(ctx, "test", false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret interface{}, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -119,7 +119,7 @@ func Test_NewClient(t *testing.T) {
checkFunc(true)
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndexNodeClient(t *testing.T) {
@ -128,7 +128,7 @@ func TestIndexNodeClient(t *testing.T) {
factory := dependency.NewDefaultFactory(true)
ins, err := grpcindexnode.NewServer(ctx, factory)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, ins)
inm := indexnode.NewIndexNodeMock()
@ -143,31 +143,31 @@ func TestIndexNodeClient(t *testing.T) {
assert.NoError(t, err)
inm.SetEtcdClient(etcdCli)
err = ins.SetClient(inm)
assert.Nil(t, err)
assert.NoError(t, err)
err = ins.Run()
assert.Nil(t, err)
assert.NoError(t, err)
inc, err := NewClient(ctx, "localhost:21121", false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, inc)
err = inc.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = inc.Start()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("GetComponentStates", func(t *testing.T) {
states, err := inc.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, states.State.StateCode)
assert.Equal(t, commonpb.ErrorCode_Success, states.Status.ErrorCode)
})
t.Run("GetStatisticsChannel", func(t *testing.T) {
resp, err := inc.GetStatisticsChannel(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -177,21 +177,21 @@ func TestIndexNodeClient(t *testing.T) {
BuildID: 0,
}
resp, err := inc.CreateJob(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("QueryJob", func(t *testing.T) {
req := &indexpb.QueryJobsRequest{}
resp, err := inc.QueryJobs(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("DropJob", func(t *testing.T) {
req := &indexpb.DropJobsRequest{}
resp, err := inc.DropJobs(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -200,15 +200,15 @@ func TestIndexNodeClient(t *testing.T) {
Pattern: "",
}
resp, err := inc.ShowConfigurations(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("GetMetrics", func(t *testing.T) {
req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
assert.Nil(t, err)
assert.NoError(t, err)
resp, err := inc.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -220,8 +220,8 @@ func TestIndexNodeClient(t *testing.T) {
})
err = ins.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
err = inc.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -40,7 +40,7 @@ func TestIndexNodeServer(t *testing.T) {
ctx := context.Background()
factory := dependency.NewDefaultFactory(true)
server, err := NewServer(ctx, factory)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
inm := indexnode.NewIndexNodeMock()
@ -56,22 +56,22 @@ func TestIndexNodeServer(t *testing.T) {
assert.NoError(t, err)
inm.SetEtcdClient(etcdCli)
err = server.SetClient(inm)
assert.Nil(t, err)
assert.NoError(t, err)
err = server.Run()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("GetComponentStates", func(t *testing.T) {
req := &milvuspb.GetComponentStatesRequest{}
states, err := server.GetComponentStates(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, states.State.StateCode)
})
t.Run("GetStatisticsChannel", func(t *testing.T) {
req := &internalpb.GetStatisticsChannelRequest{}
resp, err := server.GetStatisticsChannel(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -83,21 +83,21 @@ func TestIndexNodeServer(t *testing.T) {
DataPaths: []string{},
}
resp, err := server.CreateJob(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("QueryJob", func(t *testing.T) {
req := &indexpb.QueryJobsRequest{}
resp, err := server.QueryJobs(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("DropJobs", func(t *testing.T) {
req := &indexpb.DropJobsRequest{}
resp, err := server.DropJobs(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -106,15 +106,15 @@ func TestIndexNodeServer(t *testing.T) {
Pattern: "",
}
resp, err := server.ShowConfigurations(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("GetMetrics", func(t *testing.T) {
req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
assert.Nil(t, err)
assert.NoError(t, err)
resp, err := server.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -126,5 +126,5 @@ func TestIndexNodeServer(t *testing.T) {
})
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -36,26 +36,26 @@ func Test_NewClient(t *testing.T) {
ctx := context.Background()
client, err := NewClient(ctx, "")
assert.Nil(t, client)
assert.NotNil(t, err)
assert.Error(t, err)
client, err = NewClient(ctx, "test")
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret interface{}, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -120,7 +120,7 @@ func Test_NewClient(t *testing.T) {
retCheck := func(ret interface{}, err error) {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
r1Timeout, err := client.GetComponentStates(shortCtx)
@ -145,5 +145,5 @@ func Test_NewClient(t *testing.T) {
// cleanup
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -989,364 +989,364 @@ func Test_NewServer(t *testing.T) {
var err error
t.Run("Run", func(t *testing.T) {
err = runAndWaitForServerReady(server)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetComponentStates", func(t *testing.T) {
_, err := server.GetComponentStates(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetStatisticsChannel", func(t *testing.T) {
_, err := server.GetStatisticsChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("InvalidateCollectionMetaCache", func(t *testing.T) {
_, err := server.InvalidateCollectionMetaCache(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateCollection", func(t *testing.T) {
_, err := server.CreateCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropCollection", func(t *testing.T) {
_, err := server.DropCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("HasCollection", func(t *testing.T) {
_, err := server.HasCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("LoadCollection", func(t *testing.T) {
_, err := server.LoadCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ReleaseCollection", func(t *testing.T) {
_, err := server.ReleaseCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DescribeCollection", func(t *testing.T) {
_, err := server.DescribeCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetCollectionStatistics", func(t *testing.T) {
_, err := server.GetCollectionStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ShowCollections", func(t *testing.T) {
_, err := server.ShowCollections(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreatePartition", func(t *testing.T) {
_, err := server.CreatePartition(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropPartition", func(t *testing.T) {
_, err := server.DropPartition(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("HasPartition", func(t *testing.T) {
_, err := server.HasPartition(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("LoadPartitions", func(t *testing.T) {
_, err := server.LoadPartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ReleasePartitions", func(t *testing.T) {
_, err := server.ReleasePartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetPartitionStatistics", func(t *testing.T) {
_, err := server.GetPartitionStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ShowPartitions", func(t *testing.T) {
_, err := server.ShowPartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetLoadingProgress", func(t *testing.T) {
_, err := server.GetLoadingProgress(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateIndex", func(t *testing.T) {
_, err := server.CreateIndex(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropIndex", func(t *testing.T) {
_, err := server.DropIndex(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DescribeIndex", func(t *testing.T) {
_, err := server.DescribeIndex(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetIndexStatistics", func(t *testing.T) {
_, err := server.GetIndexStatistics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetIndexBuildProgress", func(t *testing.T) {
_, err := server.GetIndexBuildProgress(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetIndexState", func(t *testing.T) {
_, err := server.GetIndexState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Insert", func(t *testing.T) {
_, err := server.Insert(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Delete", func(t *testing.T) {
_, err := server.Delete(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Upsert", func(t *testing.T) {
_, err := server.Upsert(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Search", func(t *testing.T) {
_, err := server.Search(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Flush", func(t *testing.T) {
_, err := server.Flush(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Query", func(t *testing.T) {
_, err := server.Query(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CalcDistance", func(t *testing.T) {
_, err := server.CalcDistance(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetDdChannel", func(t *testing.T) {
_, err := server.GetDdChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetPersistentSegmentInfo", func(t *testing.T) {
_, err := server.GetPersistentSegmentInfo(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetQuerySegmentInfo", func(t *testing.T) {
_, err := server.GetQuerySegmentInfo(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Dummy", func(t *testing.T) {
_, err := server.Dummy(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("RegisterLink", func(t *testing.T) {
_, err := server.RegisterLink(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetMetrics", func(t *testing.T) {
_, err := server.GetMetrics(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("LoadBalance", func(t *testing.T) {
_, err := server.LoadBalance(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateAlias", func(t *testing.T) {
_, err := server.CreateAlias(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropAlias", func(t *testing.T) {
_, err := server.DropAlias(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("AlterAlias", func(t *testing.T) {
_, err := server.AlterAlias(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetCompactionState", func(t *testing.T) {
_, err := server.GetCompactionState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ManualCompaction", func(t *testing.T) {
_, err := server.ManualCompaction(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetCompactionStateWithPlans", func(t *testing.T) {
_, err := server.GetCompactionStateWithPlans(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateCredential", func(t *testing.T) {
_, err := server.CreateCredential(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("UpdateCredential", func(t *testing.T) {
_, err := server.UpdateCredential(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DeleteCredential", func(t *testing.T) {
_, err := server.DeleteCredential(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ListCredUsers", func(t *testing.T) {
_, err := server.ListCredUsers(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("InvalidateCredentialCache", func(t *testing.T) {
_, err := server.InvalidateCredentialCache(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("UpdateCredentialCache", func(t *testing.T) {
_, err := server.UpdateCredentialCache(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateRole", func(t *testing.T) {
_, err := server.CreateRole(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropRole", func(t *testing.T) {
_, err := server.DropRole(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("OperateUserRole", func(t *testing.T) {
_, err := server.OperateUserRole(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("SelectRole", func(t *testing.T) {
_, err := server.SelectRole(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("SelectUser", func(t *testing.T) {
_, err := server.SelectUser(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("OperatePrivilege", func(t *testing.T) {
_, err := server.OperatePrivilege(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("SelectGrant", func(t *testing.T) {
_, err := server.SelectGrant(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("RefreshPrivilegeInfoCache", func(t *testing.T) {
_, err := server.RefreshPolicyInfoCache(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CheckHealth", func(t *testing.T) {
_, err := server.CheckHealth(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("RenameCollection", func(t *testing.T) {
_, err := server.RenameCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("CreateResourceGroup", func(t *testing.T) {
_, err := server.CreateResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DropResourceGroup", func(t *testing.T) {
_, err := server.DropResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("TransferNode", func(t *testing.T) {
_, err := server.TransferNode(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("TransferReplica", func(t *testing.T) {
_, err := server.TransferReplica(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("ListResourceGroups", func(t *testing.T) {
_, err := server.ListResourceGroups(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("DescribeResourceGroup", func(t *testing.T) {
_, err := server.DescribeResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("FlushAll", func(t *testing.T) {
_, err := server.FlushAll(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetFlushAllState", func(t *testing.T) {
_, err := server.GetFlushAllState(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
// Update config and start server again to test with different config set.
// This works as config will be initialized only once
paramtable.Get().Save(proxy.Params.ProxyCfg.GinLogging.Key, "false")
err = runAndWaitForServerReady(server)
assert.Nil(t, err)
assert.NoError(t, err)
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServer_Check(t *testing.T) {
@ -1356,13 +1356,13 @@ func TestServer_Check(t *testing.T) {
req := &grpc_health_v1.HealthCheckRequest{Service: ""}
ret, err := server.Check(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)
mockProxy.On("GetComponentStates", ctx).Return(nil, fmt.Errorf("mock grpc unexpected error")).Once()
ret, err = server.Check(ctx, req)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo := &milvuspb.ComponentInfo{
@ -1378,22 +1378,22 @@ func TestServer_Check(t *testing.T) {
mockProxy.On("GetComponentStates", ctx).Return(componentState, nil)
ret, err = server.Check(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
status.ErrorCode = commonpb.ErrorCode_Success
ret, err = server.Check(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo.StateCode = commonpb.StateCode_Initializing
ret, err = server.Check(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo.StateCode = commonpb.StateCode_Healthy
ret, err = server.Check(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)
}
@ -1409,14 +1409,14 @@ func TestServer_Watch(t *testing.T) {
err := server.Watch(req, watchServer)
ret := <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)
mockProxy.On("GetComponentStates", ctx).Return(nil, fmt.Errorf("mock grpc unexpected error")).Once()
err = server.Watch(req, watchServer)
ret = <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo := &milvuspb.ComponentInfo{
@ -1433,25 +1433,25 @@ func TestServer_Watch(t *testing.T) {
err = server.Watch(req, watchServer)
ret = <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
status.ErrorCode = commonpb.ErrorCode_Success
err = server.Watch(req, watchServer)
ret = <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo.StateCode = commonpb.StateCode_Initializing
err = server.Watch(req, watchServer)
ret = <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, ret.Status)
componentInfo.StateCode = commonpb.StateCode_Healthy
err = server.Watch(req, watchServer)
ret = <-resultChan
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)
}
@ -1460,9 +1460,9 @@ func Test_NewServer_HTTPServer_Enabled(t *testing.T) {
paramtable.Get().Save(proxy.Params.HTTPCfg.Enabled.Key, "true")
err := runAndWaitForServerReady(server)
assert.Nil(t, err)
assert.NoError(t, err)
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
defer func() {
e := recover()
@ -1478,7 +1478,7 @@ func getServer(t *testing.T) *Server {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.NotNil(t, server)
assert.Nil(t, err)
assert.NoError(t, err)
server.proxy = &MockProxy{}
server.rootCoordClient = &MockRootCoord{}
@ -1511,10 +1511,10 @@ func Test_NewServer_TLS_TwoWay(t *testing.T) {
paramtable.Get().Save(proxy.Params.HTTPCfg.Enabled.Key, "false")
err := runAndWaitForServerReady(server)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server.grpcExternalServer)
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_NewServer_TLS_OneWay(t *testing.T) {
@ -1527,10 +1527,10 @@ func Test_NewServer_TLS_OneWay(t *testing.T) {
paramtable.Get().Save(proxy.Params.HTTPCfg.Enabled.Key, "false")
err := runAndWaitForServerReady(server)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server.grpcExternalServer)
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_NewServer_TLS_FileNotExisted(t *testing.T) {
@ -1542,25 +1542,25 @@ func Test_NewServer_TLS_FileNotExisted(t *testing.T) {
paramtable.Get().Save(Params.ServerKeyPath.Key, "../../../configs/cert/server.key")
paramtable.Get().Save(proxy.Params.HTTPCfg.Enabled.Key, "false")
err := runAndWaitForServerReady(server)
assert.NotNil(t, err)
assert.Error(t, err)
server.Stop()
paramtable.Get().Save(Params.TLSMode.Key, "2")
paramtable.Get().Save(Params.ServerPemPath.Key, "../not/existed/server.pem")
paramtable.Get().Save(Params.CaPemPath.Key, "../../../configs/cert/ca.pem")
err = runAndWaitForServerReady(server)
assert.NotNil(t, err)
assert.Error(t, err)
server.Stop()
paramtable.Get().Save(Params.ServerPemPath.Key, "../../../configs/cert/server.pem")
paramtable.Get().Save(Params.CaPemPath.Key, "../not/existed/ca.pem")
err = runAndWaitForServerReady(server)
assert.NotNil(t, err)
assert.Error(t, err)
server.Stop()
paramtable.Get().Save(Params.CaPemPath.Key, "service.go")
err = runAndWaitForServerReady(server)
assert.NotNil(t, err)
assert.Error(t, err)
server.Stop()
}
@ -1570,7 +1570,7 @@ func Test_NewServer_GetVersion(t *testing.T) {
server := getServer(t)
resp, err := server.GetVersion(context.TODO(), req)
assert.Empty(t, resp.GetVersion())
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test get version failed", func(t *testing.T) {
@ -1579,6 +1579,6 @@ func Test_NewServer_GetVersion(t *testing.T) {
assert.NoError(t, err)
resp, err := server.GetVersion(context.TODO(), req)
assert.Equal(t, "v1", resp.GetVersion())
assert.Nil(t, err)
assert.NoError(t, err)
})
}


@ -70,26 +70,26 @@ func Test_NewClient(t *testing.T) {
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.NoError(t, err)
client, err := NewClient(ctx, proxy.Params.EtcdCfg.MetaRootPath.GetValue(), etcdCli)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret any, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -207,5 +207,5 @@ func Test_NewClient(t *testing.T) {
checkFunc(true)
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -110,7 +110,7 @@ func TestMain(m *testing.M) {
func Test_NewServer(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
mdc := &MockDataCoord{
@ -132,7 +132,7 @@ func Test_NewServer(t *testing.T) {
server.rootCoord = mrc
err = server.Run()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetComponentStates", func(t *testing.T) {
@ -147,7 +147,7 @@ func Test_NewServer(t *testing.T) {
req := &milvuspb.GetComponentStatesRequest{}
states, err := server.GetComponentStates(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, states.State.StateCode)
})
@ -159,7 +159,7 @@ func Test_NewServer(t *testing.T) {
}, nil,
)
resp, err := server.GetStatisticsChannel(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -171,7 +171,7 @@ func Test_NewServer(t *testing.T) {
}, nil,
)
resp, err := server.GetTimeTickChannel(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -182,7 +182,7 @@ func Test_NewServer(t *testing.T) {
}, nil,
)
resp, err := server.ShowCollections(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -190,48 +190,48 @@ func Test_NewServer(t *testing.T) {
t.Run("LoadCollection", func(t *testing.T) {
mqc.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.LoadCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("ReleaseCollection", func(t *testing.T) {
mqc.EXPECT().ReleaseCollection(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.ReleaseCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("ShowPartitions", func(t *testing.T) {
mqc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&querypb.ShowPartitionsResponse{Status: successStatus}, nil)
resp, err := server.ShowPartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("GetPartitionStates", func(t *testing.T) {
mqc.EXPECT().GetPartitionStates(mock.Anything, mock.Anything).Return(&querypb.GetPartitionStatesResponse{Status: successStatus}, nil)
resp, err := server.GetPartitionStates(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("LoadPartitions", func(t *testing.T) {
mqc.EXPECT().LoadPartitions(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.LoadPartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("ReleasePartitions", func(t *testing.T) {
mqc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.ReleasePartitions(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
t.Run("GetTimeTickChannel", func(t *testing.T) {
mqc.EXPECT().GetTimeTickChannel(mock.Anything).Return(&milvuspb.StringResponse{Status: successStatus}, nil)
resp, err := server.GetTimeTickChannel(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, resp)
})
@ -239,7 +239,7 @@ func Test_NewServer(t *testing.T) {
req := &querypb.GetSegmentInfoRequest{}
mqc.EXPECT().GetSegmentInfo(mock.Anything, req).Return(&querypb.GetSegmentInfoResponse{Status: successStatus}, nil)
resp, err := server.GetSegmentInfo(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -247,7 +247,7 @@ func Test_NewServer(t *testing.T) {
req := &querypb.LoadBalanceRequest{}
mqc.EXPECT().LoadBalance(mock.Anything, req).Return(successStatus, nil)
resp, err := server.LoadBalance(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -257,7 +257,7 @@ func Test_NewServer(t *testing.T) {
}
mqc.EXPECT().GetMetrics(mock.Anything, req).Return(&milvuspb.GetMetricsResponse{Status: successStatus}, nil)
resp, err := server.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -265,35 +265,35 @@ func Test_NewServer(t *testing.T) {
mqc.EXPECT().CheckHealth(mock.Anything, mock.Anything).Return(
&milvuspb.CheckHealthResponse{Status: successStatus, IsHealthy: true}, nil)
ret, err := server.CheckHealth(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, ret.IsHealthy)
})
t.Run("CreateResourceGroup", func(t *testing.T) {
mqc.EXPECT().CreateResourceGroup(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.CreateResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("DropResourceGroup", func(t *testing.T) {
mqc.EXPECT().DropResourceGroup(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.DropResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("TransferNode", func(t *testing.T) {
mqc.EXPECT().TransferNode(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.TransferNode(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
t.Run("TransferReplica", func(t *testing.T) {
mqc.EXPECT().TransferReplica(mock.Anything, mock.Anything).Return(successStatus, nil)
resp, err := server.TransferReplica(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -301,19 +301,19 @@ func Test_NewServer(t *testing.T) {
req := &milvuspb.ListResourceGroupsRequest{}
mqc.EXPECT().ListResourceGroups(mock.Anything, req).Return(&milvuspb.ListResourceGroupsResponse{Status: successStatus}, nil)
resp, err := server.ListResourceGroups(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("DescribeResourceGroup", func(t *testing.T) {
mqc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{Status: successStatus}, nil)
resp, err := server.DescribeResourceGroup(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
// This test will no longer return error immediately.
@ -321,7 +321,7 @@ func TestServer_Run1(t *testing.T) {
t.Skip()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
mqc := getQueryCoord()
@ -331,13 +331,13 @@ func TestServer_Run1(t *testing.T) {
assert.Error(t, err)
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServer_Run2(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.queryCoord = getQueryCoord()
@ -346,7 +346,7 @@ func TestServer_Run2(t *testing.T) {
}
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func getQueryCoord() *types.MockQueryCoord {
@ -367,7 +367,7 @@ func getQueryCoord() *types.MockQueryCoord {
func TestServer_Run3(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.queryCoord = getQueryCoord()
server.rootCoord = &MockRootCoord{
@ -375,14 +375,14 @@ func TestServer_Run3(t *testing.T) {
}
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServer_Run4(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.queryCoord = getQueryCoord()
@ -392,13 +392,13 @@ func TestServer_Run4(t *testing.T) {
}
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServer_Run5(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.queryCoord = getQueryCoord()
@ -408,5 +408,5 @@ func TestServer_Run5(t *testing.T) {
}
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -35,17 +35,17 @@ func Test_NewClient(t *testing.T) {
ctx := context.Background()
client, err := NewClient(ctx, "")
assert.Nil(t, client)
assert.NotNil(t, err)
assert.Error(t, err)
client, err = NewClient(ctx, "test")
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
ctx, cancel := context.WithCancel(ctx)
@ -53,10 +53,10 @@ func Test_NewClient(t *testing.T) {
retCheck := func(notNil bool, ret any, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -158,5 +158,5 @@ func Test_NewClient(t *testing.T) {
checkFunc(false)
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -81,7 +81,7 @@ func TestMain(m *testing.M) {
func Test_NewServer(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
mockQN := types.NewMockQueryNode(t)
@ -96,7 +96,7 @@ func Test_NewServer(t *testing.T) {
t.Run("Run", func(t *testing.T) {
err = server.Run()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetComponentStates", func(t *testing.T) {
@ -106,7 +106,7 @@ func Test_NewServer(t *testing.T) {
}}, nil)
req := &milvuspb.GetComponentStatesRequest{}
states, err := server.GetComponentStates(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, states.State.StateCode)
})
@ -114,7 +114,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().GetStatisticsChannel(mock.Anything).Return(&milvuspb.StringResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}}, nil)
req := &internalpb.GetStatisticsChannelRequest{}
resp, err := server.GetStatisticsChannel(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -122,7 +122,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().GetTimeTickChannel(mock.Anything).Return(&milvuspb.StringResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}}, nil)
req := &internalpb.GetTimeTickChannelRequest{}
resp, err := server.GetTimeTickChannel(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -130,7 +130,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().WatchDmChannels(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.WatchDmChannelsRequest{}
resp, err := server.WatchDmChannels(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -138,7 +138,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().LoadSegments(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.LoadSegmentsRequest{}
resp, err := server.LoadSegments(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -146,7 +146,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().ReleaseCollection(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.ReleaseCollectionRequest{}
resp, err := server.ReleaseCollection(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -154,7 +154,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().LoadPartitions(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.LoadPartitionsRequest{}
resp, err := server.LoadPartitions(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -162,7 +162,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.ReleasePartitionsRequest{}
resp, err := server.ReleasePartitions(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -170,7 +170,7 @@ func Test_NewServer(t *testing.T) {
mockQN.EXPECT().ReleaseSegments(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
req := &querypb.ReleaseSegmentsRequest{}
resp, err := server.ReleaseSegments(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
@ -179,7 +179,7 @@ func Test_NewServer(t *testing.T) {
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}}, nil)
req := &querypb.GetSegmentInfoRequest{}
resp, err := server.GetSegmentInfo(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -190,7 +190,7 @@ func Test_NewServer(t *testing.T) {
Request: "",
}
resp, err := server.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
@ -251,13 +251,13 @@ func Test_NewServer(t *testing.T) {
})
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_Run(t *testing.T) {
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
mockQN := types.NewMockQueryNode(t)

View File

@ -69,26 +69,26 @@ func Test_NewClient(t *testing.T) {
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.NoError(t, err)
client, err := NewClient(ctx, proxy.Params.EtcdCfg.MetaRootPath.GetValue(), etcdCli)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
err = client.Init()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Start()
assert.Nil(t, err)
assert.NoError(t, err)
err = client.Register()
assert.Nil(t, err)
assert.NoError(t, err)
checkFunc := func(retNotNil bool) {
retCheck := func(notNil bool, ret interface{}, err error) {
if notNil {
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
}
@ -292,7 +292,7 @@ func Test_NewClient(t *testing.T) {
retCheck := func(ret interface{}, err error) {
assert.Nil(t, ret)
assert.NotNil(t, err)
assert.Error(t, err)
}
{
rTimeout, err := client.GetComponentStates(shortCtx)
@ -452,5 +452,5 @@ func Test_NewClient(t *testing.T) {
}
// clean up
err = client.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
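
The timeout block above expects calls made with a short-deadline context (shortCtx) to fail. Below is a small sketch of that pattern, independent of this change set; slowCall, the one-second delay, and the one-millisecond timeout are invented, and assert.ErrorIs assumes a recent testify version.

// short_context_sketch_test.go (illustrative only; slowCall is a stand-in RPC).
package example

import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func slowCall(ctx context.Context) error {
    select {
    case <-time.After(time.Second):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func TestShortContext(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
    defer cancel()

    err := slowCall(ctx)
    // assert.Error documents that a failure is expected here; ErrorIs pins
    // it to the context deadline rather than some other error.
    assert.Error(t, err)
    assert.ErrorIs(t, err, context.DeadlineExceeded)
}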

View File

@ -149,7 +149,7 @@ func TestRun(t *testing.T) {
rcServerConfig := &paramtable.Get().RootCoordGrpcServerCfg
paramtable.Get().Save(rcServerConfig.Port.Key, "1000000")
err := svr.Run()
assert.NotNil(t, err)
assert.Error(t, err)
assert.EqualError(t, err, "listen tcp: address 1000000: invalid port")
svr.newDataCoordClient = func(string, *clientv3.Client) types.DataCoord {
@ -176,33 +176,33 @@ func TestRun(t *testing.T) {
etcdConfig.EtcdTLSKey.GetValue(),
etcdConfig.EtcdTLSCACert.GetValue(),
etcdConfig.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
sessKey := path.Join(rootcoord.Params.EtcdCfg.MetaRootPath.GetValue(), sessionutil.DefaultServiceRoot)
_, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix())
assert.Nil(t, err)
assert.NoError(t, err)
err = svr.Run()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("CheckHealth", func(t *testing.T) {
ret, err := svr.CheckHealth(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, ret.IsHealthy)
})
t.Run("RenameCollection", func(t *testing.T) {
_, err := svr.RenameCollection(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
})
err = svr.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServerRun_DataCoordClientInitErr(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.newDataCoordClient = func(string, *clientv3.Client) types.DataCoord {
@ -211,14 +211,14 @@ func TestServerRun_DataCoordClientInitErr(t *testing.T) {
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServerRun_DataCoordClientStartErr(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.newDataCoordClient = func(string, *clientv3.Client) types.DataCoord {
@ -227,14 +227,14 @@ func TestServerRun_DataCoordClientStartErr(t *testing.T) {
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServerRun_QueryCoordClientInitErr(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.newQueryCoordClient = func(string, *clientv3.Client) types.QueryCoord {
@ -243,14 +243,14 @@ func TestServerRun_QueryCoordClientInitErr(t *testing.T) {
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestServer_QueryCoordClientStartErr(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, server)
server.newQueryCoordClient = func(string, *clientv3.Client) types.QueryCoord {
@ -259,5 +259,5 @@ func TestServer_QueryCoordClientStartErr(t *testing.T) {
assert.Panics(t, func() { server.Run() })
err = server.Stop()
assert.Nil(t, err)
assert.NoError(t, err)
}

View File

@ -54,7 +54,7 @@ func TestIndexNodeSimple(t *testing.T) {
defer in.Stop()
ctx := context.TODO()
state, err := in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Healthy)
@ -107,7 +107,7 @@ func TestIndexNodeSimple(t *testing.T) {
StorageConfig: genStorageConfig(),
}
status, err := in.CreateJob(ctx, createReq)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
})
@ -127,7 +127,7 @@ func TestIndexNodeSimple(t *testing.T) {
default:
time.Sleep(1 * time.Millisecond)
resp, err := in.QueryJobs(ctx, queryJob)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, resp.ClusterID, clusterID)
@ -153,7 +153,7 @@ func TestIndexNodeSimple(t *testing.T) {
}
jobNumRet, err := in.GetJobStats(ctx, &indexpb.GetJobStatsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, jobNumRet.Status.GetErrorCode(), commonpb.ErrorCode_Success)
assert.Equal(t, jobNumRet.TotalJobNum, int64(0))
assert.Equal(t, jobNumRet.InProgressJobNum, int64(0))
@ -173,7 +173,7 @@ func TestIndexNodeSimple(t *testing.T) {
ClusterID: clusterID,
BuildIDs: []int64{100001},
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
})
}
@ -234,7 +234,7 @@ func TestIndexNodeComplex(t *testing.T) {
defer in.Stop()
ctx := context.TODO()
state, err := in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Healthy)
@ -286,7 +286,7 @@ func TestIndexNodeComplex(t *testing.T) {
go func() {
defer testwg.Done()
status, err := in.CreateJob(ctx, req)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
}()
@ -298,7 +298,7 @@ func TestIndexNodeComplex(t *testing.T) {
ClusterID: clusterID,
BuildIDs: []int64{tasks[idx].buildID},
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
}
}(i)
@ -313,7 +313,7 @@ Loop:
t.Fatal("timeout testing")
default:
jobNumRet, err := in.GetJobStats(ctx, &indexpb.GetJobStatsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, jobNumRet.Status.ErrorCode, commonpb.ErrorCode_Success)
if jobNumRet.TotalJobNum == 0 {
break Loop
@ -329,7 +329,7 @@ Loop:
ClusterID: clusterID,
BuildIDs: buildIDs,
})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, jobresp.Status.ErrorCode, commonpb.ErrorCode_Success)
for _, job := range jobresp.IndexInfos {
@ -357,31 +357,31 @@ Loop:
func TestAbnormalIndexNode(t *testing.T) {
in, err := NewMockIndexNodeComponent(context.TODO())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Nil(t, in.Stop())
ctx := context.TODO()
status, err := in.CreateJob(ctx, &indexpb.CreateJobRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
qresp, err := in.QueryJobs(ctx, &indexpb.QueryJobsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, qresp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
status, err = in.DropJobs(ctx, &indexpb.DropJobsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
jobNumRsp, err := in.GetJobStats(ctx, &indexpb.GetJobStatsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, jobNumRsp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
metricsResp, err := in.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, metricsResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
configurationResp, err := in.ShowConfigurations(ctx, &internalpb.ShowConfigurationsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, configurationResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
}
@ -391,10 +391,10 @@ func TestGetMetrics(t *testing.T) {
metricReq, _ = metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
)
in, err := NewMockIndexNodeComponent(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
defer in.Stop()
resp, err := in.GetMetrics(ctx, metricReq)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
t.Logf("Component: %s, Metrics: %s", resp.ComponentName, resp.Response)
}
@ -405,20 +405,20 @@ func TestGetMetricsError(t *testing.T) {
)
in, err := NewMockIndexNodeComponent(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
defer in.Stop()
errReq := &milvuspb.GetMetricsRequest{
Request: `{"metric_typ": "system_info"}`,
}
resp, err := in.GetMetrics(ctx, errReq)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
unsupportedReq := &milvuspb.GetMetricsRequest{
Request: `{"metric_type": "application_info"}`,
}
resp, err = in.GetMetrics(ctx, unsupportedReq)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
assert.Equal(t, resp.Status.Reason, metricsinfo.MsgUnimplementedMetric)
}

View File

@ -33,7 +33,7 @@ import (
// )
// Params.Init()
// in, err := NewIndexNode(ctx, factory)
// assert.Nil(t, err)
// assert.NoError(t, err)
// in.SetEtcdClient(getEtcdClient())
// assert.Nil(t, in.initSession())
// assert.Nil(t, in.Register())
@ -42,7 +42,7 @@ import (
// key = fmt.Sprintf("%s-%d", key, in.session.ServerID)
// }
// resp, err := getEtcdClient().Get(ctx, path.Join(Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot, key))
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, int64(1), resp.Count)
// sess := &sessionutil.Session{}
// assert.Nil(t, json.Unmarshal(resp.Kvs[0].Value, sess))
@ -93,7 +93,7 @@ import (
// },
// }
// binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
// assert.Nil(t, err)
// assert.NoError(t, err)
// kvs := make(map[string][]byte, len(binLogs))
// paths := make([]string, 0, len(binLogs))
// for i, blob := range binLogs {
@ -102,7 +102,7 @@ import (
// kvs[key] = blob.Value[:]
// }
// err = in.chunkManager.MultiWrite(kvs)
// assert.Nil(t, err)
// assert.NoError(t, err)
//
// indexMeta := &indexpb.IndexMeta{
// IndexBuildID: indexBuildID1,
@ -111,9 +111,9 @@ import (
// }
//
// value, err := proto.Marshal(indexMeta)
// assert.Nil(t, err)
// assert.NoError(t, err)
// err = in.etcdKV.Save(metaPath1, string(value))
// assert.Nil(t, err)
// assert.NoError(t, err)
// req := &indexpb.CreateIndexRequest{
// IndexBuildID: indexBuildID1,
// IndexName: "FloatVector",
@ -151,19 +151,19 @@ import (
// assert.Nil(t, err3)
// indexMetaTmp := indexpb.IndexMeta{}
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// assert.NoError(t, err)
// for indexMetaTmp.State != commonpb.IndexState_Finished {
// time.Sleep(100 * time.Millisecond)
// strValue, err := in.etcdKV.Load(metaPath1)
// assert.Nil(t, err)
// assert.NoError(t, err)
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// assert.NoError(t, err)
// }
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)
// assert.Nil(t, err)
// assert.NoError(t, err)
// }
// }()
//
@ -208,7 +208,7 @@ import (
// },
// }
// binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
// assert.Nil(t, err)
// assert.NoError(t, err)
// kvs := make(map[string][]byte, len(binLogs))
// paths := make([]string, 0, len(binLogs))
// for i, blob := range binLogs {
@ -217,7 +217,7 @@ import (
// kvs[key] = blob.Value[:]
// }
// err = in.chunkManager.MultiWrite(kvs)
// assert.Nil(t, err)
// assert.NoError(t, err)
//
// indexMeta := &indexpb.IndexMeta{
// IndexBuildID: indexBuildID2,
@ -226,9 +226,9 @@ import (
// }
//
// value, err := proto.Marshal(indexMeta)
// assert.Nil(t, err)
// assert.NoError(t, err)
// err = in.etcdKV.Save(metaPath2, string(value))
// assert.Nil(t, err)
// assert.NoError(t, err)
// req := &indexpb.CreateIndexRequest{
// IndexBuildID: indexBuildID2,
// IndexName: "BinaryVector",
@ -262,19 +262,19 @@ import (
// assert.Nil(t, err3)
// indexMetaTmp := indexpb.IndexMeta{}
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// assert.NoError(t, err)
// for indexMetaTmp.State != commonpb.IndexState_Finished {
// time.Sleep(100 * time.Millisecond)
// strValue, err = in.etcdKV.Load(metaPath2)
// assert.Nil(t, err)
// assert.NoError(t, err)
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// assert.NoError(t, err)
// }
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)
// assert.Nil(t, err)
// assert.NoError(t, err)
// }
// }()
//
@ -320,7 +320,7 @@ import (
// },
// }
// binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
// assert.Nil(t, err)
// assert.NoError(t, err)
// kvs := make(map[string][]byte, len(binLogs))
// paths := make([]string, 0, len(binLogs))
// for i, blob := range binLogs {
@ -329,7 +329,7 @@ import (
// kvs[key] = blob.Value[:]
// }
// err = in.chunkManager.MultiWrite(kvs)
// assert.Nil(t, err)
// assert.NoError(t, err)
//
// indexMeta := &indexpb.IndexMeta{
// IndexBuildID: indexBuildID1,
@ -339,9 +339,9 @@ import (
// }
//
// value, err := proto.Marshal(indexMeta)
// assert.Nil(t, err)
// assert.NoError(t, err)
// err = in.etcdKV.Save(metaPath3, string(value))
// assert.Nil(t, err)
// assert.NoError(t, err)
// req := &indexpb.CreateIndexRequest{
// IndexBuildID: indexBuildID1,
// IndexName: "FloatVector",
@ -379,21 +379,21 @@ import (
// assert.Nil(t, err3)
// indexMetaTmp := indexpb.IndexMeta{}
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, true, indexMetaTmp.MarkDeleted)
// assert.Equal(t, int64(1), indexMetaTmp.IndexVersion)
// //for indexMetaTmp.State != commonpb.IndexState_Finished {
// // time.Sleep(100 * time.Millisecond)
// // strValue, err := in.etcdKV.Load(metaPath3)
// // assert.Nil(t, err)
// // assert.NoError(t, err)
// // err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// // assert.Nil(t, err)
// // assert.NoError(t, err)
// //}
// defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFileKeys)
// defer func() {
// for k := range kvs {
// err = in.chunkManager.Remove(k)
// assert.Nil(t, err)
// assert.NoError(t, err)
// }
// }()
//
@ -402,20 +402,20 @@ import (
//
// t.Run("GetComponentStates", func(t *testing.T) {
// resp, err := in.GetComponentStates(ctx)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// assert.Equal(t, commonpb.StateCode_Healthy, resp.State.StateCode)
// })
//
// t.Run("GetTimeTickChannel", func(t *testing.T) {
// resp, err := in.GetTimeTickChannel(ctx)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// })
//
// t.Run("GetStatisticsChannel", func(t *testing.T) {
// resp, err := in.GetStatisticsChannel(ctx)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// })
//
@ -438,18 +438,18 @@ import (
//
// t.Run("GetMetrics_system_info", func(t *testing.T) {
// req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
// assert.Nil(t, err)
// assert.NoError(t, err)
// resp, err := in.GetMetrics(ctx, req)
// assert.Nil(t, err)
// assert.NoError(t, err)
// log.Info("GetMetrics_system_info",
// zap.String("resp", resp.Response),
// zap.String("name", resp.ComponentName))
// })
// err = in.etcdKV.RemoveWithPrefix("session/IndexNode")
// assert.Nil(t, err)
// assert.NoError(t, err)
//
// resp, err = getEtcdClient().Get(ctx, path.Join(Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot, in.session.ServerName))
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, resp.Count, int64(0))
//}
@ -464,26 +464,26 @@ func TestComponentState(t *testing.T) {
in := NewIndexNode(ctx, factory)
in.SetEtcdClient(getEtcdClient())
state, err := in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Abnormal)
assert.Nil(t, in.Init())
state, err = in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Initializing)
assert.Nil(t, in.Start())
state, err = in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Healthy)
assert.Nil(t, in.Stop())
assert.Nil(t, in.Stop())
state, err = in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Abnormal)
}
@ -498,7 +498,7 @@ func TestGetTimeTickChannel(t *testing.T) {
Params.Init()
in := NewIndexNode(ctx, factory)
ret, err := in.GetTimeTickChannel(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
}
@ -513,7 +513,7 @@ func TestGetStatisticChannel(t *testing.T) {
in := NewIndexNode(ctx, factory)
ret, err := in.GetStatisticsChannel(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
}
@ -570,7 +570,7 @@ func TestInitErr(t *testing.T) {
// ctx = context.TODO()
// )
// in, err := NewIndexNode(ctx, factory)
// assert.Nil(t, err)
// assert.NoError(t, err)
// in.SetEtcdClient(getEtcdClient())
// assert.Error(t, in.Init())
}

View File

@ -261,7 +261,7 @@ func TestEmbedEtcd(te *testing.T) {
rootPath := "/etcd/test/root/LoadBytesWithRevision"
_metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
metaKv := _metaKv.(*embed_etcd_kv.EmbedEtcdKV)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")
@ -306,7 +306,7 @@ func TestEmbedEtcd(te *testing.T) {
te.Run("EtcdKV MultiSaveAndMultiLoad", func(t *testing.T) {
rootPath := "/etcd/test/root/multi_save_and_multi_load"
metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")
@ -416,7 +416,7 @@ func TestEmbedEtcd(te *testing.T) {
rootPath := "/etcd/test/root/multi_save_and_multi_load"
_metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
metaKv := _metaKv.(*embed_etcd_kv.EmbedEtcdKV)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")
@ -702,7 +702,7 @@ func TestEmbedEtcd(te *testing.T) {
te.Run("EtcdKV Watch", func(t *testing.T) {
rootPath := "/etcd/test/root/watch"
metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")
@ -720,7 +720,7 @@ func TestEmbedEtcd(te *testing.T) {
rootPath := "/etcd/test/root/revision_bytes"
_metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
metaKv := _metaKv.(*embed_etcd_kv.EmbedEtcdKV)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")
@ -767,7 +767,7 @@ func TestEmbedEtcd(te *testing.T) {
te.Run("Etcd WalkWithPagination", func(t *testing.T) {
rootPath := "/etcd/test/root/walkWithPagination"
metaKv, err := embed_etcd_kv.NewMetaKvFactory(rootPath, &param.EtcdCfg)
assert.Nil(t, err)
assert.NoError(t, err)
defer metaKv.Close()
defer metaKv.RemoveWithPrefix("")

View File

@ -39,13 +39,13 @@ func TestRocksdbKV(t *testing.T) {
defer rocksdbKV.RemoveWithPrefix("")
err = rocksdbKV.Save("abc", "123")
assert.Nil(t, err)
assert.NoError(t, err)
err = rocksdbKV.SaveBytes("abcd", []byte("1234"))
assert.Nil(t, err)
assert.NoError(t, err)
val, err := rocksdbKV.Load("abc")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, val, "123")
value, err := rocksdbKV.LoadBytes("abc")
assert.NoError(t, err)
@ -57,7 +57,7 @@ func TestRocksdbKV(t *testing.T) {
assert.Error(t, err)
keys, vals, err := rocksdbKV.LoadWithPrefix("abc")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
@ -78,21 +78,21 @@ func TestRocksdbKV(t *testing.T) {
assert.Equal(t, values[1], []byte("1234"))
err = rocksdbKV.Save("key_1", "123")
assert.Nil(t, err)
assert.NoError(t, err)
err = rocksdbKV.Save("key_2", "456")
assert.Nil(t, err)
assert.NoError(t, err)
err = rocksdbKV.Save("key_3", "789")
assert.Nil(t, err)
assert.NoError(t, err)
keys = []string{"key_1", "key_2"}
vals, err = rocksdbKV.MultiLoad(keys)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(vals), len(keys))
assert.Equal(t, vals[0], "123")
assert.Equal(t, vals[1], "456")
values, err = rocksdbKV.MultiLoadBytes(keys)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(values), len(keys))
assert.Equal(t, values[0], []byte("123"))
assert.Equal(t, values[1], []byte("456"))
@ -130,10 +130,10 @@ func TestRocksdbKV_Prefix(t *testing.T) {
"abddqqq": "1234555",
}
err = rocksdbKV.MultiSave(kvs)
assert.Nil(t, err)
assert.NoError(t, err)
keys, vals, err := rocksdbKV.LoadWithPrefix("abc")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), 1)
assert.Equal(t, len(vals), 1)
@ -147,46 +147,46 @@ func TestRocksdbKV_Prefix(t *testing.T) {
}
err = rocksdbKV.MultiSaveBytes(bytesKvs)
assert.Nil(t, err)
assert.NoError(t, err)
var values [][]byte
keys, values, err = rocksdbKV.LoadBytesWithPrefix("abc")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), 1)
assert.Equal(t, len(values), 1)
assert.Equal(t, keys[0], "abcd")
assert.Equal(t, values[0], []byte("123"))
err = rocksdbKV.RemoveWithPrefix("abc")
assert.Nil(t, err)
assert.NoError(t, err)
val, err := rocksdbKV.Load("abc")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(val), 0)
val, err = rocksdbKV.Load("abdd")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, val, "1234")
val, err = rocksdbKV.Load("abddqqq")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, val, "1234555")
// test remove ""
err = rocksdbKV.RemoveWithPrefix("")
assert.Nil(t, err)
assert.NoError(t, err)
// test remove from an empty cf
err = rocksdbKV.RemoveWithPrefix("")
assert.Nil(t, err)
assert.NoError(t, err)
val, err = rocksdbKV.Load("abddqqq")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(val), 0)
// test we can still save after drop
err = rocksdbKV.Save("abcd", "123")
assert.Nil(t, err)
assert.NoError(t, err)
val, err = rocksdbKV.Load("abcd")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, val, "123")
}
@ -261,7 +261,7 @@ func TestRocksdbKV_Txn(t *testing.T) {
func TestRocksdbKV_Goroutines(t *testing.T) {
name := "/tmp/rocksdb"
rocksdbkv, err := rocksdbkv.NewRocksdbKV(name)
assert.Nil(t, err)
assert.NoError(t, err)
defer os.RemoveAll(name)
defer rocksdbkv.Close()
defer rocksdbkv.RemoveWithPrefix("")
@ -274,10 +274,10 @@ func TestRocksdbKV_Goroutines(t *testing.T) {
key := "key_" + strconv.Itoa(i)
val := "val_" + strconv.Itoa(i)
err := rocksdbkv.Save(key, val)
assert.Nil(t, err)
assert.NoError(t, err)
getVal, err := rocksdbkv.Load(key)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, getVal, val)
}(i)
}
@ -287,7 +287,7 @@ func TestRocksdbKV_Goroutines(t *testing.T) {
func TestRocksdbKV_DummyDB(t *testing.T) {
name := "/tmp/rocksdb_dummy"
rocksdbkv, err := rocksdbkv.NewRocksdbKV(name)
assert.Nil(t, err)
assert.NoError(t, err)
defer os.RemoveAll(name)
defer rocksdbkv.Close()
defer rocksdbkv.RemoveWithPrefix("")
@ -322,7 +322,7 @@ func TestRocksdbKV_DummyDB(t *testing.T) {
func TestRocksdbKV_CornerCase(t *testing.T) {
name := "/tmp/rocksdb_corner"
rocksdbkv, err := rocksdbkv.NewRocksdbKV(name)
assert.Nil(t, err)
assert.NoError(t, err)
defer os.RemoveAll(name)
defer rocksdbkv.Close()
defer rocksdbkv.RemoveWithPrefix("")
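
Many of the KV checks above guard state that every later call depends on, such as opening the store or an initial Save. The sketch below, separate from this change set, shows when require.NoError is a better fit than assert.NoError: require aborts the test on the spot instead of letting it continue against an unusable handle. The store type, openStore constructor, and path are invented.

// kv_setup_sketch_test.go (illustrative only; store and openStore are invented).
package example

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

type store struct{ data map[string]string }

func openStore(path string) (*store, error) {
    if path == "" {
        return nil, errors.New("empty path")
    }
    return &store{data: map[string]string{}}, nil
}

func (s *store) Save(key, value string) error {
    s.data[key] = value
    return nil
}

func TestStoreSetup(t *testing.T) {
    s, err := openStore("/tmp/example_store")
    // require stops the test immediately on failure, so the calls below
    // never run against a nil *store.
    require.NoError(t, err)

    // assert records a failure but lets the remaining checks run.
    assert.NoError(t, s.Save("k", "v"))
    assert.Equal(t, "v", s.data["k"])
}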

View File

@ -35,7 +35,7 @@ func TestCollectionAlias_Insert(t *testing.T) {
// actual
err := aliasTestDb.Insert(collAliases)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestCollectionAlias_Insert_Error(t *testing.T) {
@ -75,7 +75,7 @@ func TestCollectionAlias_GetCollectionIDByName(t *testing.T) {
// actual
res, err := aliasTestDb.GetCollectionIDByAlias(tenantID, alias, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collID1, res)
}
@ -129,7 +129,7 @@ func TestCollectionAlias_ListCidTs(t *testing.T) {
// actual
res, err := aliasTestDb.ListCollectionIDTs(tenantID, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collAliases, res)
}
@ -177,7 +177,7 @@ func TestCollectionAlias_List(t *testing.T) {
// actual
res, err := aliasTestDb.List(tenantID, cidTsPairs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, out, res)
}

View File

@ -35,7 +35,7 @@ func TestCollectionChannel_GetByCollID(t *testing.T) {
// actual
res, err := channelTestDb.GetByCollectionID(tenantID, collID1, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collChannels, res)
}
@ -75,7 +75,7 @@ func TestCollectionChannel_Insert(t *testing.T) {
// actual
err := channelTestDb.Insert(collChannels)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestCollectionChannel_Insert_Error(t *testing.T) {

View File

@ -121,7 +121,7 @@ func TestCollection_GetCidTs_Ts0(t *testing.T) {
// actual
res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, noTs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, res)
}
@ -141,7 +141,7 @@ func TestCollection_GetCidTs_TsNot0(t *testing.T) {
// actual
res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, res)
}
@ -191,7 +191,7 @@ func TestCollection_ListCidTs_TsNot0(t *testing.T) {
// actual
res, err := collTestDb.ListCollectionIDTs(tenantID, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, res)
}
@ -229,7 +229,7 @@ func TestCollection_ListCidTs_Ts0(t *testing.T) {
// actual
res, err := collTestDb.ListCollectionIDTs(tenantID, noTs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, res)
}
@ -255,7 +255,7 @@ func TestCollection_Get(t *testing.T) {
// actual
res, err := collTestDb.Get(tenantID, collID1, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, res)
}
@ -319,7 +319,7 @@ func TestCollection_GetCollectionIDByName(t *testing.T) {
// actual
res, err := collTestDb.GetCollectionIDByName(tenantID, collectionName, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collID1, res)
}
@ -377,7 +377,7 @@ func TestCollection_Insert(t *testing.T) {
// actual
err := collTestDb.Insert(collection)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestCollection_Insert_Error(t *testing.T) {
@ -462,7 +462,7 @@ func Test_collectionDb_Update(t *testing.T) {
// actual
err := collTestDb.Update(collection)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("error", func(t *testing.T) {

View File

@ -38,7 +38,7 @@ func TestField_GetByCollID(t *testing.T) {
// actual
res, err := fieldTestDb.GetByCollectionID(tenantID, collID1, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fields, res)
}
@ -83,7 +83,7 @@ func TestField_Insert(t *testing.T) {
// actual
err := fieldTestDb.Insert(fields)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestField_Insert_Error(t *testing.T) {

View File

@ -35,7 +35,7 @@ func TestIndex_Get(t *testing.T) {
// actual
res, err := indexTestDb.Get(tenantID, collID1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, indexes, res)
}
@ -76,7 +76,7 @@ func TestIndex_List(t *testing.T) {
// actual
res, err := indexTestDb.List(tenantID)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, indexResults, res)
}
@ -120,7 +120,7 @@ func TestIndex_Insert(t *testing.T) {
// actual
err := indexTestDb.Insert(indexes)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndex_Insert_Error(t *testing.T) {
@ -170,7 +170,7 @@ func TestIndex_Update(t *testing.T) {
// actual
err := indexTestDb.Update(index)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndex_Update_Error(t *testing.T) {
@ -205,7 +205,7 @@ func TestIndex_MarkDeletedByCollID(t *testing.T) {
// actual
err := indexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndex_MarkDeletedByCollID_Error(t *testing.T) {
@ -231,7 +231,7 @@ func TestIndex_MarkDeletedByIdxID(t *testing.T) {
// actual
err := indexTestDb.MarkDeletedByIndexID(tenantID, indexID1)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndex_MarkDeletedByIdxID_Error(t *testing.T) {

View File

@ -33,7 +33,7 @@ func TestPartition_GetByCollID(t *testing.T) {
// actual
res, err := partitionTestDb.GetByCollectionID(tenantID, collID1, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, partitions, res)
}
@ -73,7 +73,7 @@ func TestPartition_Insert(t *testing.T) {
// actual
err := partitionTestDb.Insert(partitions)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestPartition_Insert_Error(t *testing.T) {

View File

@ -43,7 +43,7 @@ func TestSegmentIndex_Insert(t *testing.T) {
// actual
err := segIndexTestDb.Insert(segIndexes)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestSegmentIndex_Insert_Error(t *testing.T) {
@ -113,7 +113,7 @@ func TestSegmentIndex_Update(t *testing.T) {
// actual
err := segIndexTestDb.Update(segIndexes[0])
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestSegmentIndex_Upsert_Error(t *testing.T) {
@ -172,7 +172,7 @@ func TestSegmentIndex_MarkDeleted(t *testing.T) {
// actual
err := segIndexTestDb.MarkDeleted(tenantID, segIndexes)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestSegmentIndex_MarkDeleted_Error(t *testing.T) {
@ -205,7 +205,7 @@ func TestSegmentIndex_MarkDeletedByCollID(t *testing.T) {
// actual
err := segIndexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestSegmentIndex_MarkDeletedByCollID_Error(t *testing.T) {
@ -231,7 +231,7 @@ func TestSegmentIndex_MarkDeletedByBuildID(t *testing.T) {
// actual
err := segIndexTestDb.MarkDeletedByBuildID(tenantID, indexBuildID1)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestSegmentIndex_MarkDeletedByIdxID_Error(t *testing.T) {

View File

@ -30,7 +30,7 @@ func TestUser_GetByUsername(t *testing.T) {
// actual
res, err := userTestDb.GetByUsername(tenantID, username)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, user, res)
}
@ -85,7 +85,7 @@ func TestUser_ListUsername(t *testing.T) {
// actual
res, err := userTestDb.ListUser(tenantID)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 2, len(res))
assert.Equal(t, usernames[0], res[0].Username)
assert.Equal(t, usernames[1], res[1].Username)
@ -123,7 +123,7 @@ func TestUser_Insert(t *testing.T) {
// actual
err := userTestDb.Insert(user)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestUser_Insert_Error(t *testing.T) {
@ -161,7 +161,7 @@ func TestUser_MarkDeletedByUsername(t *testing.T) {
// actual
err := userTestDb.MarkDeletedByUsername(tenantID, username)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestUser_MarkDeletedByUsername_Error(t *testing.T) {
@ -192,7 +192,7 @@ func TestUser_UpdatePassword(t *testing.T) {
// actual
err := userTestDb.UpdatePassword(tenantID, username, encryptedPassword)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestUser_UpdatePassword_Error(t *testing.T) {

View File

@ -282,7 +282,7 @@ func Test_ListSegments(t *testing.T) {
catalog := NewCatalog(txn, rootPath, "")
err := catalog.AddSegment(context.TODO(), segment1)
assert.Nil(t, err)
assert.NoError(t, err)
txn.walkWithPrefix = func(prefix string, paginationSize int, fn func(k []byte, v []byte) error) error {
if strings.HasPrefix(k5, prefix) {
@ -305,7 +305,7 @@ func Test_ListSegments(t *testing.T) {
ret, err := catalog.ListSegments(context.TODO())
assert.NotNil(t, ret)
assert.Nil(t, err)
assert.NoError(t, err)
verifySegments(t, logID, ret)
})
@ -351,7 +351,7 @@ func Test_AddSegments(t *testing.T) {
catalog := NewCatalog(txn, rootPath, "")
err := catalog.AddSegment(context.TODO(), segment1)
assert.Nil(t, err)
assert.NoError(t, err)
adjustedSeg, err := catalog.LoadFromSegmentPath(segment1.CollectionID, segment1.PartitionID, segment1.ID)
assert.NoError(t, err)
// Check that num of rows is corrected from 100 to 5.
@ -400,10 +400,10 @@ func Test_AlterSegments(t *testing.T) {
catalog := NewCatalog(txn, rootPath, "")
err := catalog.AlterSegments(context.TODO(), []*datapb.SegmentInfo{})
assert.Nil(t, err)
assert.NoError(t, err)
err = catalog.AlterSegments(context.TODO(), []*datapb.SegmentInfo{segment1})
assert.Nil(t, err)
assert.NoError(t, err)
_, ok := savedKvs[k4]
assert.False(t, ok)
@ -433,7 +433,7 @@ func Test_AlterSegments(t *testing.T) {
catalog := NewCatalog(txn, rootPath, "")
err := catalog.AlterSegments(context.TODO(), []*datapb.SegmentInfo{})
assert.Nil(t, err)
assert.NoError(t, err)
var binlogXL []*datapb.FieldBinlog
for i := 0; i < 255; i++ {
@ -460,7 +460,7 @@ func Test_AlterSegments(t *testing.T) {
}
err = catalog.AlterSegments(context.TODO(), []*datapb.SegmentInfo{segmentXL})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 255+3, len(savedKvs))
assert.Equal(t, 3, opGroupCount)
@ -603,7 +603,7 @@ func Test_SaveDroppedSegmentsInBatch_MultiSave(t *testing.T) {
{
var segments []*datapb.SegmentInfo
err := catalog.SaveDroppedSegmentsInBatch(context.TODO(), segments)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, count)
assert.Equal(t, 0, kvSize)
}
@ -619,7 +619,7 @@ func Test_SaveDroppedSegmentsInBatch_MultiSave(t *testing.T) {
}
err := catalog.SaveDroppedSegmentsInBatch(context.TODO(), segments1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, count)
assert.Equal(t, 1, kvSize)
}
@ -638,7 +638,7 @@ func Test_SaveDroppedSegmentsInBatch_MultiSave(t *testing.T) {
count = 0
kvSize = 0
err := catalog.SaveDroppedSegmentsInBatch(context.TODO(), segments2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 2, count)
assert.Equal(t, 129, kvSize)
}

View File

@ -302,7 +302,7 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
}
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, ss)
defer ss.Close()
@ -310,22 +310,22 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
vtso = typeutil.Timestamp(100 + i*5)
ts := ftso()
err = ss.Save("key", fmt.Sprintf("value-%d", i), ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, vtso, ts)
}
for i := 0; i < 20; i++ {
val, err := ss.Load("key", typeutil.Timestamp(100+i*5+2))
t.Log("ts:", typeutil.Timestamp(100+i*5+2), i, val)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("value-%d", i), val)
}
val, err := ss.Load("key", 0)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, "value-19", val)
for i := 0; i < 20; i++ {
val, err := ss.Load("key", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, val, fmt.Sprintf("value-%d", i))
}
@ -358,7 +358,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
}
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, ss)
defer ss.Close()
@ -367,13 +367,13 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
vtso = typeutil.Timestamp(100 + i*5)
ts := ftso()
err = ss.MultiSave(saves, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, vtso, ts)
}
for i := 0; i < 20; i++ {
keys, vals, err := ss.LoadWithPrefix("k", typeutil.Timestamp(100+i*5+2))
t.Log(i, keys, vals)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
assert.Equal(t, keys[0], "k1")
@ -382,7 +382,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
assert.Equal(t, vals[1], fmt.Sprintf("v2-%d", i))
}
keys, vals, err := ss.LoadWithPrefix("k", 0)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
assert.Equal(t, keys[0], "k1")
@ -392,7 +392,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
for i := 0; i < 20; i++ {
keys, vals, err := ss.LoadWithPrefix("k", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
assert.ElementsMatch(t, keys, []string{"k1", "k2"})
@ -400,9 +400,9 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
}
// mix non ts k-v
err = ss.Save("kextra", "extra-value", 0)
assert.Nil(t, err)
assert.NoError(t, err)
keys, vals, err = ss.LoadWithPrefix("k", typeutil.Timestamp(300))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
assert.ElementsMatch(t, keys, []string{"k1", "k2"})
@ -615,7 +615,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
}
ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, ss)
defer ss.Close()
@ -623,7 +623,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
vtso = typeutil.Timestamp(100 + i*5)
ts := ftso()
err = ss.Save(fmt.Sprintf("kd-%04d", i), fmt.Sprintf("value-%d", i), ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, vtso, ts)
}
for i := 20; i < 40; i++ {
@ -632,49 +632,49 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
vtso = typeutil.Timestamp(100 + i*5)
ts := ftso()
err = ss.MultiSaveAndRemoveWithPrefix(sm, dm, ts)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, vtso, ts)
}
for i := 0; i < 20; i++ {
val, err := ss.Load(fmt.Sprintf("kd-%04d", i), typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("value-%d", i), val)
_, vals, err := ss.LoadWithPrefix("kd-", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, i+1, len(vals))
}
for i := 20; i < 40; i++ {
val, err := ss.Load("ks", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("value-%d", i), val)
_, vals, err := ss.LoadWithPrefix("kd-", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 39-i, len(vals))
}
for i := 0; i < 20; i++ {
val, err := ss.Load(fmt.Sprintf("kd-%04d", i), typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("value-%d", i), val)
_, vals, err := ss.LoadWithPrefix("kd-", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, i+1, len(vals))
}
for i := 20; i < 40; i++ {
val, err := ss.Load("ks", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("value-%d", i), val)
_, vals, err := ss.LoadWithPrefix("kd-", typeutil.Timestamp(100+i*5+2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 39-i, len(vals))
}
// try to load
_, err = ss.Load("kd-0000", 500)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = ss.Load("kd-0000", 0)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = ss.Load("kd-0000", 1)
assert.NotNil(t, err)
assert.Error(t, err)
// cleanup
ss.MultiSaveAndRemoveWithPrefix(map[string]string{}, []string{""}, 0)

View File

@ -36,7 +36,7 @@ func TestMain(m *testing.M) {
func TestClient(t *testing.T) {
client, err := NewClient(Options{})
assert.NotNil(t, client)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestClient_CreateProducer(t *testing.T) {
@ -192,7 +192,7 @@ func TestClient_SeekLatest(t *testing.T) {
Properties: map[string]string{},
}
id, err := producer.Send(msg)
assert.Nil(t, err)
assert.NoError(t, err)
msgChan := consumer1.Chan()
msgRead, ok := <-msgChan
@ -224,7 +224,7 @@ func TestClient_SeekLatest(t *testing.T) {
Payload: make([]byte, 8),
}
_, err = producer.Send(msg)
assert.Nil(t, err)
assert.NoError(t, err)
}
}
@ -265,7 +265,7 @@ func TestClient_consume(t *testing.T) {
Payload: make([]byte, 10),
}
id, err := producer.Send(msg)
assert.Nil(t, err)
assert.NoError(t, err)
msgChan := consumer.Chan()
msgConsume, ok := <-msgChan

View File

@ -29,24 +29,24 @@ func TestConsumer_newConsumer(t *testing.T) {
SubscriptionInitialPosition: mqwrapper.SubscriptionPositionEarliest,
})
assert.Nil(t, consumer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
consumer, err = newConsumer(newMockClient(), ConsumerOptions{})
assert.Nil(t, consumer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
consumer, err = getExistedConsumer(newMockClient(), ConsumerOptions{}, nil)
assert.Nil(t, consumer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
consumer, err = newConsumer(newMockClient(), ConsumerOptions{
Topic: newTopicName(),
})
assert.Nil(t, consumer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
/////////////////////////////////////////////////
@ -116,7 +116,7 @@ func TestConsumer_Subscription(t *testing.T) {
SubscriptionInitialPosition: mqwrapper.SubscriptionPositionEarliest,
})
assert.Nil(t, consumer)
assert.NotNil(t, err)
assert.Error(t, err)
//assert.Equal(t, consumerName, consumer.Subscription())
}
@ -143,5 +143,5 @@ func TestConsumer_Seek(t *testing.T) {
assert.NotNil(t, consumer)
err = consumer.Seek(0)
assert.NotNil(t, err)
assert.Error(t, err)
}
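
Several checks in this file pair assert.Error with a direct type assertion such as err.(*Error).Result() to read an error code. The sketch below, not tied to this change set, does the same with errors.As, which fails the test cleanly instead of panicking if the concrete type ever differs; codedError and newThing are invented names.

// typed_error_sketch_test.go (illustrative only; codedError and newThing are invented).
package example

import (
    "errors"
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

type codedError struct{ code int }

func (e *codedError) Error() string { return fmt.Sprintf("code=%d", e.code) }

func newThing(valid bool) error {
    if !valid {
        return &codedError{code: 42}
    }
    return nil
}

func TestTypedError(t *testing.T) {
    err := newThing(false)
    require.Error(t, err)

    // errors.As extracts the concrete error without the panic risk of a
    // bare err.(*codedError) type assertion on an unexpected type.
    var ce *codedError
    require.True(t, errors.As(err, &ce))
    assert.Equal(t, 42, ce.code)
}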

View File

@ -23,13 +23,13 @@ func TestProducer(t *testing.T) {
Topic: newTopicName(),
})
assert.Nil(t, producer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
// invalid ProducerOptions
producer, err = newProducer(newMockClient(), ProducerOptions{})
assert.Nil(t, producer)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, InvalidConfiguration, err.(*Error).Result())
}
@ -39,6 +39,6 @@ func TestProducerTopic(t *testing.T) {
Topic: topicName,
})
assert.Nil(t, producer)
assert.NotNil(t, err)
assert.Error(t, err)
//assert.Equal(t, topicName, producer.Topic())
}

View File

@ -64,7 +64,7 @@ func Test_InitRocksMQ(t *testing.T) {
groupName := "group_register"
_ = Rmq.DestroyConsumerGroup(topicName, groupName)
err = Rmq.CreateConsumerGroup(topicName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
consumer := &Consumer{
Topic: topicName,

View File

@ -184,7 +184,7 @@ func TestRocksmq_RegisterConsumer(t *testing.T) {
defer rmq.DestroyTopic(topicName)
err = rmq.CreateConsumerGroup(topicName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyConsumerGroup(topicName, groupName)
consumer := &Consumer{
@ -205,7 +205,7 @@ func TestRocksmq_RegisterConsumer(t *testing.T) {
pMsgs[0] = pMsgA
_, err = rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
rmq.Notify(topicName, groupName)
@ -237,12 +237,12 @@ func TestRocksmq_Basic(t *testing.T) {
defer os.RemoveAll(rocksdbPath)
paramtable.Init()
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName := "channel_rocks"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
msgA := "a_message"
@ -251,7 +251,7 @@ func TestRocksmq_Basic(t *testing.T) {
pMsgs[0] = pMsgA
_, err = rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
pMsgB := ProducerMessage{Payload: []byte("b_message"), Properties: map[string]string{common.TraceIDKey: "b"}}
pMsgC := ProducerMessage{Payload: []byte("c_message"), Properties: map[string]string{common.TraceIDKey: "c"}}
@ -259,7 +259,7 @@ func TestRocksmq_Basic(t *testing.T) {
pMsgs[0] = pMsgB
pMsgs = append(pMsgs, pMsgC)
_, err = rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
// Before 2.2.0, ProducerMessage and ConsumerMessage in rocksmq carried no properties.
// This checks that messages produced in the pre-2.2.0 format can still be consumed after 2.2.0.
@ -269,17 +269,17 @@ func TestRocksmq_Basic(t *testing.T) {
tMsgs[0] = tMsgD
_, err = rmq.produceBefore(channelName, tMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
// double create consumer group
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Error(t, err)
cMsgs, err := rmq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 1)
assert.Equal(t, string(cMsgs[0].Payload), "a_message")
_, ok := cMsgs[0].Properties[common.TraceIDKey]
@ -287,7 +287,7 @@ func TestRocksmq_Basic(t *testing.T) {
assert.Equal(t, cMsgs[0].Properties[common.TraceIDKey], "a")
cMsgs, err = rmq.Consume(channelName, groupName, 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 2)
assert.Equal(t, string(cMsgs[0].Payload), "b_message")
_, ok = cMsgs[0].Properties[common.TraceIDKey]
@ -299,7 +299,7 @@ func TestRocksmq_Basic(t *testing.T) {
assert.Equal(t, cMsgs[1].Properties[common.TraceIDKey], "c")
cMsgs, err = rmq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 1)
assert.Equal(t, string(cMsgs[0].Payload), "d_message")
_, ok = cMsgs[0].Properties[common.TraceIDKey]
@ -322,12 +322,12 @@ func TestRocksmq_MultiConsumer(t *testing.T) {
params := paramtable.Get()
params.Save(params.RocksmqCfg.PageSize.Key, "10")
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName := "channel_rocks"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
msgNum := 10
@ -338,14 +338,14 @@ func TestRocksmq_MultiConsumer(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
for i := 0; i <= 10; i++ {
groupName := "group_" + strconv.Itoa(i)
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
consumer := &Consumer{
Topic: channelName,
@ -357,7 +357,7 @@ func TestRocksmq_MultiConsumer(t *testing.T) {
for i := 0; i <= 10; i++ {
groupName := "group_" + strconv.Itoa(i)
cMsgs, err := rmq.Consume(channelName, groupName, 10)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 10)
assert.Equal(t, string(cMsgs[0].Payload), "message_0")
}
@ -374,7 +374,7 @@ func TestRocksmq_Dummy(t *testing.T) {
defer os.RemoveAll(rocksdbPath)
paramtable.Init()
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
_, err = NewRocksMQ("", idAllocator)
@ -382,11 +382,11 @@ func TestRocksmq_Dummy(t *testing.T) {
channelName := "channel_a"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
// creating the same topic twice should be a no-op
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
channelName1 := "channel_dummy"
topicMu.Store(channelName1, new(sync.Mutex))
@ -444,7 +444,7 @@ func TestRocksmq_Seek(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
_, err = NewRocksMQ("", idAllocator)
@ -470,7 +470,7 @@ func TestRocksmq_Seek(t *testing.T) {
if i == 51 {
seekID2 = id[0]
}
assert.Nil(t, err)
assert.NoError(t, err)
}
groupName1 := "group_dummy"
@ -495,10 +495,10 @@ func TestRocksmq_Seek(t *testing.T) {
func TestRocksmq_Loop(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -512,13 +512,13 @@ func TestRocksmq_Loop(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
loopNum := 100
channelName := "channel_test"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
// Produce one message at a time
@ -528,7 +528,7 @@ func TestRocksmq_Loop(t *testing.T) {
pMsgs := make([]ProducerMessage, 1)
pMsgs[0] = pMsg
_, err := rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
}
// Produce loopNum messages in a single batch
@ -539,15 +539,15 @@ func TestRocksmq_Loop(t *testing.T) {
pMsgs[i] = pMsg
}
_, err = rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
// Consume loopNum messages in a single call
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs, err := rmq.Consume(channelName, groupName, loopNum)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), loopNum)
assert.Equal(t, string(cMsgs[0].Payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].Payload), "message_"+strconv.Itoa(loopNum-1))
@ -555,23 +555,23 @@ func TestRocksmq_Loop(t *testing.T) {
// Consume one message at a time
for i := 0; i < loopNum; i++ {
oneMsgs, err := rmq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(oneMsgs), 1)
assert.Equal(t, string(oneMsgs[0].Payload), "message_"+strconv.Itoa(i+loopNum))
}
cMsgs, err = rmq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 0)
}
func TestRocksmq_Goroutines(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -584,13 +584,13 @@ func TestRocksmq_Goroutines(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
loopNum := 100
channelName := "channel_test"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
// Produce two messages in each goroutine
@ -608,7 +608,7 @@ func TestRocksmq_Goroutines(t *testing.T) {
pMsgs[1] = pMsg1
ids, err := mq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
msgChan <- msg0
msgChan <- msg1
@ -618,14 +618,14 @@ func TestRocksmq_Goroutines(t *testing.T) {
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
// Consume one message in each goroutine
for i := 0; i < loopNum; i++ {
go func(group *sync.WaitGroup, mq RocksMQ) {
defer group.Done()
<-msgChan
cMsgs, err := mq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 1)
}(&wg, rmq)
}
@ -647,10 +647,10 @@ func TestRocksmq_Goroutines(t *testing.T) {
func TestRocksmq_Throughout(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -663,12 +663,12 @@ func TestRocksmq_Throughout(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName := "channel_throughout_test"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
entityNum := 100000
@ -678,7 +678,7 @@ func TestRocksmq_Throughout(t *testing.T) {
msg := "message_" + strconv.Itoa(i)
pMsg := ProducerMessage{Payload: []byte(msg)}
ids, err := rmq.Produce(channelName, []ProducerMessage{pMsg})
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, len(ids))
}
pt1 := time.Now().UnixNano() / int64(time.Millisecond)
@ -691,14 +691,14 @@ func TestRocksmq_Throughout(t *testing.T) {
groupName := "test_throughout_group"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyConsumerGroup(channelName, groupName)
// Consume one message in each goroutine
ct0 := time.Now().UnixNano() / int64(time.Millisecond)
for i := 0; i < entityNum; i++ {
cMsgs, err := rmq.Consume(channelName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 1)
}
ct1 := time.Now().UnixNano() / int64(time.Millisecond)
@ -712,10 +712,10 @@ func TestRocksmq_Throughout(t *testing.T) {
func TestRocksmq_MultiChan(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -728,18 +728,18 @@ func TestRocksmq_MultiChan(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName0 := "chan01"
channelName1 := "chan11"
err = rmq.CreateTopic(channelName0)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName0)
err = rmq.CreateTopic(channelName1)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName1)
assert.Nil(t, err)
assert.NoError(t, err)
loopNum := 10
for i := 0; i < loopNum; i++ {
@ -748,17 +748,17 @@ func TestRocksmq_MultiChan(t *testing.T) {
pMsg0 := ProducerMessage{Payload: []byte(msg0)}
pMsg1 := ProducerMessage{Payload: []byte(msg1)}
_, err = rmq.Produce(channelName0, []ProducerMessage{pMsg0})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = rmq.Produce(channelName1, []ProducerMessage{pMsg1})
assert.Nil(t, err)
assert.NoError(t, err)
}
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(channelName1, groupName)
err = rmq.CreateConsumerGroup(channelName1, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs, err := rmq.Consume(channelName1, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 1)
assert.Equal(t, string(cMsgs[0].Payload), "for_chann1_"+strconv.Itoa(0))
}
@ -766,10 +766,10 @@ func TestRocksmq_MultiChan(t *testing.T) {
func TestRocksmq_CopyData(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -782,51 +782,51 @@ func TestRocksmq_CopyData(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName0 := "test_chan01"
channelName1 := "test_chan11"
err = rmq.CreateTopic(channelName0)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName0)
err = rmq.CreateTopic(channelName1)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName1)
assert.Nil(t, err)
assert.NoError(t, err)
msg0 := "abcde"
pMsg0 := ProducerMessage{Payload: []byte(msg0)}
_, err = rmq.Produce(channelName0, []ProducerMessage{pMsg0})
assert.Nil(t, err)
assert.NoError(t, err)
pMsg1 := ProducerMessage{Payload: nil}
_, err = rmq.Produce(channelName1, []ProducerMessage{pMsg1})
assert.Nil(t, err)
assert.NoError(t, err)
pMsg2 := ProducerMessage{Payload: []byte{}}
_, err = rmq.Produce(channelName1, []ProducerMessage{pMsg2})
assert.Nil(t, err)
assert.NoError(t, err)
var emptyTargetData []byte
pMsg3 := ProducerMessage{Payload: emptyTargetData}
_, err = rmq.Produce(channelName1, []ProducerMessage{pMsg3})
assert.Nil(t, err)
assert.NoError(t, err)
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(channelName0, groupName)
err = rmq.CreateConsumerGroup(channelName0, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs0, err := rmq.Consume(channelName0, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs0), 1)
assert.Equal(t, string(cMsgs0[0].Payload), msg0)
_ = rmq.DestroyConsumerGroup(channelName1, groupName)
err = rmq.CreateConsumerGroup(channelName1, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs1, err := rmq.Consume(channelName1, groupName, 3)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 3, len(cMsgs1))
assert.Equal(t, emptyTargetData, cMsgs1[0].Payload)
}
@ -834,10 +834,10 @@ func TestRocksmq_CopyData(t *testing.T) {
func TestRocksmq_SeekToLatest(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -850,12 +850,12 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
paramtable.Init()
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName := "channel_test"
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelName)
loopNum := 100
@ -866,14 +866,14 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
groupName := "group_test"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
err = rmq.SeekToLatest(channelName, groupName)
assert.NoError(t, err)
channelNamePrev := "channel_tes"
err = rmq.CreateTopic(channelNamePrev)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(channelNamePrev)
pMsgs := make([]ProducerMessage, loopNum)
for i := 0; i < loopNum; i++ {
@ -882,18 +882,18 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
pMsgs[i] = pMsg
}
_, err = rmq.Produce(channelNamePrev, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
// should hit the case where channel is null
err = rmq.SeekToLatest(channelName, groupName)
assert.NoError(t, err)
ids, err := rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
// able to read out
cMsgs, err := rmq.Consume(channelName, groupName, loopNum)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), loopNum)
for i := 0; i < loopNum; i++ {
assert.Equal(t, cMsgs[i].MsgID, ids[i])
@ -903,7 +903,7 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
assert.NoError(t, err)
cMsgs, err = rmq.Consume(channelName, groupName, loopNum)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), 0)
pMsgs = make([]ProducerMessage, loopNum)
@ -913,11 +913,11 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err = rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
// make sure we only consume the latest message
cMsgs, err = rmq.Consume(channelName, groupName, loopNum)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(cMsgs), loopNum)
for i := 0; i < loopNum; i++ {
assert.Equal(t, cMsgs[i].MsgID, ids[i])
@ -927,10 +927,10 @@ func TestRocksmq_SeekToLatest(t *testing.T) {
func TestRocksmq_GetLatestMsg(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -941,21 +941,21 @@ func TestRocksmq_GetLatestMsg(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
channelName := newChanName()
err = rmq.CreateTopic(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
// Consume loopNum message once
groupName := "last_msg_test"
_ = rmq.DestroyConsumerGroup(channelName, groupName)
err = rmq.CreateConsumerGroup(channelName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
msgID, err := rmq.GetLatestMsg(channelName)
assert.Equal(t, msgID, DefaultMessageID)
assert.Nil(t, err)
assert.NoError(t, err)
loopNum := 10
pMsgs1 := make([]ProducerMessage, loopNum)
@ -971,23 +971,23 @@ func TestRocksmq_GetLatestMsg(t *testing.T) {
}
ids, err := rmq.Produce(channelName, pMsgs1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(ids), loopNum)
// test latest msg when one topic is created
msgID, err = rmq.GetLatestMsg(channelName)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, msgID, ids[loopNum-1])
// test latest msg when two topics are created
channelName2 := newChanName()
err = rmq.CreateTopic(channelName2)
assert.Nil(t, err)
assert.NoError(t, err)
ids, err = rmq.Produce(channelName2, pMsgs2)
assert.Nil(t, err)
assert.NoError(t, err)
msgID, err = rmq.GetLatestMsg(channelName2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, msgID, ids[loopNum-1])
// test close rmq
@ -995,7 +995,7 @@ func TestRocksmq_GetLatestMsg(t *testing.T) {
rmq.Close()
msgID, err = rmq.GetLatestMsg(channelName)
assert.Equal(t, msgID, DefaultMessageID)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestRocksmq_CheckPreTopicValid(t *testing.T) {
@ -1009,7 +1009,7 @@ func TestRocksmq_CheckPreTopicValid(t *testing.T) {
defer os.RemoveAll(rocksdbPath)
paramtable.Init()
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
channelName1 := "topic1"
@ -1021,7 +1021,7 @@ func TestRocksmq_CheckPreTopicValid(t *testing.T) {
// topic is not empty
err = rmq.CreateTopic(channelName2)
defer rmq.DestroyTopic(channelName2)
assert.Nil(t, err)
assert.NoError(t, err)
topicMu.Store(channelName2, new(sync.Mutex))
pMsgs := make([]ProducerMessage, 10)
@ -1040,7 +1040,7 @@ func TestRocksmq_CheckPreTopicValid(t *testing.T) {
// pass
err = rmq.CreateTopic(channelName3)
defer rmq.DestroyTopic(channelName3)
assert.Nil(t, err)
assert.NoError(t, err)
topicMu.Store(channelName3, new(sync.Mutex))
err = rmq.CheckTopicValid(channelName3)
@ -1050,10 +1050,10 @@ func TestRocksmq_CheckPreTopicValid(t *testing.T) {
func TestRocksmq_Close(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
@ -1064,7 +1064,7 @@ func TestRocksmq_Close(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
atomic.StoreInt64(&rmq.state, RmqStateStopped)
@ -1084,7 +1084,7 @@ func TestRocksmq_Close(t *testing.T) {
func TestRocksmq_SeekWithNoConsumerError(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1096,7 +1096,7 @@ func TestRocksmq_SeekWithNoConsumerError(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
rmq.CreateTopic("test")
@ -1109,7 +1109,7 @@ func TestRocksmq_SeekWithNoConsumerError(t *testing.T) {
func TestRocksmq_SeekTopicNotExistError(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1121,7 +1121,7 @@ func TestRocksmq_SeekTopicNotExistError(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
assert.Error(t, rmq.Seek("test_topic_not_exist", "", 0))
@ -1131,7 +1131,7 @@ func TestRocksmq_SeekTopicNotExistError(t *testing.T) {
func TestRocksmq_SeekTopicMutexError(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1143,7 +1143,7 @@ func TestRocksmq_SeekTopicMutexError(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
topicMu.Store("test_topic_mutix_error", nil)
@ -1154,7 +1154,7 @@ func TestRocksmq_SeekTopicMutexError(t *testing.T) {
func TestRocksmq_moveConsumePosError(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1166,7 +1166,7 @@ func TestRocksmq_moveConsumePosError(t *testing.T) {
_ = os.RemoveAll(kvName)
defer os.RemoveAll(kvName)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
rmq.CreateTopic("test_moveConsumePos")
@ -1176,7 +1176,7 @@ func TestRocksmq_moveConsumePosError(t *testing.T) {
func TestRocksmq_updateAckedInfoErr(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1190,7 +1190,7 @@ func TestRocksmq_updateAckedInfoErr(t *testing.T) {
params := paramtable.Get()
params.Save(params.RocksmqCfg.PageSize.Key, "10")
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
topicName := "test_updateAckedInfo"
@ -1206,7 +1206,7 @@ func TestRocksmq_updateAckedInfoErr(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
groupName := "test"
@ -1235,7 +1235,7 @@ func TestRocksmq_updateAckedInfoErr(t *testing.T) {
func TestRocksmq_Info(t *testing.T) {
ep := etcdEndpoints()
etcdCli, err := etcd.GetRemoteEtcdClient(ep)
assert.Nil(t, err)
assert.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(etcdCli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := allocator.NewGlobalIDAllocator("dummy", etcdKV)
@ -1249,7 +1249,7 @@ func TestRocksmq_Info(t *testing.T) {
params := paramtable.Get()
params.Save(params.RocksmqCfg.PageSize.Key, "10")
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
topicName := "test_testinfo"
@ -1264,10 +1264,10 @@ func TestRocksmq_Info(t *testing.T) {
_ = rmq.DestroyConsumerGroup(topicName, groupName)
err = rmq.CreateConsumerGroup(topicName, groupName)
assert.Nil(t, err)
assert.NoError(t, err)
err = rmq.RegisterConsumer(consumer)
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, rmq.Info())
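
A hedged aside on what the swap above buys in practice (not part of the diff): testify's assert.Nil only reports that the value was not nil, while assert.NoError includes the error's own text in the failure output, and assert.Error replaces assert.NotNil for the expected-error direction. A minimal self-contained sketch; the test name and sample error are illustrative:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFailureMessages(t *testing.T) {
	var nilErr error
	realErr := errors.New("entry not found")

	// Passing checks under the new convention:
	assert.NoError(t, nilErr) // previously assert.Nil(t, nilErr)
	assert.Error(t, realErr)  // previously assert.NotNil(t, realErr)

	// When NoError does fail, testify prints the error text, roughly:
	//   Received unexpected error: entry not found
	// whereas assert.Nil on the same value would only print:
	//   Expected nil, but got: &errors.errorString{...}
}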

View File

@ -45,14 +45,14 @@ func TestRmqRetention_Basic(t *testing.T) {
params.Save(params.RocksmqCfg.PageSize.Key, "10")
params.Save(params.RocksmqCfg.TickerTimeInSeconds.Key, "2")
rmq, err := NewRocksMQ(rocksdbPath, nil)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
params.Save(params.RocksmqCfg.RetentionSizeInMB.Key, "0")
params.Save(params.RocksmqCfg.RetentionTimeInMinutes.Key, "0")
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
msgNum := 100
@ -63,7 +63,7 @@ func TestRmqRetention_Basic(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
groupName := "test_group"
@ -76,11 +76,11 @@ func TestRmqRetention_Basic(t *testing.T) {
}
rmq.RegisterConsumer(consumer)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs := make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -90,9 +90,9 @@ func TestRmqRetention_Basic(t *testing.T) {
// Seek to a previously consumed message; the message should be cleaned up
err = rmq.ForceSeek(topicName, groupName, cMsgs[msgNum/2].MsgID)
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 0)
// test acked size, acked ts and other meta are updated as expected
@ -140,7 +140,7 @@ func TestRmqRetention_NotConsumed(t *testing.T) {
params.Save(params.RocksmqCfg.PageSize.Key, "10")
params.Save(params.RocksmqCfg.TickerTimeInSeconds.Key, "2")
rmq, err := NewRocksMQ(rocksdbPath, nil)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
params.Save(params.RocksmqCfg.RetentionSizeInMB.Key, "0")
@ -148,7 +148,7 @@ func TestRmqRetention_NotConsumed(t *testing.T) {
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
msgNum := 100
@ -159,7 +159,7 @@ func TestRmqRetention_NotConsumed(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
groupName := "test_group"
@ -172,11 +172,11 @@ func TestRmqRetention_NotConsumed(t *testing.T) {
}
rmq.RegisterConsumer(consumer)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs := make([]ConsumerMessage, 0)
for i := 0; i < 5; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), 5)
@ -194,9 +194,9 @@ func TestRmqRetention_NotConsumed(t *testing.T) {
// Seek to a previously consumed message; the message should be cleaned up
err = rmq.ForceSeek(topicName, groupName, cMsgs[1].MsgID)
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 1)
assert.Equal(t, newRes[0].MsgID, id+4)
@ -251,7 +251,7 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
params.Save(params.RocksmqCfg.TickerTimeInSeconds.Key, "1")
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
// no retention by size
@ -261,7 +261,7 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
msgNum := 100
@ -272,12 +272,12 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
pMsgs[i] = pMsg
}
ids1, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids1))
topicName = "topic_b"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
pMsgs = make([]ProducerMessage, msgNum)
for i := 0; i < msgNum; i++ {
@ -286,7 +286,7 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
pMsgs[i] = pMsg
}
ids2, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids2))
topicName = "topic_a"
@ -304,7 +304,7 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
cMsgs := make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -313,9 +313,9 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
time.Sleep(time.Duration(3) * time.Second)
err = rmq.ForceSeek(topicName, groupName, ids1[10])
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 0)
// test acked size, acked ts and other meta are updated as expected
@ -378,7 +378,7 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
cMsgs = make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -387,9 +387,9 @@ func TestRmqRetention_MultipleTopic(t *testing.T) {
time.Sleep(time.Duration(3) * time.Second)
err = rmq.ForceSeek(topicName, groupName, ids2[10])
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err = rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 0)
}
@ -415,26 +415,26 @@ func TestRetentionInfo_InitRetentionInfo(t *testing.T) {
params := paramtable.Get()
params.Init()
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, rmq)
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
rmq.Close()
rmq, err = NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, rmq)
assert.Equal(t, rmq.isClosed(), false)
// write some data, restart and check.
topicName = "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
topicName = "topic_b"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
msgNum := 100
pMsgs := make([]ProducerMessage, msgNum)
@ -444,7 +444,7 @@ func TestRetentionInfo_InitRetentionInfo(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
rmq.Close()
@ -474,7 +474,7 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
params.Save(params.RocksmqCfg.TickerTimeInSeconds.Key, "1")
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
// no retention by size
@ -484,7 +484,7 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
msgNum := 100
@ -495,7 +495,7 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
groupName := "test_group"
@ -512,7 +512,7 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
cMsgs := make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -527,14 +527,14 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
pMsgs2[i] = pMsg
}
ids2, err := rmq.Produce(topicName, pMsgs2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs2), len(ids2))
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -542,9 +542,9 @@ func TestRmqRetention_PageTimeExpire(t *testing.T) {
assert.Eventually(t, func() bool {
err = rmq.ForceSeek(topicName, groupName, ids[0])
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 1)
// point to first not consumed messages
return newRes[0].MsgID == ids2[0]
@ -600,7 +600,7 @@ func TestRmqRetention_PageSizeExpire(t *testing.T) {
params.Save(params.RocksmqCfg.TickerTimeInSeconds.Key, "1")
rmq, err := NewRocksMQ(rocksdbPath, idAllocator)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.Close()
// no retention by size
@ -610,7 +610,7 @@ func TestRmqRetention_PageSizeExpire(t *testing.T) {
topicName := "topic_a"
err = rmq.CreateTopic(topicName)
assert.Nil(t, err)
assert.NoError(t, err)
defer rmq.DestroyTopic(topicName)
// need to be larger than 1M
@ -622,7 +622,7 @@ func TestRmqRetention_PageSizeExpire(t *testing.T) {
pMsgs[i] = pMsg
}
ids, err := rmq.Produce(topicName, pMsgs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(pMsgs), len(ids))
groupName := "test_group"
@ -635,11 +635,11 @@ func TestRmqRetention_PageSizeExpire(t *testing.T) {
}
rmq.RegisterConsumer(consumer)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs := make([]ConsumerMessage, 0)
for i := 0; i < msgNum; i++ {
cMsg, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
cMsgs = append(cMsgs, cMsg[0])
}
assert.Equal(t, len(cMsgs), msgNum)
@ -647,9 +647,9 @@ func TestRmqRetention_PageSizeExpire(t *testing.T) {
// wait for enough time for page expiration
time.Sleep(time.Duration(2) * time.Second)
err = rmq.ForceSeek(topicName, groupName, ids[0])
assert.Nil(t, err)
assert.NoError(t, err)
newRes, err := rmq.Consume(topicName, groupName, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(newRes), 1)
// make sure clean up happens
assert.True(t, newRes[0].MsgID > ids[0])
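
Many of the retention tests above consume inside loops, where a bare failure would not say which iteration broke. NoError and Error also accept optional message arguments that end up in the report; a hedged, self-contained sketch (consumeOnce is a hypothetical stand-in for a single rmq.Consume call, not an API from this repository):

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// consumeOnce is a hypothetical stand-in for a single rmq.Consume call;
// it only fails for a negative index, so this example test passes.
func consumeOnce(i int) (string, error) {
	if i < 0 {
		return "", fmt.Errorf("page already expired")
	}
	return fmt.Sprintf("message_%d", i), nil
}

func TestConsumeLoopWithContext(t *testing.T) {
	for i := 0; i < 10; i++ {
		msg, err := consumeOnce(i)
		// If this ever failed, the report would contain the error text plus
		// a Messages line such as "consume at i=3", naming the iteration.
		assert.NoError(t, err, "consume at i=%d", i)
		assert.NotEmpty(t, msg)
	}
}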

View File

@ -36,14 +36,14 @@ func TestRmsFactory(t *testing.T) {
ctx := context.Background()
_, err := rmsFactory.NewMsgStream(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = rmsFactory.NewTtMsgStream(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = rmsFactory.NewQueryMsgStream(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = rmsFactory.NewMsgStreamDisposer(ctx)([]string{"hello"}, "xx")
assert.Nil(t, err)
assert.NoError(t, err)
}
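
A related option, outside the scope of this change: where later statements depend on a call having succeeded, testify's require package offers the same NoError/Error helpers but aborts the test at the first failure instead of continuing. A minimal sketch using a throwaway temp file (the path and contents are illustrative):

package example

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestRequireStopsEarly(t *testing.T) {
	path := filepath.Join(t.TempDir(), "example.txt") // illustrative temp file
	require.NoError(t, os.WriteFile(path, []byte("hello"), 0o600))

	// Unlike assert.NoError, require.NoError aborts the test on failure,
	// so the lines below only run when err is nil and f is safe to use.
	f, err := os.Open(path)
	require.NoError(t, err)
	defer f.Close()

	info, err := f.Stat()
	require.NoError(t, err)
	require.NotZero(t, info.Size())
}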

View File

@ -50,7 +50,7 @@ func TestMain(m *testing.M) {
func Test_NewRmqClient(t *testing.T) {
client, err := createRmqClient()
defer client.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
}
@ -58,7 +58,7 @@ func TestRmqClient_CreateProducer(t *testing.T) {
opts := rocksmqimplclient.Options{}
client, err := NewClient(opts)
defer client.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
topic := "TestRmqClient_CreateProducer"
@ -66,7 +66,7 @@ func TestRmqClient_CreateProducer(t *testing.T) {
producer, err := client.CreateProducer(proOpts)
defer producer.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, producer)
rmqProducer := producer.(*rmqProducer)
@ -78,7 +78,7 @@ func TestRmqClient_CreateProducer(t *testing.T) {
Properties: nil,
}
_, err = rmqProducer.Send(context.TODO(), msg)
assert.Nil(t, err)
assert.NoError(t, err)
invalidOpts := mqwrapper.ProducerOptions{Topic: ""}
producer, e := client.CreateProducer(invalidOpts)
@ -88,13 +88,13 @@ func TestRmqClient_CreateProducer(t *testing.T) {
func TestRmqClient_GetLatestMsg(t *testing.T) {
client, err := createRmqClient()
assert.Nil(t, err)
assert.NoError(t, err)
defer client.Close()
topic := fmt.Sprintf("t2GetLatestMsg-%d", rand.Int())
proOpts := mqwrapper.ProducerOptions{Topic: topic}
producer, err := client.CreateProducer(proOpts)
assert.Nil(t, err)
assert.NoError(t, err)
defer producer.Close()
for i := 0; i < 10; i++ {
@ -103,7 +103,7 @@ func TestRmqClient_GetLatestMsg(t *testing.T) {
Properties: nil,
}
_, err = producer.Send(context.TODO(), msg)
assert.Nil(t, err)
assert.NoError(t, err)
}
subName := "subName"
@ -115,10 +115,10 @@ func TestRmqClient_GetLatestMsg(t *testing.T) {
}
consumer, err := client.Subscribe(consumerOpts)
assert.Nil(t, err)
assert.NoError(t, err)
expectLastMsg, err := consumer.GetLatestMsgID()
assert.Nil(t, err)
assert.NoError(t, err)
var actualLastMsg mqwrapper.Message
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
@ -135,21 +135,21 @@ func TestRmqClient_GetLatestMsg(t *testing.T) {
}
require.NotNil(t, actualLastMsg)
ret, err := expectLastMsg.LessOrEqualThan(actualLastMsg.ID().Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, ret)
}
func TestRmqClient_Subscribe(t *testing.T) {
client, err := createRmqClient()
defer client.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, client)
topic := "TestRmqClient_Subscribe"
proOpts := mqwrapper.ProducerOptions{Topic: topic}
producer, err := client.CreateProducer(proOpts)
defer producer.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, producer)
subName := "subName"
@ -160,7 +160,7 @@ func TestRmqClient_Subscribe(t *testing.T) {
BufSize: 0,
}
consumer, err := client.Subscribe(consumerOpts)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, consumer)
consumerOpts = mqwrapper.ConsumerOptions{
@ -171,13 +171,13 @@ func TestRmqClient_Subscribe(t *testing.T) {
}
consumer, err = client.Subscribe(consumerOpts)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, consumer)
consumerOpts.Topic = topic
consumer, err = client.Subscribe(consumerOpts)
defer consumer.Close()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, consumer)
assert.Equal(t, consumer.Subscription(), subName)
@ -186,7 +186,7 @@ func TestRmqClient_Subscribe(t *testing.T) {
Properties: nil,
}
_, err = producer.Send(context.TODO(), msg)
assert.Nil(t, err)
assert.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
@ -222,13 +222,13 @@ func TestRmqClient_StringToMsgID(t *testing.T) {
str := "5"
res, err := client.StringToMsgID(str)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, res)
str = "X"
res, err = client.StringToMsgID(str)
assert.Nil(t, res)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestRmqClient_BytesToMsgID(t *testing.T) {
@ -239,7 +239,7 @@ func TestRmqClient_BytesToMsgID(t *testing.T) {
binary := pulsarwrapper.SerializePulsarMsgID(mid)
res, err := client.BytesToMsgID(binary)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, res)
}
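
Note the split visible in this file: only error values move to NoError/Error, while assertions on ordinary values such as res or consumer stay on assert.Nil / assert.NotNil, since the new helpers accept only an error. A self-contained sketch of that convention; parseID is a hypothetical helper, not an API from this repository:

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

// parseID is a hypothetical helper used only to illustrate the convention.
func parseID(s string) (*int64, error) {
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return nil, err
	}
	return &v, nil
}

func TestConvention(t *testing.T) {
	res, err := parseID("X")
	assert.Error(t, err) // error value: Error / NoError
	assert.Nil(t, res)   // non-error value: Nil / NotNil

	res, err = parseID("5")
	assert.NoError(t, err)
	assert.NotNil(t, res)
}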

View File

@ -54,15 +54,15 @@ func TestLessOrEqualThan(t *testing.T) {
}
ret, err := rid1.LessOrEqualThan(rid2.Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, ret)
ret, err = rid2.LessOrEqualThan(rid1.Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.False(t, ret)
ret, err = rid1.LessOrEqualThan(rid1.Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, ret)
}
@ -77,14 +77,14 @@ func Test_Equal(t *testing.T) {
{
ret, err := rid1.Equal(rid1.Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, ret)
}
{
ret, err := rid1.Equal(rid2.Serialize())
assert.Nil(t, err)
assert.NoError(t, err)
assert.False(t, ret)
}
}

View File

@ -95,7 +95,7 @@ func Test_NewMqMsgStream(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
_, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
}(parameters[i].client)
}
}
@ -110,7 +110,7 @@ func TestMqMsgStream_AsProducer(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// empty channel name
m.AsProducer([]string{""})
@ -128,7 +128,7 @@ func TestMqMsgStream_AsConsumer(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// repeat calling AsConsumer
m.AsConsumer([]string{"a"}, "b", mqwrapper.SubscriptionPositionUnknown)
@ -146,7 +146,7 @@ func TestMqMsgStream_ComputeProduceChannelIndexes(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// empty parameters
reBucketValues := m.ComputeProduceChannelIndexes([]msgstream.TsMsg{})
@ -190,7 +190,7 @@ func TestMqMsgStream_GetProduceChannels(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// empty if not called AsProducer yet
chs := m.GetProduceChannels()
@ -213,7 +213,7 @@ func TestMqMsgStream_Produce(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// Produce before called AsProducer
insertMsg := &msgstream.InsertMsg{
@ -242,7 +242,7 @@ func TestMqMsgStream_Produce(t *testing.T) {
Msgs: []msgstream.TsMsg{insertMsg},
}
err = m.Produce(msgPack)
assert.NotNil(t, err)
assert.Error(t, err)
}(parameters[i].client)
}
}
@ -256,11 +256,11 @@ func TestMqMsgStream_Broadcast(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// Broadcast nil pointer
_, err = m.Broadcast(nil)
assert.NotNil(t, err)
assert.Error(t, err)
}(parameters[i].client)
}
}
@ -277,7 +277,7 @@ func TestMqMsgStream_Consume(t *testing.T) {
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
m, err := msgstream.NewMqMsgStream(ctx, 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
wg.Add(1)
go func() {
@ -315,7 +315,7 @@ func TestMqMsgStream_Chan(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
ch := m.Chan()
assert.NotNil(t, ch)
@ -332,7 +332,7 @@ func TestMqMsgStream_SeekNotSubscribed(t *testing.T) {
for i := range parameters {
func(client mqwrapper.Client) {
m, err := msgstream.NewMqMsgStream(context.Background(), 100, 100, client, factory.NewUnmarshalDispatcher())
assert.Nil(t, err)
assert.NoError(t, err)
// seek in not subscribed channel
p := []*msgpb.MsgPosition{
@ -341,7 +341,7 @@ func TestMqMsgStream_SeekNotSubscribed(t *testing.T) {
},
}
err = m.Seek(p)
assert.NotNil(t, err)
assert.Error(t, err)
}(parameters[i].client)
}
}
@ -521,13 +521,13 @@ func TestStream_RmqTtMsgStream_DuplicatedIDs(t *testing.T) {
inputStream, outputStream := initRmqTtStream(ctx, producerChannels, consumerChannels, consumerSubName)
_, err := inputStream.Broadcast(&msgPack0)
assert.Nil(t, err)
assert.NoError(t, err)
err = inputStream.Produce(&msgPack1)
assert.Nil(t, err)
assert.NoError(t, err)
err = inputStream.Produce(&msgPack2)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = inputStream.Broadcast(&msgPack3)
assert.Nil(t, err)
assert.NoError(t, err)
receivedMsg := consumer(ctx, outputStream)
assert.Equal(t, len(receivedMsg.Msgs), 3)
@ -593,21 +593,21 @@ func TestStream_RmqTtMsgStream_Seek(t *testing.T) {
inputStream, outputStream := initRmqTtStream(ctx, producerChannels, consumerChannels, consumerSubName)
_, err := inputStream.Broadcast(&msgPack0)
assert.Nil(t, err)
assert.NoError(t, err)
err = inputStream.Produce(&msgPack1)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = inputStream.Broadcast(&msgPack2)
assert.Nil(t, err)
assert.NoError(t, err)
err = inputStream.Produce(&msgPack3)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = inputStream.Broadcast(&msgPack4)
assert.Nil(t, err)
assert.NoError(t, err)
err = inputStream.Produce(&msgPack5)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = inputStream.Broadcast(&msgPack6)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = inputStream.Broadcast(&msgPack7)
assert.Nil(t, err)
assert.NoError(t, err)
receivedMsg := consumer(ctx, outputStream)
assert.Equal(t, len(receivedMsg.Msgs), 2)
@ -681,7 +681,7 @@ func TestStream_RMqMsgStream_SeekInvalidMessage(t *testing.T) {
}
err := inputStream.Produce(msgPack)
assert.Nil(t, err)
assert.NoError(t, err)
var seekPosition *msgpb.MsgPosition
for i := 0; i < 10; i++ {
result := consumer(ctx, outputStream)
@ -708,14 +708,14 @@ func TestStream_RMqMsgStream_SeekInvalidMessage(t *testing.T) {
}
err = outputStream2.Seek(p)
assert.Nil(t, err)
assert.NoError(t, err)
for i := 10; i < 20; i++ {
insertMsg := getTsMsg(commonpb.MsgType_Insert, int64(i))
msgPack.Msgs = append(msgPack.Msgs, insertMsg)
}
err = inputStream.Produce(msgPack)
assert.Nil(t, err)
assert.NoError(t, err)
result := consumer(ctx, outputStream2)
assert.Equal(t, result.Msgs[0].ID(), int64(1))

View File

@ -35,7 +35,7 @@ func TestValidAuth(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
res = validAuth(ctx, []string{crypto.Base64Encode("mockUser:mockPass")})
assert.True(t, res)
@ -62,26 +62,26 @@ func TestAuthenticationInterceptor(t *testing.T) {
defer paramtable.Get().Reset(Params.CommonCfg.AuthorizationEnabled.Key) // mock authorization is turned on
// no metadata
_, err := AuthenticationInterceptor(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
// mock metacache
rootCoord := &MockRootCoordClientInterface{}
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err = InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
// with invalid metadata
md := metadata.Pairs("xxx", "yyy")
ctx = metadata.NewIncomingContext(ctx, md)
_, err = AuthenticationInterceptor(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
// with valid username/password
md = metadata.Pairs(util.HeaderAuthorize, crypto.Base64Encode("mockUser:mockPass"))
ctx = metadata.NewIncomingContext(ctx, md)
_, err = AuthenticationInterceptor(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
// with valid sourceId
md = metadata.Pairs("sourceid", crypto.Base64Encode(util.MemberCredID))
ctx = metadata.NewIncomingContext(ctx, md)
_, err = AuthenticationInterceptor(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
}

View File

@ -32,7 +32,7 @@ func Test_parseDummyRequestType(t *testing.T) {
// not in json format
notInJSONFormatStr := "not in json format string"
_, err = parseDummyRequestType(notInJSONFormatStr)
assert.NotNil(t, err)
assert.Error(t, err)
// only contain other field, in json format
otherField := "other_field"
@ -40,11 +40,11 @@ func Test_parseDummyRequestType(t *testing.T) {
m1 := make(map[string]string)
m1[otherField] = otherFieldValue
bs1, err := json.Marshal(m1)
assert.Nil(t, err)
assert.NoError(t, err)
log.Info("Test_parseDummyRequestType",
zap.String("json", string(bs1)))
ret1, err := parseDummyRequestType(string(bs1))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret1.RequestType))
// normal case
@ -53,11 +53,11 @@ func Test_parseDummyRequestType(t *testing.T) {
m2 := make(map[string]string)
m2[key] = value
bs2, err := json.Marshal(m2)
assert.Nil(t, err)
assert.NoError(t, err)
log.Info("Test_parseDummyRequestType",
zap.String("json", string(bs2)))
ret2, err := parseDummyRequestType(string(bs2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, value, ret2.RequestType)
// contain other field and request_type
@ -65,11 +65,11 @@ func Test_parseDummyRequestType(t *testing.T) {
m3[key] = value
m3[otherField] = otherFieldValue
bs3, err := json.Marshal(m3)
assert.Nil(t, err)
assert.NoError(t, err)
log.Info("Test_parseDummyRequestType",
zap.String("json", string(bs3)))
ret3, err := parseDummyRequestType(string(bs3))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, value, ret3.RequestType)
}
@ -79,7 +79,7 @@ func Test_parseDummyQueryRequest(t *testing.T) {
// not in json format
notInJSONFormatStr := "not in json format string"
_, err = parseDummyQueryRequest(notInJSONFormatStr)
assert.NotNil(t, err)
assert.Error(t, err)
// only contain other field, in json format
otherField := "other_field"
@ -89,9 +89,9 @@ func Test_parseDummyQueryRequest(t *testing.T) {
bs1, err := json.Marshal(m1)
log.Info("Test_parseDummyQueryRequest",
zap.String("json", string(bs1)))
assert.Nil(t, err)
assert.NoError(t, err)
ret1, err := parseDummyQueryRequest(string(bs1))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret1.RequestType))
assert.Equal(t, 0, len(ret1.DbName))
assert.Equal(t, 0, len(ret1.CollectionName))
@ -123,9 +123,9 @@ func Test_parseDummyQueryRequest(t *testing.T) {
bs2, err := json.Marshal(m2)
log.Info("Test_parseDummyQueryRequest",
zap.String("json", string(bs2)))
assert.Nil(t, err)
assert.NoError(t, err)
ret2, err := parseDummyQueryRequest(string(bs2))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, requestTypeValue, ret2.RequestType)
assert.Equal(t, dbNameValue, ret2.DbName)
assert.Equal(t, collectionNameValue, ret2.CollectionName)
@ -145,9 +145,9 @@ func Test_parseDummyQueryRequest(t *testing.T) {
bs3, err := json.Marshal(m3)
log.Info("Test_parseDummyQueryRequest",
zap.String("json", string(bs3)))
assert.Nil(t, err)
assert.NoError(t, err)
ret3, err := parseDummyQueryRequest(string(bs3))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, requestTypeValue, ret3.RequestType)
assert.Equal(t, dbNameValue, ret3.DbName)
assert.Equal(t, collectionNameValue, ret3.CollectionName)
@ -162,9 +162,9 @@ func Test_parseDummyQueryRequest(t *testing.T) {
bs4, err := json.Marshal(m4)
log.Info("Test_parseDummyQueryRequest",
zap.String("json", string(bs4)))
assert.Nil(t, err)
assert.NoError(t, err)
ret4, err := parseDummyQueryRequest(string(bs4))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, requestTypeValue, ret4.RequestType)
assert.Equal(t, dbNameValue, ret4.DbName)
assert.Equal(t, collectionNameValue, ret2.CollectionName)
@ -180,9 +180,9 @@ func Test_parseDummyQueryRequest(t *testing.T) {
bs5, err := json.Marshal(m5)
log.Info("Test_parseDummyQueryRequest",
zap.String("json", string(bs5)))
assert.Nil(t, err)
assert.NoError(t, err)
ret5, err := parseDummyQueryRequest(string(bs5))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, requestTypeValue, ret5.RequestType)
assert.Equal(t, dbNameValue, ret5.DbName)
assert.Equal(t, collectionNameValue, ret2.CollectionName)
@ -194,11 +194,11 @@ func Test_parseDummyQueryRequest(t *testing.T) {
// func TestParseDummyQueryRequest(t *testing.T) {
// invalidStr := `{"request_type":"query"`
// _, err := parseDummyQueryRequest(invalidStr)
// assert.NotNil(t, err)
// assert.Error(t, err)
// onlytypeStr := `{"request_type":"query"}`
// drr, err := parseDummyQueryRequest(onlytypeStr)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, drr.RequestType, "query")
// assert.Equal(t, len(drr.DbName), 0)
@ -211,7 +211,7 @@ func Test_parseDummyQueryRequest(t *testing.T) {
// "output_fields": ["_id", "age"]
// }`
// drr2, err := parseDummyQueryRequest(fulltypeStr)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, drr2.RequestType, "retrieve")
// assert.Equal(t, len(drr2.DbName), 0)
// assert.Equal(t, drr2.CollectionName, "test")

View File

@ -19,7 +19,7 @@ func TestInitHook(t *testing.T) {
paramtable.Get().Save(Params.ProxyCfg.SoPath.Key, "/a/b/hook.so")
err := initHook()
assert.NotNil(t, err)
assert.Error(t, err)
paramtable.Get().Save(Params.ProxyCfg.SoPath.Key, "")
}

View File

@ -213,17 +213,17 @@ func TestMetaCache_GetCollection(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := globalMetaCache.GetCollectionID(ctx, "collection1")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(1))
assert.Equal(t, rootCoord.GetAccessCount(), 1)
// shouldn't access the remote root coord.
schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -231,11 +231,11 @@ func TestMetaCache_GetCollection(t *testing.T) {
})
id, err = globalMetaCache.GetCollectionID(ctx, "collection2")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(2))
schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection2")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -245,11 +245,11 @@ func TestMetaCache_GetCollection(t *testing.T) {
// test to get from cache, this should trigger root request
id, err = globalMetaCache.GetCollectionID(ctx, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(1))
schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -264,17 +264,17 @@ func TestMetaCache_GetCollectionName(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
collection, err := globalMetaCache.GetCollectionName(ctx, 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 1)
// shouldn't access the remote root coord.
schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -282,11 +282,11 @@ func TestMetaCache_GetCollectionName(t *testing.T) {
})
collection, err = globalMetaCache.GetCollectionName(ctx, 1)
assert.Equal(t, rootCoord.GetAccessCount(), 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, "collection1")
schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection2")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -296,11 +296,11 @@ func TestMetaCache_GetCollectionName(t *testing.T) {
// test to get from cache, this should trigger root request
collection, err = globalMetaCache.GetCollectionName(ctx, 1)
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, collection, "collection1")
schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Equal(t, rootCoord.GetAccessCount(), 2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -314,17 +314,17 @@ func TestMetaCache_GetCollectionFailure(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
rootCoord.Error = true
schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, schema)
rootCoord.Error = false
schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -333,7 +333,7 @@ func TestMetaCache_GetCollectionFailure(t *testing.T) {
rootCoord.Error = true
// should be cached with no error
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -347,13 +347,13 @@ func TestMetaCache_GetNonExistCollection(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := globalMetaCache.GetCollectionID(ctx, "collection3")
assert.NotNil(t, err)
assert.Error(t, err)
assert.Equal(t, id, int64(0))
schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection3")
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, schema)
}
@ -363,19 +363,19 @@ func TestMetaCache_GetPartitionID(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
id, err := globalMetaCache.GetPartitionID(ctx, "collection1", "par1")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(1))
id, err = globalMetaCache.GetPartitionID(ctx, "collection1", "par2")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(2))
id, err = globalMetaCache.GetPartitionID(ctx, "collection2", "par1")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(3))
id, err = globalMetaCache.GetPartitionID(ctx, "collection2", "par2")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, id, typeutil.UniqueID(4))
}
@ -385,7 +385,7 @@ func TestMetaCache_ConcurrentTest1(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
var wg sync.WaitGroup
cnt := 100
@ -394,7 +394,7 @@ func TestMetaCache_ConcurrentTest1(t *testing.T) {
for i := 0; i < cnt; i++ {
//GetCollectionSchema will never fail
schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
@ -439,11 +439,11 @@ func TestMetaCache_GetPartitionError(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
mgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
// Test the case where ShowPartitionsResponse is not aligned
id, err := globalMetaCache.GetPartitionID(ctx, "errorCollection", "par1")
assert.NotNil(t, err)
assert.Error(t, err)
log.Debug(err.Error())
assert.Equal(t, id, typeutil.UniqueID(0))
@ -454,13 +454,13 @@ func TestMetaCache_GetPartitionError(t *testing.T) {
// Test non-existent tables
id, err = globalMetaCache.GetPartitionID(ctx, "nonExisted", "par1")
assert.NotNil(t, err)
assert.Error(t, err)
log.Debug(err.Error())
assert.Equal(t, id, typeutil.UniqueID(0))
// Test non-existent partition
id, err = globalMetaCache.GetPartitionID(ctx, "collection1", "par3")
assert.NotNil(t, err)
assert.Error(t, err)
log.Debug(err.Error())
assert.Equal(t, id, typeutil.UniqueID(0))
}
@ -613,7 +613,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
return nil, fmt.Errorf("mock error")
}
err := InitMetaCache(context.Background(), client, qc, mgr)
assert.NotNil(t, err)
assert.Error(t, err)
client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
return &internalpb.ListPolicyResponse{
@ -624,7 +624,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
}, nil
}
err = InitMetaCache(context.Background(), client, qc, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GetPrivilegeInfo", func(t *testing.T) {
@ -638,7 +638,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
}, nil
}
err := InitMetaCache(context.Background(), client, qc, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
policyInfos := globalMetaCache.GetPrivilegeInfo(context.Background())
assert.Equal(t, 3, len(policyInfos))
roles := globalMetaCache.GetUserRole("foo")
@ -656,32 +656,32 @@ func TestMetaCache_PolicyInfo(t *testing.T) {
}, nil
}
err := InitMetaCache(context.Background(), client, qc, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: typeutil.CacheGrantPrivilege, OpKey: "policyX"})
assert.Nil(t, err)
assert.NoError(t, err)
policyInfos := globalMetaCache.GetPrivilegeInfo(context.Background())
assert.Equal(t, 4, len(policyInfos))
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: typeutil.CacheRevokePrivilege, OpKey: "policyX"})
assert.Nil(t, err)
assert.NoError(t, err)
policyInfos = globalMetaCache.GetPrivilegeInfo(context.Background())
assert.Equal(t, 3, len(policyInfos))
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: typeutil.CacheAddUserToRole, OpKey: funcutil.EncodeUserRoleCache("foo", "role3")})
assert.Nil(t, err)
assert.NoError(t, err)
roles := globalMetaCache.GetUserRole("foo")
assert.Equal(t, 3, len(roles))
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: typeutil.CacheRemoveUserFromRole, OpKey: funcutil.EncodeUserRoleCache("foo", "role3")})
assert.Nil(t, err)
assert.NoError(t, err)
roles = globalMetaCache.GetUserRole("foo")
assert.Equal(t, 2, len(roles))
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: typeutil.CacheGrantPrivilege, OpKey: ""})
assert.NotNil(t, err)
assert.Error(t, err)
err = globalMetaCache.RefreshPolicyInfo(typeutil.CacheOp{OpType: 100, OpKey: "policyX"})
assert.NotNil(t, err)
assert.Error(t, err)
})
}
@ -691,7 +691,7 @@ func TestMetaCache_RemoveCollection(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
shardMgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, shardMgr)
assert.Nil(t, err)
assert.NoError(t, err)
queryCoord.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{
@ -735,7 +735,7 @@ func TestMetaCache_ExpireShardLeaderCache(t *testing.T) {
queryCoord := &types.MockQueryCoord{}
shardMgr := newShardClientMgr()
err := InitMetaCache(ctx, rootCoord, queryCoord, shardMgr)
assert.Nil(t, err)
assert.NoError(t, err)
queryCoord.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{

View File

@ -70,10 +70,10 @@ func assertValidExpr(t *testing.T, schema *typeutil.SchemaHelper, exprStr string
func assertValidExprV2(t *testing.T, schema *typeutil.SchemaHelper, exprStr string) {
expr1, err := parseExpr(schema, exprStr)
assert.Nil(t, err)
assert.NoError(t, err)
expr2, err := planparserv2.ParseExpr(schema, exprStr)
assert.Nil(t, err)
assert.NoError(t, err)
if !planparserv2.CheckPredicatesIdentical(expr1, expr2) {
t.Log("expr: ", exprStr)
@ -98,15 +98,15 @@ func assertInvalidExpr(t *testing.T, schema *typeutil.SchemaHelper, exprStr stri
func assertValidSearchPlan(t *testing.T, schema *schemapb.CollectionSchema, exprStr string, vectorFieldName string, queryInfo *planpb.QueryInfo) {
_, err := createQueryPlan(schema, exprStr, vectorFieldName, queryInfo)
assert.Nil(t, err)
assert.NoError(t, err)
}
func assertValidSearchPlanV2(t *testing.T, schema *schemapb.CollectionSchema, exprStr string, vectorFieldName string, queryInfo *planpb.QueryInfo) {
planProto1, err := createQueryPlan(schema, exprStr, vectorFieldName, queryInfo)
assert.Nil(t, err)
assert.NoError(t, err)
planProto2, err := planparserv2.CreateSearchPlan(schema, exprStr, vectorFieldName, queryInfo)
assert.Nil(t, err)
assert.NoError(t, err)
expr1 := planProto1.GetVectorAnns().GetPredicates()
assert.NotNil(t, expr1)
@ -138,7 +138,7 @@ func assertInvalidSearchPlan(t *testing.T, schema *schemapb.CollectionSchema, ex
func TestParseExpr_Naive(t *testing.T) {
schemaPb := newTestSchema()
schema, err := typeutil.CreateSchemaHelper(schemaPb)
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("test UnaryNode", func(t *testing.T) {
exprStrs := []string{
@ -337,7 +337,7 @@ func TestParsePlanNode_Naive(t *testing.T) {
func TestExternalParser(t *testing.T) {
ast, err := ant_parser.Parse(`!(1 < a < 2 or b in [1, 2, 3]) or (c < 3 and b > 5) and (d > "str1" or d < "str2")`)
// NOTE: probe ast here via IDE
assert.Nil(t, err)
assert.NoError(t, err)
println(ast.Node.Location().Column)
}
@ -363,7 +363,7 @@ func TestExprPlan_Str(t *testing.T) {
// without filter
planProto, err := createQueryPlan(schema, "", "fakevec", queryInfo)
assert.Nil(t, err)
assert.NoError(t, err)
dbgStr := proto.MarshalTextString(planProto)
println(dbgStr)

View File

@ -28,20 +28,20 @@ func TestPrivilegeInterceptor(t *testing.T) {
DbName: "db_test",
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Authorization Enabled", func(t *testing.T) {
paramtable.Get().Save(Params.CommonCfg.AuthorizationEnabled.Key, "true")
_, err := PrivilegeInterceptor(ctx, &milvuspb.HasCollectionRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.LoadCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
ctx = GetContext(context.Background(), "alice:123456")
client := &MockRootCoordClientInterface{}
@ -71,71 +71,71 @@ func TestPrivilegeInterceptor(t *testing.T) {
DbName: "db_test",
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "root:123456"), &milvuspb.LoadCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
err = InitMetaCache(ctx, client, queryCoord, mgr)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.HasCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.LoadCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.GetLoadingProgressRequest{
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.GetLoadStateRequest{
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
fooCtx := GetContext(context.Background(), "foo:123456")
_, err = PrivilegeInterceptor(fooCtx, &milvuspb.LoadCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.InsertRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.UpsertRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(fooCtx, &milvuspb.GetLoadingProgressRequest{
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(fooCtx, &milvuspb.GetLoadStateRequest{
CollectionName: "col1",
})
assert.NotNil(t, err)
assert.Error(t, err)
_, err = PrivilegeInterceptor(ctx, &milvuspb.FlushRequest{
DbName: "db_test",
CollectionNames: []string{"col1"},
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.LoadCollectionRequest{
DbName: "db_test",
CollectionName: "col1",
})
assert.Nil(t, err)
assert.NoError(t, err)
g := sync.WaitGroup{}
for i := 0; i < 20; i++ {
@ -166,7 +166,7 @@ func TestResourceGroupPrivilege(t *testing.T) {
paramtable.Get().Save(Params.CommonCfg.AuthorizationEnabled.Key, "true")
_, err := PrivilegeInterceptor(ctx, &milvuspb.ListResourceGroupsRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
ctx = GetContext(context.Background(), "fooo:123456")
client := &MockRootCoordClientInterface{}
@ -196,26 +196,26 @@ func TestResourceGroupPrivilege(t *testing.T) {
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.CreateResourceGroupRequest{
ResourceGroup: "rg",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.DropResourceGroupRequest{
ResourceGroup: "rg",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.DescribeResourceGroupRequest{
ResourceGroup: "rg",
})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.ListResourceGroupsRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.TransferNodeRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = PrivilegeInterceptor(GetContext(context.Background(), "fooo:123456"), &milvuspb.TransferReplicaRequest{})
assert.Nil(t, err)
assert.NoError(t, err)
})
}


@ -1675,7 +1675,7 @@ func TestProxy(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Healthy)
resp, err := proxy.Import(context.TODO(), req)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Nil(t, err)
assert.NoError(t, err)
// Wait a bit for complete import to start.
time.Sleep(2 * time.Second)
})
@ -3976,7 +3976,7 @@ func Test_GetCompactionState(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Healthy)
resp, err := proxy.GetCompactionState(context.TODO(), nil)
assert.EqualValues(t, &milvuspb.GetCompactionStateResponse{}, resp)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("get compaction state with unhealthy proxy", func(t *testing.T) {
@ -3985,7 +3985,7 @@ func Test_GetCompactionState(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.GetCompactionState(context.TODO(), nil)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}
@ -3996,7 +3996,7 @@ func Test_ManualCompaction(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Healthy)
resp, err := proxy.ManualCompaction(context.TODO(), nil)
assert.EqualValues(t, &milvuspb.ManualCompactionResponse{}, resp)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test manual compaction with unhealthy", func(t *testing.T) {
datacoord := &DataCoordMock{}
@ -4004,7 +4004,7 @@ func Test_ManualCompaction(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.ManualCompaction(context.TODO(), nil)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}
@ -4015,7 +4015,7 @@ func Test_GetCompactionStateWithPlans(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Healthy)
resp, err := proxy.GetCompactionStateWithPlans(context.TODO(), nil)
assert.EqualValues(t, &milvuspb.GetCompactionPlansResponse{}, resp)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test get compaction state with plans with unhealthy proxy", func(t *testing.T) {
datacoord := &DataCoordMock{}
@ -4023,7 +4023,7 @@ func Test_GetCompactionStateWithPlans(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.GetCompactionStateWithPlans(context.TODO(), nil)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}
@ -4034,7 +4034,7 @@ func Test_GetFlushState(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Healthy)
resp, err := proxy.GetFlushState(context.TODO(), nil)
assert.EqualValues(t, &milvuspb.GetFlushStateResponse{}, resp)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test get flush state with unhealthy proxy", func(t *testing.T) {
@ -4043,7 +4043,7 @@ func Test_GetFlushState(t *testing.T) {
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.GetFlushState(context.TODO(), nil)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}
@ -4175,14 +4175,14 @@ func TestProxy_GetImportState(t *testing.T) {
resp, err := proxy.GetImportState(context.TODO(), req)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test get import state with unhealthy", func(t *testing.T) {
proxy := &Proxy{rootCoord: rootCoord}
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.GetImportState(context.TODO(), req)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}
@ -4196,14 +4196,14 @@ func TestProxy_ListImportTasks(t *testing.T) {
resp, err := proxy.ListImportTasks(context.TODO(), req)
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("test list import tasks with unhealthy", func(t *testing.T) {
proxy := &Proxy{rootCoord: rootCoord}
proxy.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := proxy.ListImportTasks(context.TODO(), req)
assert.EqualValues(t, unhealthyStatus(), resp.Status)
assert.Nil(t, err)
assert.NoError(t, err)
})
}


@ -30,7 +30,7 @@ func Test_insertRepackFunc(t *testing.T) {
// tsMsgs is empty
ret1, err := insertRepackFunc(nil, [][]int32{{1, 2}})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret1))
// hashKeys is empty
@ -39,17 +39,17 @@ func Test_insertRepackFunc(t *testing.T) {
&msgstream.InsertMsg{}, // not important
}
ret2, err := insertRepackFunc(tsMsgs2, nil)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret2)
// len(hashKeys) < len(tsMsgs), 1 < 2
ret2, err = insertRepackFunc(tsMsgs2, [][]int32{{1, 2}})
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret2)
// both tsMsgs & hashKeys are empty
ret3, err := insertRepackFunc(nil, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret3))
num := rand.Int()%100 + 1
@ -63,7 +63,7 @@ func Test_insertRepackFunc(t *testing.T) {
// len(hashKeys) = len(tsMsgs), but no hash key
hashKeys1 := make([][]int32, num)
ret4, err := insertRepackFunc(tsMsgs4, hashKeys1)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret4)
// all messages are shuffled to same bucket
@ -73,7 +73,7 @@ func Test_insertRepackFunc(t *testing.T) {
hashKeys2[i] = []int32{key}
}
ret5, err := insertRepackFunc(tsMsgs4, hashKeys2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(ret5))
assert.Equal(t, num, len(ret5[key].Msgs))
@ -83,7 +83,7 @@ func Test_insertRepackFunc(t *testing.T) {
hashKeys3[i] = []int32{int32(i)}
}
ret6, err := insertRepackFunc(tsMsgs4, hashKeys3)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(ret6))
for key := range ret6 {
assert.Equal(t, 1, len(ret6[key].Msgs))
@ -103,7 +103,7 @@ func Test_insertRepackFunc(t *testing.T) {
}
}
ret7, err := insertRepackFunc(tsMsgs4, hashKeys4)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(histogram), len(ret7))
for key := range ret7 {
assert.Equal(t, histogram[key], len(ret7[key].Msgs))
@ -115,7 +115,7 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
// tsMsgs is empty
ret1, err := defaultInsertRepackFunc(nil, [][]int32{{1, 2}})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret1))
// hashKeys is empty
@ -124,17 +124,17 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
&msgstream.InsertMsg{}, // not important
}
ret2, err := defaultInsertRepackFunc(tsMsgs2, nil)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret2)
// len(hashKeys) < len(tsMsgs), 1 < 2
ret2, err = defaultInsertRepackFunc(tsMsgs2, [][]int32{{1, 2}})
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret2)
// both tsMsgs & hashKeys are empty
ret3, err := defaultInsertRepackFunc(nil, nil)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(ret3))
num := rand.Int()%100 + 1
@ -148,7 +148,7 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
// len(hashKeys) = len(tsMsgs), but no hash key
hashKeys1 := make([][]int32, num)
ret4, err := defaultInsertRepackFunc(tsMsgs4, hashKeys1)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, ret4)
// all messages are shuffled to same bucket
@ -158,7 +158,7 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
hashKeys2[i] = []int32{key}
}
ret5, err := defaultInsertRepackFunc(tsMsgs4, hashKeys2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(ret5))
assert.Equal(t, num, len(ret5[key].Msgs))
@ -168,7 +168,7 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
hashKeys3[i] = []int32{int32(i)}
}
ret6, err := defaultInsertRepackFunc(tsMsgs4, hashKeys3)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(ret6))
for key := range ret6 {
assert.Equal(t, 1, len(ret6[key].Msgs))
@ -188,7 +188,7 @@ func Test_defaultInsertRepackFunc(t *testing.T) {
}
}
ret7, err := defaultInsertRepackFunc(tsMsgs4, hashKeys4)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(histogram), len(ret7))
for key := range ret7 {
assert.Equal(t, histogram[key], len(ret7[key].Msgs))


@ -93,7 +93,7 @@ func TestSegmentAllocator1(t *testing.T) {
dataCoord := &mockDataCoord{}
dataCoord.expireTime = Timestamp(1000)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick1)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()
@ -108,17 +108,17 @@ func TestSegmentAllocator1(t *testing.T) {
for i := 0; i < 10; i++ {
colName := collNames[i%2]
ret, err := segAllocator.GetSegmentID(1, 1, colName, 1, 1)
assert.Nil(t, err)
assert.NoError(t, err)
total += ret[1]
}
assert.Equal(t, uint32(10), total)
ret, err := segAllocator.GetSegmentID(1, 1, "abc", segCountPerRPC-10, 999)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, uint32(segCountPerRPC-10), ret[1])
_, err = segAllocator.GetSegmentID(1, 1, "abc", 10, 1001)
assert.NotNil(t, err)
assert.Error(t, err)
wg.Wait()
}
@ -139,7 +139,7 @@ func TestSegmentAllocator2(t *testing.T) {
dataCoord := &mockDataCoord{}
dataCoord.expireTime = Timestamp(500)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick2)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()
@ -152,13 +152,13 @@ func TestSegmentAllocator2(t *testing.T) {
total := uint32(0)
for i := 0; i < 10; i++ {
ret, err := segAllocator.GetSegmentID(1, 1, "abc", 1, 200)
assert.Nil(t, err)
assert.NoError(t, err)
total += ret[1]
}
assert.Equal(t, uint32(10), total)
time.Sleep(50 * time.Millisecond)
_, err = segAllocator.GetSegmentID(1, 1, "abc", segCountPerRPC-10, getLastTick2())
assert.NotNil(t, err)
assert.Error(t, err)
wg.Wait()
}
@ -168,7 +168,7 @@ func TestSegmentAllocator3(t *testing.T) {
dataCoord := &mockDataCoord2{}
dataCoord.expireTime = Timestamp(500)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick2)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()
@ -180,7 +180,7 @@ func TestSegmentAllocator3(t *testing.T) {
}(wg)
time.Sleep(50 * time.Millisecond)
_, err = segAllocator.GetSegmentID(1, 1, "abc", 10, 100)
assert.NotNil(t, err)
assert.Error(t, err)
wg.Wait()
}
@ -226,7 +226,7 @@ func TestSegmentAllocator4(t *testing.T) {
dataCoord := &mockDataCoord3{}
dataCoord.expireTime = Timestamp(500)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick2)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()
@ -238,7 +238,7 @@ func TestSegmentAllocator4(t *testing.T) {
}(wg)
time.Sleep(50 * time.Millisecond)
_, err = segAllocator.GetSegmentID(1, 1, "abc", 10, 100)
assert.NotNil(t, err)
assert.Error(t, err)
wg.Wait()
}
@ -261,7 +261,7 @@ func TestSegmentAllocator5(t *testing.T) {
dataCoord := &mockDataCoord5{}
dataCoord.expireTime = Timestamp(500)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick2)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()
@ -273,7 +273,7 @@ func TestSegmentAllocator5(t *testing.T) {
}(wg)
time.Sleep(50 * time.Millisecond)
_, err = segAllocator.GetSegmentID(1, 1, "abc", 10, 100)
assert.NotNil(t, err)
assert.Error(t, err)
wg.Wait()
}
@ -282,7 +282,7 @@ func TestSegmentAllocator6(t *testing.T) {
dataCoord := &mockDataCoord{}
dataCoord.expireTime = Timestamp(500)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick2)
assert.Nil(t, err)
assert.NoError(t, err)
wg := &sync.WaitGroup{}
segAllocator.Start()


@ -42,7 +42,7 @@ func TestCalcDistanceTask_arrangeVectorsByStrID(t *testing.T) {
}
result, err := task.arrangeVectorsByStrID(inputIds, sequence, retrievedVectors)
assert.Nil(t, err)
assert.NoError(t, err)
floatResult := result.GetFloatVector().GetData()
for i := 0; i < 3; i++ {
@ -64,7 +64,7 @@ func TestCalcDistanceTask_arrangeVectorsByStrID(t *testing.T) {
}
result, err = task.arrangeVectorsByStrID(inputIds, sequence, retrievedVectors)
assert.Nil(t, err)
assert.NoError(t, err)
binaryResult := result.GetBinaryVector()
numBytes := dim / 8
@ -105,7 +105,7 @@ func TestCalcDistanceTask_arrangeVectorsByIntID(t *testing.T) {
}
result, err := task.arrangeVectorsByIntID(inputIds, sequence, retrievedVectors)
assert.Nil(t, err)
assert.NoError(t, err)
floatResult := result.GetFloatVector().GetData()
for i := 0; i < 3; i++ {
@ -127,7 +127,7 @@ func TestCalcDistanceTask_arrangeVectorsByIntID(t *testing.T) {
}
result, err = task.arrangeVectorsByIntID(inputIds, sequence, retrievedVectors)
assert.Nil(t, err)
assert.NoError(t, err)
binaryResult := result.GetBinaryVector()
numBytes := dim / 8
@ -159,7 +159,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// left-op empty
calcResult, err := task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
request = &milvuspb.CalcDistanceRequest{
@ -176,7 +176,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// left-op query error
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
fieldIds := make([]int64, 0)
@ -235,7 +235,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
task.queryFunc = queryFunc
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
idArray := &milvuspb.VectorsArray{
@ -262,13 +262,13 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// success
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, calcResult.Status.ErrorCode)
// right-op query error
request.OpRight = nil
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
request.OpRight = &milvuspb.VectorsArray{
@ -288,7 +288,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// right-op arrange error
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
request.OpRight = &milvuspb.VectorsArray{
@ -301,7 +301,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// different dimension
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
request.OpRight = &milvuspb.VectorsArray{
@ -319,7 +319,7 @@ func TestCalcDistanceTask_ExecuteFloat(t *testing.T) {
// calcdistance return error
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
}
@ -407,7 +407,7 @@ func TestCalcDistanceTask_ExecuteBinary(t *testing.T) {
// success
calcResult, err := task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, calcResult.Status.ErrorCode)
floatArray := &milvuspb.VectorsArray{
@ -438,7 +438,7 @@ func TestCalcDistanceTask_ExecuteBinary(t *testing.T) {
// float vs binary
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
request = &milvuspb.CalcDistanceRequest{
@ -451,7 +451,7 @@ func TestCalcDistanceTask_ExecuteBinary(t *testing.T) {
// hamming
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, calcResult.Status.ErrorCode)
request = &milvuspb.CalcDistanceRequest{
@ -464,7 +464,7 @@ func TestCalcDistanceTask_ExecuteBinary(t *testing.T) {
// tanimoto
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, calcResult.Status.ErrorCode)
request = &milvuspb.CalcDistanceRequest{
@ -486,6 +486,6 @@ func TestCalcDistanceTask_ExecuteBinary(t *testing.T) {
// hamming error
calcResult, err = task.Execute(ctx, request)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, calcResult.Status.ErrorCode)
}


@ -34,7 +34,7 @@ func TestRoundRobinPolicy(t *testing.T) {
querier.init()
err = RoundRobinPolicy(ctx, mgr, querier.query, shard2leaders)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, querier.records(), map[UniqueID][]string{0: {"c0", "c2"}, 1: {"c1", "c3"}})
mockerr := fmt.Errorf("mock query node error")
@ -42,7 +42,7 @@ func TestRoundRobinPolicy(t *testing.T) {
querier.failset[0] = mockerr
err = RoundRobinPolicy(ctx, mgr, querier.query, shard2leaders)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, querier.records(), map[int64][]string{1: {"c0", "c1", "c3"}, 2: {"c2"}})
querier.init()


@ -103,11 +103,11 @@ func TestBaseTaskQueue(t *testing.T) {
queue.setMaxTaskNum(10) // not exact: the queue also counts as full when utBufChan blocks
for i := 0; i < int(queue.getMaxTaskNum()); i++ {
err = queue.Enqueue(newDefaultMockTask())
assert.Nil(t, err)
assert.NoError(t, err)
}
assert.True(t, queue.utFull())
err = queue.Enqueue(newDefaultMockTask())
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestDdTaskQueue(t *testing.T) {
@ -180,11 +180,11 @@ func TestDdTaskQueue(t *testing.T) {
queue.setMaxTaskNum(10) // not exact: the queue also counts as full when utBufChan blocks
for i := 0; i < int(queue.getMaxTaskNum()); i++ {
err = queue.Enqueue(newDefaultMockDdlTask())
assert.Nil(t, err)
assert.NoError(t, err)
}
assert.True(t, queue.utFull())
err = queue.Enqueue(newDefaultMockDdlTask())
assert.NotNil(t, err)
assert.Error(t, err)
}
// test the logic of queue
@ -257,11 +257,11 @@ func TestDmTaskQueue_Basic(t *testing.T) {
queue.setMaxTaskNum(10) // not exact: the queue also counts as full when utBufChan blocks
for i := 0; i < int(queue.getMaxTaskNum()); i++ {
err = queue.Enqueue(newDefaultMockDmlTask())
assert.Nil(t, err)
assert.NoError(t, err)
}
assert.True(t, queue.utFull())
err = queue.Enqueue(newDefaultMockDmlTask())
assert.NotNil(t, err)
assert.Error(t, err)
}
// test the timestamp statistics
@ -354,7 +354,7 @@ func TestDmTaskQueue_TimestampStatistics2(t *testing.T) {
return
case <-ticker.C:
stats, err := queue.getPChanStatsInfo()
assert.Nil(t, err)
assert.NoError(t, err)
if currPChanStats == nil {
currPChanStats = stats
} else {
@ -408,7 +408,7 @@ func TestDmTaskQueue_TimestampStatistics2(t *testing.T) {
wgSchedule.Wait()
stats, err := queue.getPChanStatsInfo()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Zero(t, len(stats))
}
@ -482,11 +482,11 @@ func TestDqTaskQueue(t *testing.T) {
queue.setMaxTaskNum(10) // not exact: the queue also counts as full when utBufChan blocks
for i := 0; i < int(queue.getMaxTaskNum()); i++ {
err = queue.Enqueue(newDefaultMockDqlTask())
assert.Nil(t, err)
assert.NoError(t, err)
}
assert.True(t, queue.utFull())
err = queue.Enqueue(newDefaultMockDqlTask())
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestTaskScheduler(t *testing.T) {


@ -334,7 +334,7 @@ func TestSearchTask_Reduce(t *testing.T) {
// dataArray = append(dataArray, data1)
// dataArray = append(dataArray, data2)
// res, err := reduceSearchResultData(dataArray, nq, topk, metricType)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.Equal(t, ids, res.Results.Ids.GetIntId().Data)
// assert.Equal(t, []float32{1.0, 2.0, 3.0, 4.0}, res.Results.Scores)
// })
@ -349,7 +349,7 @@ func TestSearchTask_Reduce(t *testing.T) {
// dataArray = append(dataArray, data1)
// dataArray = append(dataArray, data2)
// res, err := reduceSearchResultData(dataArray, nq, topk, metricType)
// assert.Nil(t, err)
// assert.NoError(t, err)
// assert.ElementsMatch(t, []int64{1, 5, 2, 3}, res.Results.Ids.GetIntId().Data)
// })
}


@ -928,7 +928,7 @@ func TestHasCollectionTask(t *testing.T) {
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
// missing collectionID in globalMetaCache
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, false, task.result.Value)
// createCollection in RootCoord and fill GlobalMetaCache
rc.CreateCollection(ctx, createColReq)
@ -936,20 +936,20 @@ func TestHasCollectionTask(t *testing.T) {
// succeeds now that the collection exists
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, task.result.Value)
// illegal name
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
rc.updateState(commonpb.StateCode_Abnormal)
task.CollectionName = collectionName
err = task.PreExecute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
}
@ -992,12 +992,12 @@ func TestDescribeCollectionTask(t *testing.T) {
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
// missing collectionID in globalMetaCache
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
// illegal name
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
// describe collection with id
task.CollectionID = 1
@ -1009,9 +1009,9 @@ func TestDescribeCollectionTask(t *testing.T) {
task.CollectionID = 0
task.CollectionName = collectionName
err = task.PreExecute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.Status.ErrorCode)
}
@ -1070,10 +1070,10 @@ func TestDescribeCollectionTask_ShardsNum1(t *testing.T) {
result: nil,
}
err = task.PreExecute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, shardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
@ -1134,10 +1134,10 @@ func TestDescribeCollectionTask_EnableDynamicSchema(t *testing.T) {
result: nil,
}
err = task.PreExecute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, shardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
@ -1200,10 +1200,10 @@ func TestDescribeCollectionTask_ShardsNum2(t *testing.T) {
// missing collectionID in globalMetaCache
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
err = task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, common.DefaultShardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
@ -1244,16 +1244,16 @@ func TestCreatePartitionTask(t *testing.T) {
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = collectionName
task.PartitionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestDropPartitionTask(t *testing.T) {
@ -1307,16 +1307,16 @@ func TestDropPartitionTask(t *testing.T) {
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = collectionName
task.PartitionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
t.Run("get collectionID error", func(t *testing.T) {
mockCache := newMockCache()
@ -1397,16 +1397,16 @@ func TestHasPartitionTask(t *testing.T) {
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = collectionName
task.PartitionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestShowPartitionsTask(t *testing.T) {
@ -1444,23 +1444,23 @@ func TestShowPartitionsTask(t *testing.T) {
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = "#0xc0de"
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = collectionName
task.ShowPartitionsRequest.Type = milvuspb.ShowType_InMemory
task.PartitionNames = []string{"#0xc0de"}
err = task.PreExecute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
task.CollectionName = collectionName
task.PartitionNames = []string{partitionName}
task.ShowPartitionsRequest.Type = milvuspb.ShowType_InMemory
err = task.Execute(ctx)
assert.NotNil(t, err)
assert.Error(t, err)
}
@ -2775,7 +2775,7 @@ func TestCreateResourceGroupTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode)
}
@ -2815,7 +2815,7 @@ func TestDropResourceGroupTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode)
}
@ -2857,7 +2857,7 @@ func TestTransferNodeTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode)
}
@ -2900,7 +2900,7 @@ func TestTransferReplicaTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode)
}
@ -2940,7 +2940,7 @@ func TestListResourceGroupsTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
groups := task.result.GetResourceGroups()
assert.Contains(t, groups, meta.DefaultResourceGroupName)
@ -2993,7 +2993,7 @@ func TestDescribeResourceGroupTask(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
groupInfo := task.result.GetResourceGroup()
outgoingNodeNum := groupInfo.GetNumOutgoingNode()
@ -3041,7 +3041,7 @@ func TestDescribeResourceGroupTaskFailed(t *testing.T) {
assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
err := task.Execute(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.Status.ErrorCode)
}


@ -31,7 +31,7 @@ func TestNewTimestampAllocator(t *testing.T) {
peerID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
tsAllocator, err := newTimestampAllocator(tso, peerID)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, tsAllocator)
}
@ -41,12 +41,12 @@ func TestTimestampAllocator_alloc(t *testing.T) {
peerID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
tsAllocator, err := newTimestampAllocator(tso, peerID)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, tsAllocator)
count := rand.Uint32()%100 + 1
ret, err := tsAllocator.alloc(ctx, count)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, int(count), len(ret))
}
@ -56,9 +56,9 @@ func TestTimestampAllocator_AllocOne(t *testing.T) {
peerID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
tsAllocator, err := newTimestampAllocator(tso, peerID)
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, tsAllocator)
_, err = tsAllocator.AllocOne(ctx)
assert.Nil(t, err)
assert.NoError(t, err)
}


@ -727,25 +727,25 @@ func GetContext(ctx context.Context, originValue string) context.Context {
func TestGetCurUserFromContext(t *testing.T) {
_, err := GetCurUserFromContext(context.Background())
assert.NotNil(t, err)
assert.Error(t, err)
_, err = GetCurUserFromContext(metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{})))
assert.NotNil(t, err)
assert.Error(t, err)
_, err = GetCurUserFromContext(GetContext(context.Background(), "123456"))
assert.NotNil(t, err)
assert.Error(t, err)
root := "root"
password := "123456"
username, err := GetCurUserFromContext(GetContext(context.Background(), fmt.Sprintf("%s%s%s", root, util.CredentialSeperator, password)))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, "root", username)
}
func TestGetRole(t *testing.T) {
globalMetaCache = nil
_, err := GetRole("foo")
assert.NotNil(t, err)
assert.Error(t, err)
globalMetaCache = &mockCache{
getUserRoleFunc: func(username string) []string {
if username == "root" {
@ -755,11 +755,11 @@ func TestGetRole(t *testing.T) {
},
}
roles, err := GetRole("root")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 3, len(roles))
roles, err = GetRole("foo")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(roles))
}
@ -789,7 +789,7 @@ func TestPasswordVerify(t *testing.T) {
// Sha256Password has not yet been filled into the cache when the connection is first established
encryptedPwd, err := crypto.PasswordEncrypt(password)
assert.Nil(t, err)
assert.NoError(t, err)
credCache[username] = &internalpb.CredentialInfo{
Username: username,
EncryptedPassword: encryptedPwd,


@ -1089,7 +1089,7 @@ func TestImportManager_setCollectionPartitionName(t *testing.T) {
CreateTs: time.Now().Unix() - 100,
}
err := mgr.setCollectionPartitionName(1, 2, info)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, "c1", info.GetCollectionName())
assert.Equal(t, "p1", info.GetPartitionName())
@ -1120,21 +1120,21 @@ func TestImportManager_isRowbased(t *testing.T) {
files := []string{"1.json"}
rb, err := mgr.isRowbased(files)
assert.Nil(t, err)
assert.NoError(t, err)
assert.True(t, rb)
files = []string{"1.json", "2.json"}
rb, err = mgr.isRowbased(files)
assert.NotNil(t, err)
assert.Error(t, err)
assert.True(t, rb)
files = []string{"1.json", "2.npy"}
rb, err = mgr.isRowbased(files)
assert.NotNil(t, err)
assert.Error(t, err)
assert.True(t, rb)
files = []string{"1.npy", "2.npy"}
rb, err = mgr.isRowbased(files)
assert.Nil(t, err)
assert.NoError(t, err)
assert.False(t, rb)
}


@ -104,7 +104,7 @@ func TestProxyClientManager_GetProxyClients(t *testing.T) {
Params.Init()
core, err := NewCore(context.Background(), nil)
assert.Nil(t, err)
assert.NoError(t, err)
cli, err := etcd.GetEtcdClient(
Params.EtcdCfg.UseEmbedEtcd.GetAsBool(),
Params.EtcdCfg.EtcdUseSSL.GetAsBool(),
@ -114,7 +114,7 @@ func TestProxyClientManager_GetProxyClients(t *testing.T) {
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
defer cli.Close()
assert.Nil(t, err)
assert.NoError(t, err)
core.etcdCli = cli
core.proxyCreator = func(ctx context.Context, addr string) (types.Proxy, error) {
return nil, errors.New("failed")
@ -135,7 +135,7 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) {
Params.Init()
core, err := NewCore(context.Background(), nil)
assert.Nil(t, err)
assert.NoError(t, err)
cli, err := etcd.GetEtcdClient(
Params.EtcdCfg.UseEmbedEtcd.GetAsBool(),
Params.EtcdCfg.EtcdUseSSL.GetAsBool(),
@ -144,7 +144,7 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer cli.Close()
core.etcdCli = cli


@ -43,7 +43,7 @@ func TestProxyManager(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -55,19 +55,19 @@ func TestProxyManager(t *testing.T) {
ServerID: 100,
}
b1, err := json.Marshal(&s1)
assert.Nil(t, err)
assert.NoError(t, err)
k1 := path.Join(sessKey, typeutil.ProxyRole+"-100")
_, err = etcdCli.Put(ctx, k1, string(b1))
assert.Nil(t, err)
assert.NoError(t, err)
s0 := sessionutil.Session{
ServerID: 99,
}
b0, err := json.Marshal(&s0)
assert.Nil(t, err)
assert.NoError(t, err)
k0 := path.Join(sessKey, typeutil.ProxyRole+"-99")
_, err = etcdCli.Put(ctx, k0, string(b0))
assert.Nil(t, err)
assert.NoError(t, err)
f1 := func(sess []*sessionutil.Session) {
assert.Equal(t, len(sess), 2)
@ -76,7 +76,7 @@ func TestProxyManager(t *testing.T) {
t.Log("get sessions", sess[0], sess[1])
}
pm := newProxyManager(ctx, etcdCli, f1)
assert.Nil(t, err)
assert.NoError(t, err)
fa := func(sess *sessionutil.Session) {
assert.Equal(t, int64(101), sess.ServerID)
t.Log("add session", sess)
@ -89,20 +89,20 @@ func TestProxyManager(t *testing.T) {
pm.DelSessionFunc(fd)
err = pm.WatchProxy()
assert.Nil(t, err)
assert.NoError(t, err)
t.Log("======== start watch proxy ==========")
s2 := sessionutil.Session{
ServerID: 101,
}
b2, err := json.Marshal(&s2)
assert.Nil(t, err)
assert.NoError(t, err)
k2 := path.Join(sessKey, typeutil.ProxyRole+"-101")
_, err = etcdCli.Put(ctx, k2, string(b2))
assert.Nil(t, err)
assert.NoError(t, err)
_, err = etcdCli.Delete(ctx, k1)
assert.Nil(t, err)
assert.NoError(t, err)
time.Sleep(100 * time.Millisecond)
pm.Stop()
time.Sleep(100 * time.Millisecond)
@ -119,7 +119,7 @@ func TestProxyManager_ErrCompacted(t *testing.T) {
Params.EtcdCfg.EtcdTLSKey.GetValue(),
Params.EtcdCfg.EtcdTLSCACert.GetValue(),
Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@ -143,7 +143,7 @@ func TestProxyManager_ErrCompacted(t *testing.T) {
k := path.Join(sessKey, typeutil.ProxyRole+strconv.FormatInt(int64(i), 10))
v := "invalid session: " + strconv.FormatInt(int64(i), 10)
_, err = etcdCli.Put(ctx, k, v)
assert.Nil(t, err)
assert.NoError(t, err)
}
// The error is not handled here because running compact twice reports an error;
@ -157,6 +157,6 @@ func TestProxyManager_ErrCompacted(t *testing.T) {
for i := 1; i < 10; i++ {
k := path.Join(sessKey, typeutil.ProxyRole+strconv.FormatInt(int64(i), 10))
_, err = etcdCli.Delete(ctx, k)
assert.Nil(t, err)
assert.NoError(t, err)
}
}


@ -62,7 +62,7 @@ func TestQuotaCenter(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
core, err := NewCore(ctx, nil)
assert.Nil(t, err)
assert.NoError(t, err)
core.tsoAllocator = newMockTsoAllocator()
pcm := newProxyClientManager(core.proxyCreator)


@ -1337,49 +1337,49 @@ func TestCore_Rbac(t *testing.T) {
{
resp, err := c.CreateRole(ctx, &milvuspb.CreateRoleRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
}
{
resp, err := c.DropRole(ctx, &milvuspb.DropRoleRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
}
{
resp, err := c.OperateUserRole(ctx, &milvuspb.OperateUserRoleRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
}
{
resp, err := c.SelectRole(ctx, &milvuspb.SelectRoleRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
}
{
resp, err := c.SelectUser(ctx, &milvuspb.SelectUserRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
}
{
resp, err := c.OperatePrivilege(ctx, &milvuspb.OperatePrivilegeRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
}
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
}
{
resp, err := c.ListPolicy(ctx, &internalpb.ListPolicyRequest{})
assert.NotNil(t, err)
assert.Error(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
}
}


@ -78,7 +78,7 @@ func TestTimetickSync(t *testing.T) {
}
err := ttSync.updateTimeTick(msg, "1")
assert.Nil(t, err)
assert.NoError(t, err)
msg.ChannelNames = append(msg.ChannelNames, "a")
err = ttSync.updateTimeTick(msg, "1")
@ -90,11 +90,11 @@ func TestTimetickSync(t *testing.T) {
ttSync.sess2ChanTsMap[msg.Base.SourceID] = cttMsg
err = ttSync.updateTimeTick(msg, "1")
assert.Nil(t, err)
assert.NoError(t, err)
ttSync.sourceID = int64(1)
err = ttSync.updateTimeTick(msg, "1")
assert.Nil(t, err)
assert.NoError(t, err)
})
wg.Add(1)
@ -145,7 +145,7 @@ func TestMultiTimetickSync(t *testing.T) {
}
err := ttSync.updateTimeTick(msg, "1")
assert.Nil(t, err)
assert.NoError(t, err)
msg2 := &internalpb.ChannelTimeTickMsg{
Base: &commonpb.MsgBase{
@ -155,7 +155,7 @@ func TestMultiTimetickSync(t *testing.T) {
DefaultTimestamp: 102,
}
err = ttSync.updateTimeTick(msg2, "2")
assert.Nil(t, err)
assert.NoError(t, err)
// make sure result works
result := <-ttSync.sendChan


@ -70,9 +70,9 @@ func Test_GetFieldSchemaByID(t *testing.T) {
},
}
_, err := GetFieldSchemaByID(coll, 1)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = GetFieldSchemaByID(coll, 2)
assert.NotNil(t, err)
assert.Error(t, err)
}
func Test_EncodeMsgPositions(t *testing.T) {
@ -83,12 +83,12 @@ func Test_EncodeMsgPositions(t *testing.T) {
str, err := EncodeMsgPositions([]*msgstream.MsgPosition{})
assert.Empty(t, str)
assert.Nil(t, err)
assert.NoError(t, err)
mps := []*msgstream.MsgPosition{mp}
str, err = EncodeMsgPositions(mps)
assert.NotEmpty(t, str)
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_DecodeMsgPositions(t *testing.T) {
@ -98,17 +98,17 @@ func Test_DecodeMsgPositions(t *testing.T) {
}
str, err := EncodeMsgPositions([]*msgstream.MsgPosition{mp})
assert.Nil(t, err)
assert.NoError(t, err)
mpOut := make([]*msgstream.MsgPosition, 1)
err = DecodeMsgPositions(str, &mpOut)
assert.Nil(t, err)
assert.NoError(t, err)
err = DecodeMsgPositions("", &mpOut)
assert.Nil(t, err)
assert.NoError(t, err)
err = DecodeMsgPositions("null", &mpOut)
assert.Nil(t, err)
assert.NoError(t, err)
}
func Test_getTravelTs(t *testing.T) {


@ -73,7 +73,7 @@ func generateTestData(t *testing.T, num int) []*Blob {
}}
blobs, err := insertCodec.Serialize(1, 1, data)
assert.Nil(t, err)
assert.NoError(t, err)
return blobs
}
@ -90,7 +90,7 @@ func TestInsertlogIterator(t *testing.T) {
t.Run("test dispose", func(t *testing.T) {
blobs := generateTestData(t, 1)
itr, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
itr.Dispose()
assert.False(t, itr.HasNext())
@ -101,12 +101,12 @@ func TestInsertlogIterator(t *testing.T) {
t.Run("not empty iterator", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
for i := 1; i <= 3; i++ {
assert.True(t, itr.HasNext())
v, err := itr.Next()
assert.Nil(t, err)
assert.NoError(t, err)
value := v.(*Value)
f102 := make([]float32, 8)
@ -156,7 +156,7 @@ func TestMergeIterator(t *testing.T) {
t.Run("empty and non-empty iterators", func(t *testing.T) {
blobs := generateTestData(t, 3)
insertItr, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
iterators := []Iterator{
&InsertBinlogIterator{data: &InsertData{}},
insertItr,
@ -167,7 +167,7 @@ func TestMergeIterator(t *testing.T) {
for i := 1; i <= 3; i++ {
assert.True(t, itr.HasNext())
v, err := itr.Next()
assert.Nil(t, err)
assert.NoError(t, err)
value := v.(*Value)
f102 := make([]float32, 8)
for j := range f102 {
@ -200,9 +200,9 @@ func TestMergeIterator(t *testing.T) {
t.Run("non-empty iterators", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr1, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
itr2, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
iterators := []Iterator{itr1, itr2}
itr := NewMergeIterator(iterators)
@ -231,7 +231,7 @@ func TestMergeIterator(t *testing.T) {
for j := 0; j < 2; j++ {
assert.True(t, itr.HasNext())
v, err := itr.Next()
assert.Nil(t, err)
assert.NoError(t, err)
value := v.(*Value)
assert.EqualValues(t, expected, value)
}
@ -245,7 +245,7 @@ func TestMergeIterator(t *testing.T) {
t.Run("test dispose", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr1, err := NewInsertBinlogIterator(blobs, common.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
itr := NewMergeIterator([]Iterator{itr1})
itr.Dispose()


@ -40,23 +40,23 @@ func TestInsertBinlog(t *testing.T) {
w := NewInsertBinlogWriter(schemapb.DataType_Int64, 10, 20, 30, 40)
e1, err := w.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
e2, err := w.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]int64{7, 8, 9})
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]bool{true, false, true})
assert.NotNil(t, err)
assert.Error(t, err)
err = e2.AddDataToPayload([]int64{10, 11, 12})
assert.Nil(t, err)
assert.NoError(t, err)
e2.SetEventTimestamp(300, 400)
w.SetEventTimeStamp(1000, 2000)
@ -66,11 +66,11 @@ func TestInsertBinlog(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
@ -202,9 +202,9 @@ func TestInsertBinlog(t *testing.T) {
//insert e1, payload
e1Payload := buf[pos:e1NxtPos]
e1r, err := NewPayloadReader(schemapb.DataType_Int64, e1Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e1a, err := e1r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
e1r.Close()
@ -244,9 +244,9 @@ func TestInsertBinlog(t *testing.T) {
//insert e2, payload
e2Payload := buf[pos:]
e2r, err := NewPayloadReader(schemapb.DataType_Int64, e2Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e2a, err := e2r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
e2r.Close()
@ -254,13 +254,13 @@ func TestInsertBinlog(t *testing.T) {
//read binlog
r, err := NewBinlogReader(buf)
assert.Nil(t, err)
assert.NoError(t, err)
event1, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event1)
p1, err := event1.GetInt64FromPayload()
assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, event1.TypeCode, InsertEventType)
ed1, ok := (event1.eventData).(*insertEventData)
assert.True(t, ok)
@ -268,10 +268,10 @@ func TestInsertBinlog(t *testing.T) {
assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
event2, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event2)
p2, err := event2.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
assert.Equal(t, event2.TypeCode, InsertEventType)
ed2, ok := (event2.eventData).(*insertEventData)
@ -287,23 +287,23 @@ func TestDeleteBinlog(t *testing.T) {
w := NewDeleteBinlogWriter(schemapb.DataType_Int64, 50, 1, 1)
e1, err := w.NextDeleteEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
e2, err := w.NextDeleteEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]int64{7, 8, 9})
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]bool{true, false, true})
assert.NotNil(t, err)
assert.Error(t, err)
err = e2.AddDataToPayload([]int64{10, 11, 12})
assert.Nil(t, err)
assert.NoError(t, err)
e2.SetEventTimestamp(300, 400)
w.SetEventTimeStamp(1000, 2000)
@ -313,11 +313,11 @@ func TestDeleteBinlog(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
@ -449,9 +449,9 @@ func TestDeleteBinlog(t *testing.T) {
//insert e1, payload
e1Payload := buf[pos:e1NxtPos]
e1r, err := NewPayloadReader(schemapb.DataType_Int64, e1Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e1a, err := e1r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
e1r.Close()
@ -491,9 +491,9 @@ func TestDeleteBinlog(t *testing.T) {
//insert e2, payload
e2Payload := buf[pos:]
e2r, err := NewPayloadReader(schemapb.DataType_Int64, e2Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e2a, err := e2r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
e2r.Close()
@ -501,13 +501,13 @@ func TestDeleteBinlog(t *testing.T) {
//read binlog
r, err := NewBinlogReader(buf)
assert.Nil(t, err)
assert.NoError(t, err)
event1, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event1)
p1, err := event1.GetInt64FromPayload()
assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, event1.TypeCode, DeleteEventType)
ed1, ok := (event1.eventData).(*deleteEventData)
assert.True(t, ok)
@ -515,10 +515,10 @@ func TestDeleteBinlog(t *testing.T) {
assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
event2, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event2)
p2, err := event2.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
assert.Equal(t, event2.TypeCode, DeleteEventType)
ed2, ok := (event2.eventData).(*deleteEventData)
@ -534,23 +534,23 @@ func TestDDLBinlog1(t *testing.T) {
w := NewDDLBinlogWriter(schemapb.DataType_Int64, 50)
e1, err := w.NextCreateCollectionEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
e2, err := w.NextDropCollectionEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]int64{7, 8, 9})
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]bool{true, false, true})
assert.NotNil(t, err)
assert.Error(t, err)
err = e2.AddDataToPayload([]int64{10, 11, 12})
assert.Nil(t, err)
assert.NoError(t, err)
e2.SetEventTimestamp(300, 400)
w.SetEventTimeStamp(1000, 2000)
@ -560,11 +560,11 @@ func TestDDLBinlog1(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
@ -696,9 +696,9 @@ func TestDDLBinlog1(t *testing.T) {
//insert e1, payload
e1Payload := buf[pos:e1NxtPos]
e1r, err := NewPayloadReader(schemapb.DataType_Int64, e1Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e1a, err := e1r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
e1r.Close()
@ -738,9 +738,9 @@ func TestDDLBinlog1(t *testing.T) {
//insert e2, payload
e2Payload := buf[pos:]
e2r, err := NewPayloadReader(schemapb.DataType_Int64, e2Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e2a, err := e2r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
e2r.Close()
@ -748,13 +748,13 @@ func TestDDLBinlog1(t *testing.T) {
//read binlog
r, err := NewBinlogReader(buf)
assert.Nil(t, err)
assert.NoError(t, err)
event1, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event1)
p1, err := event1.GetInt64FromPayload()
assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, event1.TypeCode, CreateCollectionEventType)
ed1, ok := (event1.eventData).(*createCollectionEventData)
assert.True(t, ok)
@ -762,10 +762,10 @@ func TestDDLBinlog1(t *testing.T) {
assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
event2, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event2)
p2, err := event2.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
assert.Equal(t, event2.TypeCode, DropCollectionEventType)
ed2, ok := (event2.eventData).(*dropCollectionEventData)
@ -781,23 +781,23 @@ func TestDDLBinlog2(t *testing.T) {
w := NewDDLBinlogWriter(schemapb.DataType_Int64, 50)
e1, err := w.NextCreatePartitionEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
e2, err := w.NextDropPartitionEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]int64{7, 8, 9})
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]bool{true, false, true})
assert.NotNil(t, err)
assert.Error(t, err)
err = e2.AddDataToPayload([]int64{10, 11, 12})
assert.Nil(t, err)
assert.NoError(t, err)
e2.SetEventTimestamp(300, 400)
w.SetEventTimeStamp(1000, 2000)
@ -807,11 +807,11 @@ func TestDDLBinlog2(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
//magic number
@ -942,9 +942,9 @@ func TestDDLBinlog2(t *testing.T) {
//insert e1, payload
e1Payload := buf[pos:e1NxtPos]
e1r, err := NewPayloadReader(schemapb.DataType_Int64, e1Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e1a, err := e1r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
e1r.Close()
@ -984,9 +984,9 @@ func TestDDLBinlog2(t *testing.T) {
//insert e2, payload
e2Payload := buf[pos:]
e2r, err := NewPayloadReader(schemapb.DataType_Int64, e2Payload)
assert.Nil(t, err)
assert.NoError(t, err)
e2a, err := e2r.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
e2r.Close()
@ -994,13 +994,13 @@ func TestDDLBinlog2(t *testing.T) {
//read binlog
r, err := NewBinlogReader(buf)
assert.Nil(t, err)
assert.NoError(t, err)
event1, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event1)
p1, err := event1.GetInt64FromPayload()
assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, event1.TypeCode, CreatePartitionEventType)
ed1, ok := (event1.eventData).(*createPartitionEventData)
assert.True(t, ok)
@ -1008,10 +1008,10 @@ func TestDDLBinlog2(t *testing.T) {
assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
event2, err := r.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, event2)
p2, err := event2.GetInt64FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
assert.Equal(t, event2.TypeCode, DropPartitionEventType)
ed2, ok := (event2.eventData).(*dropPartitionEventData)
@ -1041,9 +1041,9 @@ func TestIndexFileBinlog(t *testing.T) {
w.PayloadDataType = schemapb.DataType_Int8
e, err := w.NextIndexFileEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e.AddByteToPayload(payload)
assert.Nil(t, err)
assert.NoError(t, err)
e.SetEventTimestamp(timestamp, timestamp)
w.SetEventTimeStamp(timestamp, timestamp)
@ -1052,11 +1052,11 @@ func TestIndexFileBinlog(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
@ -1139,7 +1139,7 @@ func TestIndexFileBinlog(t *testing.T) {
}
j := make(map[string]interface{})
err = json.Unmarshal(multiBytes, &j)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%v", indexBuildID), fmt.Sprintf("%v", j["indexBuildID"]))
assert.Equal(t, fmt.Sprintf("%v", version), fmt.Sprintf("%v", j["version"]))
assert.Equal(t, fmt.Sprintf("%v", indexName), fmt.Sprintf("%v", j["indexName"]))
@ -1149,7 +1149,7 @@ func TestIndexFileBinlog(t *testing.T) {
// NextIndexFileBinlogWriter after close
_, err = w.NextIndexFileEventWriter()
assert.NotNil(t, err)
assert.Error(t, err)
}
/* #nosec G103 */
@ -1170,9 +1170,9 @@ func TestIndexFileBinlogV2(t *testing.T) {
w := NewIndexFileBinlogWriter(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexName, indexID, key)
e, err := w.NextIndexFileEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e.AddOneStringToPayload(typeutil.UnsafeBytes2str(payload))
assert.Nil(t, err)
assert.NoError(t, err)
e.SetEventTimestamp(timestamp, timestamp)
w.SetEventTimeStamp(timestamp, timestamp)
@ -1181,11 +1181,11 @@ func TestIndexFileBinlogV2(t *testing.T) {
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
@ -1268,7 +1268,7 @@ func TestIndexFileBinlogV2(t *testing.T) {
}
j := make(map[string]interface{})
err = json.Unmarshal(multiBytes, &j)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%v", indexBuildID), fmt.Sprintf("%v", j["indexBuildID"]))
assert.Equal(t, fmt.Sprintf("%v", version), fmt.Sprintf("%v", j["version"]))
assert.Equal(t, fmt.Sprintf("%v", indexName), fmt.Sprintf("%v", j["indexName"]))
@ -1278,71 +1278,71 @@ func TestIndexFileBinlogV2(t *testing.T) {
// NextIndexFileBinlogWriter after close
_, err = w.NextIndexFileEventWriter()
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestNewBinlogReaderError(t *testing.T) {
data := []byte{}
reader, err := NewBinlogReader(data)
assert.Nil(t, reader)
assert.NotNil(t, err)
assert.Error(t, err)
data = []byte{0, 0, 0, 0}
reader, err = NewBinlogReader(data)
assert.Nil(t, reader)
assert.NotNil(t, err)
assert.Error(t, err)
buffer := new(bytes.Buffer)
err = binary.Write(buffer, common.Endian, MagicNumber)
assert.Nil(t, err)
assert.NoError(t, err)
data = buffer.Bytes()
reader, err = NewBinlogReader(data)
assert.Nil(t, reader)
assert.NotNil(t, err)
assert.Error(t, err)
err = binary.Write(buffer, common.Endian, int32(555))
assert.Nil(t, err)
assert.NoError(t, err)
data = buffer.Bytes()
reader, err = NewBinlogReader(data)
assert.Nil(t, reader)
assert.NotNil(t, err)
assert.Error(t, err)
w := NewInsertBinlogWriter(schemapb.DataType_Int64, 10, 20, 30, 40)
w.SetEventTimeStamp(1000, 2000)
e1, err := w.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
sizeTotal := 2000000
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
reader, err = NewBinlogReader(buf)
assert.Nil(t, err)
assert.NoError(t, err)
reader.Close()
event1, err := reader.NextEventReader()
assert.Nil(t, event1)
assert.NotNil(t, err)
assert.Error(t, err)
reader.Close()
}
@ -1351,102 +1351,102 @@ func TestNewBinlogWriterTsError(t *testing.T) {
w := NewInsertBinlogWriter(schemapb.DataType_Int64, 10, 20, 30, 40)
_, err := w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
sizeTotal := 2000000
w.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
w.SetEventTimeStamp(1000, 0)
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
w.SetEventTimeStamp(1000, 2000)
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
_, err = w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
}
func TestInsertBinlogWriterCloseError(t *testing.T) {
insertWriter := NewInsertBinlogWriter(schemapb.DataType_Int64, 10, 20, 30, 40)
e1, err := insertWriter.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
sizeTotal := 2000000
insertWriter.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
insertWriter.SetEventTimeStamp(1000, 2000)
err = insertWriter.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, insertWriter.buffer)
insertEventWriter, err := insertWriter.NextInsertEventWriter()
assert.Nil(t, insertEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
insertWriter.Close()
}
func TestDeleteBinlogWriteCloseError(t *testing.T) {
deleteWriter := NewDeleteBinlogWriter(schemapb.DataType_Int64, 10, 1, 1)
e1, err := deleteWriter.NextDeleteEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
sizeTotal := 2000000
deleteWriter.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
deleteWriter.SetEventTimeStamp(1000, 2000)
err = deleteWriter.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, deleteWriter.buffer)
deleteEventWriter, err := deleteWriter.NextDeleteEventWriter()
assert.Nil(t, deleteEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
deleteWriter.Close()
}
func TestDDBinlogWriteCloseError(t *testing.T) {
ddBinlogWriter := NewDDLBinlogWriter(schemapb.DataType_Int64, 10)
e1, err := ddBinlogWriter.NextCreateCollectionEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
sizeTotal := 2000000
ddBinlogWriter.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(100, 200)
ddBinlogWriter.SetEventTimeStamp(1000, 2000)
err = ddBinlogWriter.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, ddBinlogWriter.buffer)
createCollectionEventWriter, err := ddBinlogWriter.NextCreateCollectionEventWriter()
assert.Nil(t, createCollectionEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
dropCollectionEventWriter, err := ddBinlogWriter.NextDropCollectionEventWriter()
assert.Nil(t, dropCollectionEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
createPartitionEventWriter, err := ddBinlogWriter.NextCreatePartitionEventWriter()
assert.Nil(t, createPartitionEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
dropPartitionEventWriter, err := ddBinlogWriter.NextDropPartitionEventWriter()
assert.Nil(t, dropPartitionEventWriter)
assert.NotNil(t, err)
assert.Error(t, err)
ddBinlogWriter.Close()
}
@ -1508,17 +1508,17 @@ func TestWriterListError(t *testing.T) {
insertWriter.buffer = nil
errorEvent.getPayloadLengthError = true
err := insertWriter.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
insertWriter.buffer = nil
errorEvent.getMemoryError = true
err = insertWriter.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
insertWriter.buffer = nil
errorEvent.writeError = true
err = insertWriter.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
insertWriter.buffer = nil
errorEvent.finishError = true
err = insertWriter.Finish()
assert.NotNil(t, err)
assert.Error(t, err)
}


@ -33,47 +33,47 @@ func TestBinlogWriterReader(t *testing.T) {
binlogWriter.SetEventTimeStamp(1000, 2000)
defer binlogWriter.Close()
eventWriter, err := binlogWriter.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = eventWriter.AddInt32ToPayload([]int32{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
_, err = binlogWriter.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
eventWriter.SetEventTimestamp(1000, 2000)
nums, err := binlogWriter.GetRowNums()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, nums)
sizeTotal := 20000000
binlogWriter.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = binlogWriter.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 1, binlogWriter.GetEventNums())
nums, err = binlogWriter.GetRowNums()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, nums)
err = eventWriter.AddInt32ToPayload([]int32{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
nums, err = binlogWriter.GetRowNums()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, nums)
buffer, err := binlogWriter.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
binlogWriter.Close()
binlogReader, err := NewBinlogReader(buffer)
assert.Nil(t, err)
assert.NoError(t, err)
eventReader, err := binlogReader.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
_, err = eventReader.GetInt8FromPayload()
assert.NotNil(t, err)
assert.Error(t, err)
payload, err := eventReader.GetInt32FromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, len(payload))
assert.EqualValues(t, 1, payload[0])
assert.EqualValues(t, 2, payload[1])
assert.EqualValues(t, 3, payload[2])
reader, err := binlogReader.NextEventReader()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Nil(t, reader)
}


@ -316,20 +316,20 @@ func TestInsertCodec(t *testing.T) {
assert.Empty(t, s)
Blobs1, err := insertCodec.Serialize(PartitionID, SegmentID, insertData1)
assert.Nil(t, err)
assert.NoError(t, err)
for _, blob := range Blobs1 {
blob.Key = fmt.Sprintf("1/insert_log/2/3/4/5/%d", 100)
assert.Equal(t, blob.GetKey(), blob.Key)
}
Blobs2, err := insertCodec.Serialize(PartitionID, SegmentID, insertData2)
assert.Nil(t, err)
assert.NoError(t, err)
for _, blob := range Blobs2 {
blob.Key = fmt.Sprintf("1/insert_log/2/3/4/5/%d", 99)
assert.Equal(t, blob.GetKey(), blob.Key)
}
resultBlobs := append(Blobs1, Blobs2...)
collID, partID, segID, resultData, err := insertCodec.DeserializeAll(resultBlobs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, UniqueID(CollectionID), collID)
assert.Equal(t, UniqueID(PartitionID), partID)
assert.Equal(t, UniqueID(SegmentID), segID)
@ -367,19 +367,19 @@ func TestInsertCodec(t *testing.T) {
blobs := []*Blob{}
_, _, _, err = insertCodec.Deserialize(blobs)
assert.NotNil(t, err)
assert.Error(t, err)
_, _, _, _, err = insertCodec.DeserializeAll(blobs)
assert.NotNil(t, err)
assert.Error(t, err)
statsBlob1, err := insertCodec.SerializePkStatsByData(insertData1)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = DeserializeStats([]*Blob{statsBlob1})
assert.Nil(t, err)
assert.NoError(t, err)
statsBlob2, err := insertCodec.SerializePkStatsByData(insertData2)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = DeserializeStats([]*Blob{statsBlob2})
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestDeleteCodec(t *testing.T) {
@ -399,10 +399,10 @@ func TestDeleteCodec(t *testing.T) {
}
deleteData.Append(pk2, 23578294723)
blob, err := deleteCodec.Serialize(CollectionID, 1, 1, deleteData)
assert.Nil(t, err)
assert.NoError(t, err)
pid, sid, data, err := deleteCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pid, int64(1))
assert.Equal(t, sid, int64(1))
assert.Equal(t, data, deleteData)
@ -420,10 +420,10 @@ func TestDeleteCodec(t *testing.T) {
pk2 := NewVarCharPrimaryKey("test2")
deleteData.Append(pk2, 23578294723)
blob, err := deleteCodec.Serialize(CollectionID, 1, 1, deleteData)
assert.Nil(t, err)
assert.NoError(t, err)
pid, sid, data, err := deleteCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pid, int64(1))
assert.Equal(t, sid, int64(1))
assert.Equal(t, data, deleteData)
@ -433,7 +433,7 @@ func TestDeleteCodec(t *testing.T) {
func TestUpgradeDeleteLog(t *testing.T) {
binlogWriter := NewDeleteBinlogWriter(schemapb.DataType_String, CollectionID, 1, 1)
eventWriter, err := binlogWriter.NextDeleteEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
dData := &DeleteData{
Pks: []PrimaryKey{&Int64PrimaryKey{Value: 1}, &Int64PrimaryKey{Value: 2}},
@ -446,7 +446,7 @@ func TestUpgradeDeleteLog(t *testing.T) {
int64PkValue := dData.Pks[i].(*Int64PrimaryKey).Value
ts := dData.Tss[i]
err = eventWriter.AddOneStringToPayload(fmt.Sprintf("%d,%d", int64PkValue, ts))
assert.Nil(t, err)
assert.NoError(t, err)
sizeTotal += binary.Size(int64PkValue)
sizeTotal += binary.Size(ts)
}
@ -455,14 +455,14 @@ func TestUpgradeDeleteLog(t *testing.T) {
binlogWriter.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = binlogWriter.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buffer, err := binlogWriter.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
blob := &Blob{Value: buffer}
dCodec := NewDeleteCodec()
parID, segID, deleteData, err := dCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, int64(1), parID)
assert.Equal(t, int64(1), segID)
assert.ElementsMatch(t, dData.Pks, deleteData.Pks)
@ -485,18 +485,18 @@ func TestDDCodec(t *testing.T) {
DropPartitionEventType,
}
blobs, err := dataDefinitionCodec.Serialize(ts, ddRequests, eventTypeCodes)
assert.Nil(t, err)
assert.NoError(t, err)
for _, blob := range blobs {
blob.Key = fmt.Sprintf("1/data_definition/3/4/5/%d", 99)
}
resultTs, resultRequests, err := dataDefinitionCodec.Deserialize(blobs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resultTs, ts)
assert.Equal(t, resultRequests, ddRequests)
blobs = []*Blob{}
_, _, err = dataDefinitionCodec.Deserialize(blobs)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestTsError(t *testing.T) {
@ -504,7 +504,7 @@ func TestTsError(t *testing.T) {
insertCodec := NewInsertCodecWithSchema(nil)
blobs, err := insertCodec.Serialize(1, 1, insertData)
assert.Nil(t, blobs)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestMemorySize(t *testing.T) {


@ -39,24 +39,24 @@ func TestDescriptorEvent(t *testing.T) {
var buf bytes.Buffer
err := desc.Write(&buf)
assert.NotNil(t, err)
assert.Error(t, err)
sizeTotal := 20 // not important
desc.AddExtra(originalSizeKey, sizeTotal)
// original size not in string format
err = desc.Write(&buf)
assert.NotNil(t, err)
assert.Error(t, err)
desc.AddExtra(originalSizeKey, "not in int format")
err = desc.Write(&buf)
assert.NotNil(t, err)
assert.Error(t, err)
desc.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = desc.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
buffer := buf.Bytes()
@ -139,17 +139,17 @@ func TestInsertEvent(t *testing.T) {
) {
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err := ir1(w)
assert.Nil(t, err)
assert.NoError(t, err)
err = iw(w)
assert.NotNil(t, err)
assert.Error(t, err)
err = ir2(w)
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -161,16 +161,16 @@ func TestInsertEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(dt, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
values, _, err := pR.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, values, ev)
pR.Close()
r, err := newEventReader(dt, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
payload, _, err := r.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, ev)
r.Close()
@ -178,7 +178,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_bool", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Bool)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Bool, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]bool{true, false, true})
@ -194,7 +194,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_int8", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Int8)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Int8, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]int8{1, 2, 3})
@ -210,7 +210,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_int16", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Int16)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Int16, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]int16{1, 2, 3})
@ -226,7 +226,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_int32", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Int32)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Int32, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]int32{1, 2, 3})
@ -242,7 +242,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_int64", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Int64, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]int64{1, 2, 3})
@ -258,7 +258,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_float32", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Float)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Float, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]float32{1, 2, 3})
@ -274,7 +274,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_float64", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_Double)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_Double, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]float64{1, 2, 3})
@ -290,7 +290,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_binary_vector", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_BinaryVector, 16)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_BinaryVector, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]byte{1, 2, 3, 4}, 16)
@ -306,7 +306,7 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_float_vector", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_FloatVector, 2)
assert.Nil(t, err)
assert.NoError(t, err)
insertT(t, schemapb.DataType_FloatVector, w,
func(w *insertEventWriter) error {
return w.AddDataToPayload([]float32{1, 2, 3, 4}, 2)
@ -322,22 +322,22 @@ func TestInsertEvent(t *testing.T) {
t.Run("insert_string", func(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -349,20 +349,20 @@ func TestInsertEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -376,22 +376,22 @@ func TestInsertEvent(t *testing.T) {
func TestDeleteEvent(t *testing.T) {
t.Run("delete_string", func(t *testing.T) {
w, err := newDeleteEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -403,10 +403,10 @@ func TestDeleteEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -414,10 +414,10 @@ func TestDeleteEvent(t *testing.T) {
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -430,26 +430,26 @@ func TestDeleteEvent(t *testing.T) {
func TestCreateCollectionEvent(t *testing.T) {
t.Run("create_event", func(t *testing.T) {
w, err := newCreateCollectionEventWriter(schemapb.DataType_Float)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, w)
})
t.Run("create_collection_timestamp", func(t *testing.T) {
w, err := newCreateCollectionEventWriter(schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -461,16 +461,16 @@ func TestCreateCollectionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(createCollectionEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int64, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
values, _, err := pR.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, values, []int64{1, 2, 3, 4, 5, 6})
pR.Close()
r, err := newEventReader(schemapb.DataType_Int64, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
payload, _, err := r.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, []int64{1, 2, 3, 4, 5, 6})
r.Close()
@ -478,22 +478,22 @@ func TestCreateCollectionEvent(t *testing.T) {
t.Run("create_collection_string", func(t *testing.T) {
w, err := newCreateCollectionEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -505,10 +505,10 @@ func TestCreateCollectionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -516,10 +516,10 @@ func TestCreateCollectionEvent(t *testing.T) {
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -532,26 +532,26 @@ func TestCreateCollectionEvent(t *testing.T) {
func TestDropCollectionEvent(t *testing.T) {
t.Run("drop_event", func(t *testing.T) {
w, err := newDropCollectionEventWriter(schemapb.DataType_Float)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, w)
})
t.Run("drop_collection_timestamp", func(t *testing.T) {
w, err := newDropCollectionEventWriter(schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -563,16 +563,16 @@ func TestDropCollectionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(createCollectionEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int64, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
values, _, err := pR.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, values, []int64{1, 2, 3, 4, 5, 6})
pR.Close()
r, err := newEventReader(schemapb.DataType_Int64, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
payload, _, err := r.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, []int64{1, 2, 3, 4, 5, 6})
r.Close()
@ -580,22 +580,22 @@ func TestDropCollectionEvent(t *testing.T) {
t.Run("drop_collection_string", func(t *testing.T) {
w, err := newDropCollectionEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -607,10 +607,10 @@ func TestDropCollectionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -618,10 +618,10 @@ func TestDropCollectionEvent(t *testing.T) {
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = r.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -634,26 +634,26 @@ func TestDropCollectionEvent(t *testing.T) {
func TestCreatePartitionEvent(t *testing.T) {
t.Run("create_event", func(t *testing.T) {
w, err := newCreatePartitionEventWriter(schemapb.DataType_Float)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, w)
})
t.Run("create_partition_timestamp", func(t *testing.T) {
w, err := newCreatePartitionEventWriter(schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -665,16 +665,16 @@ func TestCreatePartitionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(createCollectionEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int64, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
values, _, err := pR.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, values, []int64{1, 2, 3, 4, 5, 6})
pR.Close()
r, err := newEventReader(schemapb.DataType_Int64, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
payload, _, err := r.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, []int64{1, 2, 3, 4, 5, 6})
r.Close()
@ -682,22 +682,22 @@ func TestCreatePartitionEvent(t *testing.T) {
t.Run("create_partition_string", func(t *testing.T) {
w, err := newCreatePartitionEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -709,10 +709,10 @@ func TestCreatePartitionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -720,10 +720,10 @@ func TestCreatePartitionEvent(t *testing.T) {
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -736,26 +736,26 @@ func TestCreatePartitionEvent(t *testing.T) {
func TestDropPartitionEvent(t *testing.T) {
t.Run("drop_event", func(t *testing.T) {
w, err := newDropPartitionEventWriter(schemapb.DataType_Float)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, w)
})
t.Run("drop_partition_timestamp", func(t *testing.T) {
w, err := newDropPartitionEventWriter(schemapb.DataType_Int64)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -767,16 +767,16 @@ func TestDropPartitionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(createCollectionEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int64, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
values, _, err := pR.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, values, []int64{1, 2, 3, 4, 5, 6})
pR.Close()
r, err := newEventReader(schemapb.DataType_Int64, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
payload, _, err := r.GetDataFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, []int64{1, 2, 3, 4, 5, 6})
r.Close()
@ -784,22 +784,22 @@ func TestDropPartitionEvent(t *testing.T) {
t.Run("drop_partition_string", func(t *testing.T) {
w, err := newDropPartitionEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("567890")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddOneStringToPayload("abcdefg")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.AddDataToPayload([]int{1, 2, 3})
assert.NotNil(t, err)
assert.Error(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -811,10 +811,10 @@ func TestDropPartitionEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(insertEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
s, err := pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -822,10 +822,10 @@ func TestDropPartitionEvent(t *testing.T) {
pR.Close()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
s, err = pR.GetStringFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, s[0], "1234")
assert.Equal(t, s[1], "567890")
assert.Equal(t, s[2], "abcdefg")
@ -839,19 +839,19 @@ func TestDropPartitionEvent(t *testing.T) {
func TestIndexFileEvent(t *testing.T) {
t.Run("index_file_string", func(t *testing.T) {
w, err := newIndexFileEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
payload := funcutil.GenRandomBytes()
err = w.AddOneStringToPayload(typeutil.UnsafeBytes2str(payload))
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -863,32 +863,32 @@ func TestIndexFileEvent(t *testing.T) {
payloadOffset := binary.Size(eventHeader{}) + binary.Size(indexFileEventData{})
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_String, pBuf)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pR.numRows, int64(1))
value, err := pR.GetStringFromPayload()
assert.Equal(t, len(value), 1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, typeutil.UnsafeStr2bytes(value[0]))
pR.Close()
})
t.Run("index_file_int8", func(t *testing.T) {
w, err := newIndexFileEventWriter(schemapb.DataType_Int8)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
payload := funcutil.GenRandomBytes()
err = w.AddByteToPayload(payload)
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -901,28 +901,28 @@ func TestIndexFileEvent(t *testing.T) {
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int8, pBuf)
assert.Equal(t, pR.numRows, int64(len(payload)))
assert.Nil(t, err)
assert.NoError(t, err)
value, err := pR.GetByteFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, value)
pR.Close()
})
t.Run("index_file_int8_large", func(t *testing.T) {
w, err := newIndexFileEventWriter(schemapb.DataType_Int8)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
payload := funcutil.GenRandomBytesWithLength(1000)
err = w.AddByteToPayload(payload)
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
@ -935,9 +935,9 @@ func TestIndexFileEvent(t *testing.T) {
pBuf := wBuf[payloadOffset:]
pR, err := NewPayloadReader(schemapb.DataType_Int8, pBuf)
assert.Equal(t, pR.numRows, int64(len(payload)))
assert.Nil(t, err)
assert.NoError(t, err)
value, err := pR.GetByteFromPayload()
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, payload, value)
pR.Close()
})
@ -950,93 +950,93 @@ func TestDescriptorEventTsError(t *testing.T) {
}
buf := new(bytes.Buffer)
err := insertData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
insertData.StartTimestamp = 1000
err = insertData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
deleteData := &deleteEventData{
StartTimestamp: 0,
EndTimestamp: 0,
}
err = deleteData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
deleteData.StartTimestamp = 1000
err = deleteData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
createCollectionData := &createCollectionEventData{
StartTimestamp: 0,
EndTimestamp: 0,
}
err = createCollectionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
createCollectionData.StartTimestamp = 1000
err = createCollectionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
dropCollectionData := &dropCollectionEventData{
StartTimestamp: 0,
EndTimestamp: 0,
}
err = dropCollectionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
dropCollectionData.StartTimestamp = 1000
err = dropCollectionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
createPartitionData := &createPartitionEventData{
StartTimestamp: 0,
EndTimestamp: 0,
}
err = createPartitionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
createPartitionData.StartTimestamp = 1000
err = createPartitionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
dropPartitionData := &dropPartitionEventData{
StartTimestamp: 0,
EndTimestamp: 0,
}
err = dropPartitionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
dropPartitionData.StartTimestamp = 1000
err = dropPartitionData.WriteEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestReadFixPartError(t *testing.T) {
buf := new(bytes.Buffer)
_, err := readEventHeader(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readInsertEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readDeleteEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readCreateCollectionEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readDropCollectionEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readCreatePartitionEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readDropPartitionEventDataFixPart(buf)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = readDescriptorEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
event := newDescriptorEventData()
err = binary.Write(buf, common.Endian, event.DescriptorEventDataFixPart)
assert.Nil(t, err)
assert.NoError(t, err)
_, err = readDescriptorEventData(buf)
assert.NotNil(t, err)
assert.Error(t, err)
size := getEventFixPartSize(EventTypeCode(10))
assert.Equal(t, size, int32(-1))
@ -1046,68 +1046,68 @@ func TestEventReaderError(t *testing.T) {
buf := new(bytes.Buffer)
r, err := newEventReader(schemapb.DataType_Int64, buf)
assert.Nil(t, r)
assert.NotNil(t, err)
assert.Error(t, err)
header := newEventHeader(DescriptorEventType)
err = header.Write(buf)
assert.Nil(t, err)
assert.NoError(t, err)
r, err = newEventReader(schemapb.DataType_Int64, buf)
assert.Nil(t, r)
assert.NotNil(t, err)
assert.Error(t, err)
buf = new(bytes.Buffer)
header = newEventHeader(InsertEventType)
err = header.Write(buf)
assert.Nil(t, err)
assert.NoError(t, err)
r, err = newEventReader(schemapb.DataType_Int64, buf)
assert.Nil(t, r)
assert.NotNil(t, err)
assert.Error(t, err)
buf = new(bytes.Buffer)
header = newEventHeader(InsertEventType)
header.EventLength = getEventFixPartSize(InsertEventType) + int32(binary.Size(header))
err = header.Write(buf)
assert.Nil(t, err)
assert.NoError(t, err)
insertData := &insertEventData{
StartTimestamp: 1000,
EndTimestamp: 2000,
}
err = binary.Write(buf, common.Endian, insertData)
assert.Nil(t, err)
assert.NoError(t, err)
r, err = newEventReader(schemapb.DataType_Int64, buf)
assert.Nil(t, r)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestEventClose(t *testing.T) {
w, err := newInsertEventWriter(schemapb.DataType_String)
assert.Nil(t, err)
assert.NoError(t, err)
w.SetEventTimestamp(tsoutil.ComposeTS(10, 0), tsoutil.ComposeTS(100, 0))
err = w.AddDataToPayload("1234")
assert.Nil(t, err)
assert.NoError(t, err)
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
var buf bytes.Buffer
err = w.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
wBuf := buf.Bytes()
r, err := newEventReader(schemapb.DataType_String, bytes.NewBuffer(wBuf))
assert.Nil(t, err)
assert.NoError(t, err)
r.Close()
err = r.readHeader()
assert.NotNil(t, err)
assert.Error(t, err)
err = r.readData()
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestIndexFileEventDataError(t *testing.T) {
@ -1119,12 +1119,12 @@ func TestIndexFileEventDataError(t *testing.T) {
event.SetEventTimestamp(0, 1)
// start timestamp not set
err = event.WriteEventData(&buffer)
assert.NotNil(t, err)
assert.Error(t, err)
event.SetEventTimestamp(1, 0)
// end timestamp not set
err = event.WriteEventData(&buffer)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestReadIndexFileEventDataFixPart(t *testing.T) {
@ -1132,5 +1132,5 @@ func TestReadIndexFileEventDataFixPart(t *testing.T) {
var buffer bytes.Buffer
// buffer is empty
_, err = readIndexFileEventDataFixPart(&buffer)
assert.NotNil(t, err)
assert.Error(t, err)
}


@ -39,7 +39,7 @@ func TestEventTypeCode_String(t *testing.T) {
func TestSizeofStruct(t *testing.T) {
var buf bytes.Buffer
err := binary.Write(&buf, common.Endian, baseEventHeader{})
assert.Nil(t, err)
assert.NoError(t, err)
s1 := binary.Size(baseEventHeader{})
s2 := binary.Size(&baseEventHeader{})
assert.Equal(t, s1, s2)
@ -52,40 +52,40 @@ func TestSizeofStruct(t *testing.T) {
PostHeaderLengths: []uint8{0, 1, 2, 3},
}
err = de.Write(&buf)
assert.Nil(t, err)
assert.NoError(t, err)
s3 := binary.Size(de.DescriptorEventDataFixPart) + binary.Size(de.PostHeaderLengths) + binary.Size(de.ExtraLength) + int(de.ExtraLength)
assert.Equal(t, s3, buf.Len())
}
func TestEventWriter(t *testing.T) {
insertEvent, err := newInsertEventWriter(schemapb.DataType_Int32)
assert.Nil(t, err)
assert.NoError(t, err)
insertEvent.Close()
insertEvent, err = newInsertEventWriter(schemapb.DataType_Int32)
assert.Nil(t, err)
assert.NoError(t, err)
defer insertEvent.Close()
err = insertEvent.AddInt64ToPayload([]int64{1, 1})
assert.NotNil(t, err)
assert.Error(t, err)
err = insertEvent.AddInt32ToPayload([]int32{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
nums, err := insertEvent.GetPayloadLengthFromWriter()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, nums)
err = insertEvent.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
length, err := insertEvent.GetMemoryUsageInBytes()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, length, insertEvent.EventLength)
err = insertEvent.AddInt32ToPayload([]int32{1})
assert.NotNil(t, err)
assert.Error(t, err)
buffer := new(bytes.Buffer)
insertEvent.SetEventTimestamp(100, 200)
err = insertEvent.Write(buffer)
assert.Nil(t, err)
assert.NoError(t, err)
length, err = insertEvent.GetMemoryUsageInBytes()
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, length, buffer.Len())
insertEvent.Close()
}


@ -55,10 +55,10 @@ func TestIndexFileBinlogCodec(t *testing.T) {
codec := NewIndexFileBinlogCodec()
serializedBlobs, err := codec.Serialize(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexParams, indexName, indexID, datas)
assert.Nil(t, err)
assert.NoError(t, err)
idxBuildID, v, collID, parID, segID, fID, params, idxName, idxID, blobs, err := codec.DeserializeImpl(serializedBlobs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, indexBuildID, idxBuildID)
assert.Equal(t, version, v)
assert.Equal(t, collectionID, collID)
@ -74,7 +74,7 @@ func TestIndexFileBinlogCodec(t *testing.T) {
assert.ElementsMatch(t, datas, blobs)
blobs, indexParams, indexName, indexID, err = codec.Deserialize(serializedBlobs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.ElementsMatch(t, datas, blobs)
for key, value := range indexParams {
assert.Equal(t, value, params[key])
@ -84,7 +84,7 @@ func TestIndexFileBinlogCodec(t *testing.T) {
// empty
_, _, _, _, _, _, _, _, _, _, err = codec.DeserializeImpl(nil)
assert.NotNil(t, err)
assert.Error(t, err)
}
func TestIndexFileBinlogCodecError(t *testing.T) {
@ -93,7 +93,7 @@ func TestIndexFileBinlogCodecError(t *testing.T) {
// failed to read binlog
codec := NewIndexFileBinlogCodec()
_, _, _, _, err = codec.Deserialize([]*Blob{{Key: "key", Value: []byte("not in binlog format")}})
assert.NotNil(t, err)
assert.Error(t, err)
indexBuildID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
version := int64(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
@ -113,7 +113,7 @@ func TestIndexFileBinlogCodecError(t *testing.T) {
}
_, err = codec.Serialize(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexParams, indexName, indexID, datas)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestIndexCodec(t *testing.T) {
@ -139,11 +139,11 @@ func TestIndexCodec(t *testing.T) {
"k1": "v1", "k2": "v2",
}
blobsInput, err := indexCodec.Serialize(blobs, indexParams, "index_test_name", 1234)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 4, len(blobsInput))
assert.EqualValues(t, IndexParamsKey, blobsInput[3].Key)
blobsOutput, indexParamsOutput, indexName, indexID, err := indexCodec.Deserialize(blobsInput)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, 3, len(blobsOutput))
for i := 0; i < 3; i++ {
assert.EqualValues(t, blobs[i], blobsOutput[i])
@ -154,5 +154,5 @@ func TestIndexCodec(t *testing.T) {
blobs = []*Blob{}
_, _, _, _, err = indexCodec.Deserialize(blobs)
assert.NotNil(t, err)
assert.Error(t, err)
}


@ -107,7 +107,7 @@ func TestLocalCM(t *testing.T) {
for _, test := range loadWithPrefixTests {
t.Run(test.description, func(t *testing.T) {
gotk, gotv, err := testCM.ReadWithPrefix(ctx, path.Join(localPath, testLoadRoot, test.prefix))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, len(test.expectedValue), len(gotk))
assert.Equal(t, len(test.expectedValue), len(gotv))
assert.ElementsMatch(t, test.expectedValue, gotv)
@ -132,7 +132,7 @@ func TestLocalCM(t *testing.T) {
}
if test.isvalid {
got, err := testCM.MultiRead(ctx, test.multiKeys)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, test.expectedValue, got)
} else {
got, err := testCM.MultiRead(ctx, test.multiKeys)
@ -151,17 +151,17 @@ func TestLocalCM(t *testing.T) {
key1 := path.Join(localPath, testMultiSaveRoot, "key_1")
err := testCM.Write(ctx, key1, []byte("111"))
assert.Nil(t, err)
assert.NoError(t, err)
key2 := path.Join(localPath, testMultiSaveRoot, "key_2")
err = testCM.Write(ctx, key2, []byte("222"))
assert.Nil(t, err)
assert.NoError(t, err)
val, err := testCM.Read(ctx, key1)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte("111"), val)
val, err = testCM.Read(ctx, key2)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte("222"), val)
// localPath/testMultiSaveRoot/key_1 is a file already exist, use its path as directory is not allowed
@ -177,7 +177,7 @@ func TestLocalCM(t *testing.T) {
defer testCM.RemoveWithPrefix(ctx, testCM.RootPath())
err := testCM.Write(ctx, path.Join(localPath, testMultiSaveRoot, "key_1"), []byte("111"))
assert.Nil(t, err)
assert.NoError(t, err)
kvs := map[string][]byte{
path.Join(localPath, testMultiSaveRoot, "key_1"): []byte("123"),
@ -185,10 +185,10 @@ func TestLocalCM(t *testing.T) {
}
err = testCM.MultiWrite(ctx, kvs)
assert.Nil(t, err)
assert.NoError(t, err)
val, err := testCM.Read(ctx, path.Join(localPath, testMultiSaveRoot, "key_1"))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte("123"), val)
kvs = map[string][]byte{
@ -272,7 +272,7 @@ func TestLocalCM(t *testing.T) {
require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv)
err = testCM.MultiRemove(ctx, multiRemoveTest)
assert.Nil(t, err)
assert.NoError(t, err)
for _, k := range multiRemoveTest {
v, err := testCM.Read(ctx, k)
@ -496,7 +496,7 @@ func TestLocalCM(t *testing.T) {
// localPath/testPrefix/bcd
testPrefix1 := path.Join(localPath, testPrefix)
dirs, mods, err := testCM.ListWithPrefix(ctx, testPrefix1+"/", false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 3, len(dirs))
assert.Equal(t, 3, len(mods))
assert.Contains(t, dirs, filepath.Dir(key1))
@ -510,7 +510,7 @@ func TestLocalCM(t *testing.T) {
// localPath/testPrefix/abd
// localPath/testPrefix/bcd
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix1+"/", true)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 4, len(dirs))
assert.Equal(t, 4, len(mods))
assert.Contains(t, dirs, key1)
@ -524,7 +524,7 @@ func TestLocalCM(t *testing.T) {
// localPath/testPrefix/abd
testPrefix2 := path.Join(localPath, testPrefix, "a")
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 2, len(dirs))
assert.Equal(t, 2, len(mods))
assert.Contains(t, dirs, filepath.Dir(key1))
@ -536,7 +536,7 @@ func TestLocalCM(t *testing.T) {
// localPath/testPrefix/abc/deg
// localPath/testPrefix/abd
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, true)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 3, len(dirs))
assert.Equal(t, 3, len(mods))
assert.Contains(t, dirs, key1)
@ -552,7 +552,7 @@ func TestLocalCM(t *testing.T) {
// return:
// localPath/testPrefix
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix1, false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(dirs))
assert.Equal(t, 1, len(mods))
assert.Contains(t, dirs, filepath.Dir(key4))
@ -570,7 +570,7 @@ func TestLocalCM(t *testing.T) {
// return:
// localPath/testPrefix/abc
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, false)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 1, len(dirs))
assert.Equal(t, 1, len(mods))
assert.Contains(t, dirs, filepath.Dir(key1))
@ -578,7 +578,7 @@ func TestLocalCM(t *testing.T) {
// recursive find localPath/testPrefix/a*
// no file returned
dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, true)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(dirs))
assert.Equal(t, 0, len(mods))


@ -214,11 +214,11 @@ func TestMinIOCM(t *testing.T) {
defer cancel()
testCM, err := newMinIOChunkManager(ctx, testBucket, testMultiSaveRoot)
assert.Nil(t, err)
assert.NoError(t, err)
defer testCM.RemoveWithPrefix(ctx, testMultiSaveRoot)
err = testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_1"), []byte("111"))
assert.Nil(t, err)
assert.NoError(t, err)
kvs := map[string][]byte{
path.Join(testMultiSaveRoot, "key_1"): []byte("123"),
@ -226,10 +226,10 @@ func TestMinIOCM(t *testing.T) {
}
err = testCM.MultiWrite(ctx, kvs)
assert.Nil(t, err)
assert.NoError(t, err)
val, err := testCM.Read(ctx, path.Join(testMultiSaveRoot, "key_1"))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte("123"), val)
})
@ -239,7 +239,7 @@ func TestMinIOCM(t *testing.T) {
defer cancel()
testCM, err := newMinIOChunkManager(ctx, testBucket, testRemoveRoot)
assert.Nil(t, err)
assert.NoError(t, err)
defer testCM.RemoveWithPrefix(ctx, testRemoveRoot)
prepareTests := []struct {
@ -588,7 +588,7 @@ func TestMinioChunkManager_Read(t *testing.T) {
assert.Equal(t, value[i], byte(i))
}
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestMinioChunkManager_ReadEOF(t *testing.T) {
@ -604,7 +604,7 @@ func TestMinioChunkManager_ReadEOF(t *testing.T) {
for i := 0; i < 10; i++ {
assert.Equal(t, value[i], byte(i))
}
assert.Nil(t, err)
assert.NoError(t, err)
}
type MockReader struct {


@ -23,21 +23,21 @@ func TestVarCharPrimaryKey(t *testing.T) {
// test GT
err := testPk.SetValue("bivlus")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, pk.GT(testPk))
// test LT
err = testPk.SetValue("mivlut")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, pk.LT(testPk))
t.Run("unmarshal", func(t *testing.T) {
blob, err := json.Marshal(pk)
assert.Nil(t, err)
assert.NoError(t, err)
unmarshalledPk := &VarCharPrimaryKey{}
err = json.Unmarshal(blob, unmarshalledPk)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pk.Value, unmarshalledPk.Value)
})
}
@ -55,21 +55,21 @@ func TestInt64PrimaryKey(t *testing.T) {
// test GT
err := testPk.SetValue(int64(10))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, pk.GT(testPk))
// test LT
err = testPk.SetValue(int64(200))
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, true, pk.LT(testPk))
t.Run("unmarshal", func(t *testing.T) {
blob, err := json.Marshal(pk)
assert.Nil(t, err)
assert.NoError(t, err)
unmarshalledPk := &Int64PrimaryKey{}
err = json.Unmarshal(blob, unmarshalledPk)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pk.Value, unmarshalledPk.Value)
})
}
@ -111,7 +111,7 @@ func TestParseFieldData2PrimaryKeys(t *testing.T) {
}
pks, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Nil(t, err)
assert.NoError(t, err)
assert.ElementsMatch(t, pks, testPks)
})
@ -152,7 +152,7 @@ func TestParseFieldData2PrimaryKeys(t *testing.T) {
}
pks, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Nil(t, err)
assert.NoError(t, err)
assert.ElementsMatch(t, pks, testPks)
})


@ -42,45 +42,45 @@ func TestPrintBinlogFilesInt64(t *testing.T) {
curTS := time.Now().UnixNano() / int64(time.Millisecond)
e1, err := w.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int64{1, 2, 3})
assert.Nil(t, err)
assert.NoError(t, err)
err = e1.AddDataToPayload([]int32{4, 5, 6})
assert.NotNil(t, err)
assert.Error(t, err)
err = e1.AddDataToPayload([]int64{4, 5, 6})
assert.Nil(t, err)
assert.NoError(t, err)
e1.SetEventTimestamp(tsoutil.ComposeTS(curTS+10*60*1000, 0), tsoutil.ComposeTS(curTS+20*60*1000, 0))
e2, err := w.NextInsertEventWriter()
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]int64{7, 8, 9})
assert.Nil(t, err)
assert.NoError(t, err)
err = e2.AddDataToPayload([]bool{true, false, true})
assert.NotNil(t, err)
assert.Error(t, err)
err = e2.AddDataToPayload([]int64{10, 11, 12})
assert.Nil(t, err)
assert.NoError(t, err)
e2.SetEventTimestamp(tsoutil.ComposeTS(curTS+30*60*1000, 0), tsoutil.ComposeTS(curTS+40*60*1000, 0))
w.SetEventTimeStamp(tsoutil.ComposeTS(curTS, 0), tsoutil.ComposeTS(curTS+3600*1000, 0))
_, err = w.GetBuffer()
assert.NotNil(t, err)
assert.Error(t, err)
sizeTotal := 20000000
w.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
err = w.Finish()
assert.Nil(t, err)
assert.NoError(t, err)
buf, err := w.GetBuffer()
assert.Nil(t, err)
assert.NoError(t, err)
w.Close()
fd, err := ioutil.TempFile("", "binlog_int64.db")
defer os.RemoveAll(fd.Name())
assert.Nil(t, err)
assert.NoError(t, err)
num, err := fd.Write(buf)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(buf))
err = fd.Close()
assert.Nil(t, err)
assert.NoError(t, err)
}
@ -269,33 +269,33 @@ func TestPrintBinlogFiles(t *testing.T) {
},
}
firstBlobs, err := insertCodec.Serialize(1, 1, insertDataFirst)
assert.Nil(t, err)
assert.NoError(t, err)
var binlogFiles []string
for index, blob := range firstBlobs {
blob.Key = fmt.Sprintf("1/insert_log/2/3/4/5/%d", 100)
fileName := fmt.Sprintf("/tmp/firstblob_%d.db", index)
binlogFiles = append(binlogFiles, fileName)
fd, err := os.Create(fileName)
assert.Nil(t, err)
assert.NoError(t, err)
num, err := fd.Write(blob.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(blob.GetValue()))
err = fd.Close()
assert.Nil(t, err)
assert.NoError(t, err)
}
secondBlobs, err := insertCodec.Serialize(1, 1, insertDataSecond)
assert.Nil(t, err)
assert.NoError(t, err)
for index, blob := range secondBlobs {
blob.Key = fmt.Sprintf("1/insert_log/2/3/4/5/%d", 99)
fileName := fmt.Sprintf("/tmp/secondblob_%d.db", index)
binlogFiles = append(binlogFiles, fileName)
fd, err := os.Create(fileName)
assert.Nil(t, err)
assert.NoError(t, err)
num, err := fd.Write(blob.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(blob.GetValue()))
err = fd.Close()
assert.Nil(t, err)
assert.NoError(t, err)
}
binlogFiles = append(binlogFiles, "test")
@ -331,7 +331,7 @@ func TestPrintDDFiles(t *testing.T) {
DbID: UniqueID(0),
}
createCollString, err := proto.Marshal(&createCollReq)
assert.Nil(t, err)
assert.NoError(t, err)
dropCollReq := msgpb.DropCollectionRequest{
Base: &commonpb.MsgBase{
@ -346,7 +346,7 @@ func TestPrintDDFiles(t *testing.T) {
DbID: UniqueID(0),
}
dropCollString, err := proto.Marshal(&dropCollReq)
assert.Nil(t, err)
assert.NoError(t, err)
createPartitionReq := msgpb.CreatePartitionRequest{
Base: &commonpb.MsgBase{
@ -363,7 +363,7 @@ func TestPrintDDFiles(t *testing.T) {
DbID: UniqueID(0),
}
createPartitionString, err := proto.Marshal(&createPartitionReq)
assert.Nil(t, err)
assert.NoError(t, err)
dropPartitionReq := msgpb.DropPartitionRequest{
Base: &commonpb.MsgBase{
@ -380,7 +380,7 @@ func TestPrintDDFiles(t *testing.T) {
DbID: UniqueID(0),
}
dropPartitionString, err := proto.Marshal(&dropPartitionReq)
assert.Nil(t, err)
assert.NoError(t, err)
ddRequests := []string{
string(createCollString[:]),
string(dropCollString[:]),
@ -394,22 +394,22 @@ func TestPrintDDFiles(t *testing.T) {
DropPartitionEventType,
}
blobs, err := dataDefinitionCodec.Serialize(ts, ddRequests, eventTypeCodes)
assert.Nil(t, err)
assert.NoError(t, err)
var binlogFiles []string
for index, blob := range blobs {
blob.Key = fmt.Sprintf("1/data_definition/3/4/5/%d", 99)
fileName := fmt.Sprintf("/tmp/ddblob_%d.db", index)
binlogFiles = append(binlogFiles, fileName)
fd, err := os.Create(fileName)
assert.Nil(t, err)
assert.NoError(t, err)
num, err := fd.Write(blob.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(blob.GetValue()))
err = fd.Close()
assert.Nil(t, err)
assert.NoError(t, err)
}
resultTs, resultRequests, err := dataDefinitionCodec.Deserialize(blobs)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, resultTs, ts)
assert.Equal(t, resultRequests, ddRequests)
@ -449,23 +449,23 @@ func TestPrintIndexFile(t *testing.T) {
codec := NewIndexFileBinlogCodec()
serializedBlobs, err := codec.Serialize(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexParams, indexName, indexID, datas)
assert.Nil(t, err)
assert.NoError(t, err)
var binlogFiles []string
for index, blob := range serializedBlobs {
fileName := fmt.Sprintf("/tmp/index_blob_%d.binlog", index)
binlogFiles = append(binlogFiles, fileName)
fd, err := os.Create(fileName)
assert.Nil(t, err)
assert.NoError(t, err)
num, err := fd.Write(blob.GetValue())
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, num, len(blob.GetValue()))
err = fd.Close()
assert.Nil(t, err)
assert.NoError(t, err)
}
err = PrintBinlogFiles(binlogFiles)
assert.Nil(t, err)
assert.NoError(t, err)
// remove tmp files
for _, file := range binlogFiles {

View File

@ -39,7 +39,7 @@ func TestStatsWriter_Int64PrimaryKey(t *testing.T) {
sr := &StatsReader{}
sr.SetBuffer(b)
stats, err := sr.GetPrimaryKeyStats()
assert.Nil(t, err)
assert.NoError(t, err)
maxPk := &Int64PrimaryKey{
Value: 9,
}
@ -58,7 +58,7 @@ func TestStatsWriter_Int64PrimaryKey(t *testing.T) {
Data: []int64{},
}
err = sw.GenerateByData(common.RowIDField, schemapb.DataType_Int64, msgs)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestStatsWriter_BF(t *testing.T) {
@ -102,7 +102,7 @@ func TestStatsWriter_VarCharPrimaryKey(t *testing.T) {
sr := &StatsReader{}
sr.SetBuffer(b)
stats, err := sr.GetPrimaryKeyStats()
assert.Nil(t, err)
assert.NoError(t, err)
maxPk := NewVarCharPrimaryKey("milvus")
minPk := NewVarCharPrimaryKey("abd")
assert.Equal(t, true, stats.MaxPk.EQ(maxPk))
@ -115,7 +115,7 @@ func TestStatsWriter_VarCharPrimaryKey(t *testing.T) {
Data: []int64{},
}
err = sw.GenerateByData(common.RowIDField, schemapb.DataType_Int64, msgs)
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestStatsWriter_UpgradePrimaryKey(t *testing.T) {
@ -136,11 +136,11 @@ func TestStatsWriter_UpgradePrimaryKey(t *testing.T) {
stats.BF.Add(b)
}
blob, err := json.Marshal(stats)
assert.Nil(t, err)
assert.NoError(t, err)
sr := &StatsReader{}
sr.SetBuffer(blob)
unmarshaledStats, err := sr.GetPrimaryKeyStats()
assert.Nil(t, err)
assert.NoError(t, err)
maxPk := &Int64PrimaryKey{
Value: 9,
}

View File

@ -851,7 +851,7 @@ func TestRowBasedInsertMsgToInsertData(t *testing.T) {
msg, _, columns := genRowBasedInsertMsg(numRows, fVecDim, bVecDim)
idata, err := RowBasedInsertMsgToInsertData(msg, schema)
assert.Nil(t, err)
assert.NoError(t, err)
for idx, fID := range fieldIDs {
column := columns[idx]
fData, ok := idata.Data[fID]
@ -869,7 +869,7 @@ func TestColumnBasedInsertMsgToInsertData(t *testing.T) {
msg, _, columns := genColumnBasedInsertMsg(schema, numRows, fVecDim, bVecDim)
idata, err := ColumnBasedInsertMsgToInsertData(msg, schema)
assert.Nil(t, err)
assert.NoError(t, err)
for idx, fID := range fieldIDs {
column := columns[idx]
fData, ok := idata.Data[fID]
@ -888,7 +888,7 @@ func TestInsertMsgToInsertData(t *testing.T) {
msg, _, columns := genRowBasedInsertMsg(numRows, fVecDim, bVecDim)
idata, err := InsertMsgToInsertData(msg, schema)
assert.Nil(t, err)
assert.NoError(t, err)
for idx, fID := range fieldIDs {
column := columns[idx]
fData, ok := idata.Data[fID]
@ -906,7 +906,7 @@ func TestInsertMsgToInsertData2(t *testing.T) {
msg, _, columns := genColumnBasedInsertMsg(schema, numRows, fVecDim, bVecDim)
idata, err := InsertMsgToInsertData(msg, schema)
assert.Nil(t, err)
assert.NoError(t, err)
for idx, fID := range fieldIDs {
column := columns[idx]
fData, ok := idata.Data[fID]

View File

@ -152,17 +152,17 @@ func TestNewVectorChunkManager(t *testing.T) {
bucketName := "vector-chunk-manager"
rcm, err := newMinIOChunkManager(ctx, bucketName, "")
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, rcm)
lcm := NewLocalChunkManager(RootPath(localPath))
vcm, err := NewVectorChunkManager(ctx, lcm, rcm, 16, true)
assert.Equal(t, "", vcm.RootPath())
assert.Nil(t, err)
assert.NoError(t, err)
assert.NotNil(t, vcm)
vcm, err = NewVectorChunkManager(ctx, lcm, rcm, -1, true)
assert.NotNil(t, err)
assert.Error(t, err)
assert.Nil(t, vcm)
}
@ -177,15 +177,15 @@ func TestVectorChunkManager_GetPath(t *testing.T) {
key := path.Join(localPath, "1")
err = vcm.Write(ctx, key, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
pathGet, err := vcm.Path(ctx, key)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pathGet, key)
err = vcm.cacheStorage.Write(ctx, key, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
pathGet, err = vcm.Path(ctx, key)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, pathGet, key)
err = vcm.RemoveWithPrefix(ctx, localPath)
@ -206,15 +206,15 @@ func TestVectorChunkManager_GetSize(t *testing.T) {
key := path.Join(localPath, "1")
err = vcm.Write(ctx, key, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
sizeGet, err := vcm.Size(ctx, key)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, sizeGet, 1)
err = vcm.cacheStorage.Write(ctx, key, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
sizeGet, err = vcm.Size(ctx, key)
assert.Nil(t, err)
assert.NoError(t, err)
assert.EqualValues(t, sizeGet, 1)
err = vcm.RemoveWithPrefix(ctx, localPath)
@ -237,7 +237,7 @@ func TestVectorChunkManager_Write(t *testing.T) {
key1 := path.Join(localPath, "key_1")
key2 := path.Join(localPath, "key_2")
err = vcm.Write(ctx, key1, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
exist, err := vcm.Exist(ctx, key1)
assert.True(t, exist)
@ -277,10 +277,10 @@ func TestVectorChunkManager_Remove(t *testing.T) {
key1 := path.Join(localPath, "key_1")
key2 := path.Join(localPath, "key_2")
err = vcm.cacheStorage.Write(ctx, key1, []byte{1})
assert.Nil(t, err)
assert.NoError(t, err)
err = vcm.Remove(ctx, key1)
assert.Nil(t, err)
assert.NoError(t, err)
exist, err := vcm.Exist(ctx, key1)
assert.False(t, exist)
@ -367,15 +367,15 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.NotNil(t, binlogs)
for _, binlog := range binlogs {
err := vcm.vectorStorage.Write(ctx, binlog.Key, binlog.Value)
assert.Nil(t, err)
assert.NoError(t, err)
}
content, err = vcm.Read(ctx, "108")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte{0, 255}, content)
content, err = vcm.Read(ctx, "109")
assert.Nil(t, err)
assert.NoError(t, err)
floatResult := make([]float32, 0)
for i := 0; i < len(content)/4; i++ {
singleData := typeutil.BytesToFloat32(content[i*4 : i*4+4])
@ -384,7 +384,7 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
contents, err := vcm.MultiRead(ctx, []string{"108", "109"})
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, []byte{0, 255}, contents[0])
floatResult = make([]float32, 0)
@ -395,11 +395,11 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
keys, contents, err := vcm.ReadWithPrefix(ctx, "10")
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, "101", keys[0])
assert.Equal(t, []byte{3, 4}, contents[0])
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, "108", keys[1])
assert.Equal(t, []byte{0, 255}, contents[1])
@ -412,7 +412,7 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
content, err = vcm.ReadAt(ctx, "109", 8*4, 8*4)
assert.Nil(t, err)
assert.NoError(t, err)
floatResult = make([]float32, 0)
for i := 0; i < len(content)/4; i++ {
@ -426,15 +426,15 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.Nil(t, content)
content, err = vcm.ReadAt(ctx, "109", 8*4, 8*4)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 32, len(content))
if localCache {
r, err := vcm.Mmap(ctx, "109")
assert.Nil(t, err)
assert.NoError(t, err)
p := make([]byte, 32)
n, err := r.ReadAt(p, 32)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, n, 32)
r, err = vcm.Mmap(ctx, "not exist")

View File

@ -39,26 +39,26 @@ func TestGlobalTSOAllocator_Initialize(t *testing.T) {
}
etcdEndpoints := strings.Split(endpoints, ",")
etcdCli, err := etcd.GetRemoteEtcdClient(etcdEndpoints)
assert.Nil(t, err)
assert.NoError(t, err)
defer etcdCli.Close()
etcdKV := tsoutil2.NewTSOKVBase(etcdCli, "/test/root/kv", "tsoTest")
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
err = gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
time.Sleep(3 * time.Second)
err = gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
t.Run("GenerateTSO", func(t *testing.T) {
count := 1000
perCount := uint32(100)
startTs, err := gTestTsoAllocator.GenerateTSO(perCount)
assert.Nil(t, err)
assert.NoError(t, err)
lastPhysical, lastLogical := tsoutil.ParseTS(startTs)
for i := 0; i < count; i++ {
ts, _ := gTestTsoAllocator.GenerateTSO(perCount)
@ -86,17 +86,17 @@ func TestGlobalTSOAllocator_All(t *testing.T) {
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
t.Run("Initialize", func(t *testing.T) {
err := gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GenerateTSO", func(t *testing.T) {
count := 1000
perCount := uint32(100)
startTs, err := gTestTsoAllocator.GenerateTSO(perCount)
assert.Nil(t, err)
assert.NoError(t, err)
lastPhysical, lastLogical := tsoutil.ParseTS(startTs)
for i := 0; i < count; i++ {
ts, err2 := gTestTsoAllocator.GenerateTSO(perCount)
@ -117,7 +117,7 @@ func TestGlobalTSOAllocator_All(t *testing.T) {
startTs, err := gTestTsoAllocator.GenerateTSO(uint32(maxL))
step := 10
perCount := uint32(step) << 18 // 10 ms
assert.Nil(t, err)
assert.NoError(t, err)
lastPhysical, lastLogical := tsoutil.ParseTS(startTs)
for i := 0; i < count; i++ {
ts, _ := gTestTsoAllocator.GenerateTSO(perCount)
@ -127,7 +127,7 @@ func TestGlobalTSOAllocator_All(t *testing.T) {
lastPhysical = physical
}
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
})
gTestTsoAllocator.SetLimitMaxLogic(true)
@ -138,22 +138,22 @@ func TestGlobalTSOAllocator_All(t *testing.T) {
nextTime := curTime.Add(2 * time.Second)
physical := nextTime.UnixNano() / int64(time.Millisecond)
err := gTestTsoAllocator.SetTSO(tsoutil.ComposeTS(physical, int64(logical)))
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("UpdateTSO", func(t *testing.T) {
err := gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Alloc", func(t *testing.T) {
_, err := gTestTsoAllocator.Alloc(100)
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("AllocOne", func(t *testing.T) {
_, err := gTestTsoAllocator.AllocOne()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("Reset", func(t *testing.T) {
@ -175,29 +175,29 @@ func TestGlobalTSOAllocator_Fail(t *testing.T) {
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
t.Run("Initialize", func(t *testing.T) {
err := gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
})
t.Run("GenerateTSO_invalid", func(t *testing.T) {
_, err := gTestTsoAllocator.GenerateTSO(0)
assert.NotNil(t, err)
assert.Error(t, err)
})
gTestTsoAllocator.SetLimitMaxLogic(true)
t.Run("SetTSO_invalid", func(t *testing.T) {
err := gTestTsoAllocator.SetTSO(0)
assert.NotNil(t, err)
assert.Error(t, err)
err = gTestTsoAllocator.SetTSO(math.MaxUint64)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Alloc_invalid", func(t *testing.T) {
_, err := gTestTsoAllocator.Alloc(0)
assert.NotNil(t, err)
assert.Error(t, err)
_, err = gTestTsoAllocator.Alloc(math.MaxUint32)
assert.NotNil(t, err)
assert.Error(t, err)
})
t.Run("Reset", func(t *testing.T) {
@ -218,16 +218,16 @@ func TestGlobalTSOAllocator_Update(t *testing.T) {
assert.NoError(t, err)
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
err = gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
time.Sleep(160 * time.Millisecond)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
time.Sleep(500 * time.Millisecond)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
}
func TestGlobalTSOAllocator_load(t *testing.T) {
@ -243,10 +243,10 @@ func TestGlobalTSOAllocator_load(t *testing.T) {
assert.NoError(t, err)
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
err = gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
err = gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
assert.NoError(t, err)
ts, _ := gTestTsoAllocator.GenerateTSO(1)
curTime, logical := tsoutil.ParseTS(ts)
@ -254,11 +254,11 @@ func TestGlobalTSOAllocator_load(t *testing.T) {
physical := nextTime.UnixNano() / int64(time.Millisecond)
target := tsoutil.ComposeTS(physical, int64(logical))
err = gTestTsoAllocator.SetTSO(target)
assert.Nil(t, err)
assert.NoError(t, err)
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", etcdKV)
err = gTestTsoAllocator.Initialize()
assert.Nil(t, err)
assert.NoError(t, err)
ts2, err2 := gTestTsoAllocator.GenerateTSO(1)
assert.Nil(t, err2)

View File

@ -92,7 +92,7 @@ func Test_WaitForComponentInitOrHealthy(t *testing.T) {
compErr: errors.New("error"),
}
err := WaitForComponentInitOrHealthy(context.TODO(), mc, "mockService", 1, 10*time.Millisecond)
assert.NotNil(t, err)
assert.Error(t, err)
mc = &MockComponent{
compState: &milvuspb.ComponentStates{
@ -104,7 +104,7 @@ func Test_WaitForComponentInitOrHealthy(t *testing.T) {
compErr: nil,
}
err = WaitForComponentInitOrHealthy(context.TODO(), mc, "mockService", 1, 10*time.Millisecond)
assert.NotNil(t, err)
assert.Error(t, err)
validCodes := []commonpb.StateCode{commonpb.StateCode_Initializing, commonpb.StateCode_Healthy}
testCodes := []commonpb.StateCode{commonpb.StateCode_Initializing, commonpb.StateCode_Healthy, commonpb.StateCode_Abnormal}
@ -112,9 +112,9 @@ func Test_WaitForComponentInitOrHealthy(t *testing.T) {
mc := buildMockComponent(code)
err := WaitForComponentInitOrHealthy(context.TODO(), mc, "mockService", 1, 10*time.Millisecond)
if funcutil.SliceContain(validCodes, code) {
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.NotNil(t, err)
assert.Error(t, err)
}
}
}
@ -126,9 +126,9 @@ func Test_WaitForComponentInit(t *testing.T) {
mc := buildMockComponent(code)
err := WaitForComponentInit(context.TODO(), mc, "mockService", 1, 10*time.Millisecond)
if funcutil.SliceContain(validCodes, code) {
assert.Nil(t, err)
assert.NoError(t, err)
} else {
assert.NotNil(t, err)
assert.Error(t, err)
}
}
}
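
For reference, a minimal standalone sketch (not part of the diff above; file name, test name, and error text are hypothetical) of why the NoError()/Error() assertions report better than Nil()/NotNil(): testify formats their failures around the error value itself rather than its Go representation. The test below fails on purpose so the two message styles can be compared.

// failure_message_sketch_test.go — a minimal sketch assuming testify's default
// failure formatting; quoted messages are approximate.
package sketch

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFailureMessages(t *testing.T) {
	err := errors.New("connection refused") // hypothetical error

	// On failure, Nil prints the Go representation of the value, roughly:
	//   Expected nil, but got: &errors.errorString{s:"connection refused"}
	assert.Nil(t, err)

	// On failure, NoError prints the error's message directly, roughly:
	//   Received unexpected error:
	//   connection refused
	assert.NoError(t, err)

	// For the negative case, Error states the intent explicitly, roughly
	// "An error is expected but got nil.", instead of NotNil's generic
	// "Expected value not to be nil."
	var noErr error
	assert.NotNil(t, noErr)
	assert.Error(t, noErr)
}

Running go test on such a file shows the two failure styles side by side.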

Some files were not shown because too many files have changed in this diff.