mirror of https://github.com/milvus-io/milvus.git

Use suite for integration test (#24253)

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>

parent 4500d63248
commit 3a66e1de65
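The change pattern is the same across every test file below: standalone Test functions that each booted their own mini cluster become methods on a testify suite that shares one cluster. A minimal, self-contained sketch of that pattern (BaseSuite and ExampleSuite are illustrative stand-ins; the commit's real shared type is MiniClusterSuite):

	package integration

	import (
		"testing"

		"github.com/stretchr/testify/suite"
	)

	// BaseSuite is a stand-in for the MiniClusterSuite this commit introduces:
	// shared fixtures live in SetupSuite/TearDownSuite instead of being
	// re-created by hand in every test function.
	type BaseSuite struct {
		suite.Suite
	}

	func (s *BaseSuite) SetupSuite()    { /* start the shared cluster here */ }
	func (s *BaseSuite) TearDownSuite() { /* stop the shared cluster here */ }

	// Each test file embeds the base suite and writes methods instead of
	// free functions; s.NoError(err) replaces assert.NoError(t, err).
	type ExampleSuite struct {
		BaseSuite
	}

	func (s *ExampleSuite) TestSomething() {
		s.NoError(nil)
	}

	// A single standard test function hands control to testify.
	func TestExample(t *testing.T) {
		suite.Run(t, new(ExampleSuite))
	}

Sharing the fixture in SetupSuite amortizes cluster startup across all tests in a suite, which is the main payoff of the migration.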
@@ -19,6 +19,7 @@ package config
 import (
 	"context"
 	"fmt"
+	"path"
 	"strings"
 	"sync"
 	"time"
@@ -104,7 +105,7 @@ func (es *EtcdSource) GetSourceName() string {
 }
 
 func (es *EtcdSource) Close() {
-	es.etcdCli.Close()
+	// cannot close client here, since client is shared with components
 	es.configRefresher.stop()
 }
 
@@ -128,7 +129,7 @@ func (es *EtcdSource) UpdateOptions(opts Options) {
 
 func (es *EtcdSource) refreshConfigurations() error {
 	es.RLock()
-	prefix := es.keyPrefix + "/config"
+	prefix := path.Join(es.keyPrefix, "config")
 	es.RUnlock()
 
 	ctx, cancel := context.WithTimeout(es.ctx, ReadConfigTimeout)
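A side note on the refreshConfigurations change above: path.Join cleans the joined result, so the key prefix no longer has to be slash-free. A small standard-library illustration (the "by-dev" prefix is made up):

	package main

	import (
		"fmt"
		"path"
	)

	func main() {
		// path.Join cleans the result, so a trailing slash on the prefix
		// no longer produces a doubled separator.
		fmt.Println(path.Join("by-dev/", "config")) // by-dev/config
		fmt.Println("by-dev/" + "/config")          // by-dev//config
	}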
@@ -162,6 +162,8 @@ func (m *Manager) FileConfigs() map[string]string {
 }
 
 func (m *Manager) Close() {
 	m.Lock()
 	defer m.Unlock()
+	for _, s := range m.sources {
+		s.Close()
+	}
 
@@ -175,7 +175,7 @@ func buildKvGroup(keys, values []string) (map[string]string, error) {
 // StartTestEmbedEtcdServer returns a newly created embed etcd server.
 // ### USED FOR UNIT TEST ONLY ###
 func StartTestEmbedEtcdServer() (*embed.Etcd, string, error) {
-	dir, err := ioutil.TempDir(os.TempDir(), "milvus_datanode_ut")
+	dir, err := ioutil.TempDir(os.TempDir(), "milvus_ut")
 	if err != nil {
 		return nil, "", err
 	}
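Taken together, the two Close changes shift ownership: EtcdSource.Close no longer closes the etcd client (it is shared with other components) and only stops its refresher, while Manager.Close gives the config manager a single place to close every registered source.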
@@ -17,7 +17,7 @@
 # limitations under the License.
 
 # run integration test
-echo "Running integration test uner ./tests/integration"
+echo "Running integration test under ./tests/integration"
 
 BASEDIR=$(dirname "$0")
 source $BASEDIR/setenv.sh
@@ -27,6 +27,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
 	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
@@ -44,6 +45,10 @@ const (
 	Dim = 128
 )
 
+type BulkInsertSuite struct {
+	MiniClusterSuite
+}
+
 // test bulk insert E2E
 // 1, create collection with a vector column and a varchar column
 // 2, generate numpy files
@@ -51,17 +56,10 @@ const (
 // 4, create index
 // 5, load
 // 6, search
-func TestBulkInsert(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer func() {
-		err = c.Stop()
-		assert.NoError(t, err)
-		cancel()
-	}()
+func (s *BulkInsertSuite) TestBulkInsert() {
+	c := s.Cluster
+	ctx, cancel := context.WithCancel(c.GetContext())
+	defer cancel()
 
 	prefix := "TestBulkInsert"
 	dbName := ""
@@ -75,7 +73,7 @@ func TestBulkInsert(t *testing.T) {
 		&schemapb.FieldSchema{Name: "embeddings", DataType: schemapb.DataType_FloatVector, TypeParams: []*commonpb.KeyValuePair{{Key: common.DimKey, Value: "128"}}},
 	)
 	marshaledSchema, err := proto.Marshal(schema)
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		DbName: dbName,
@@ -83,17 +81,17 @@ func TestBulkInsert(t *testing.T) {
 		Schema:         marshaledSchema,
 		ShardsNum:      common.DefaultShardsNum,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
-		t.FailNow()
+		s.FailNow("failed to create collection")
 	}
-	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
 	showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
-	assert.NoError(t, err)
-	assert.Equal(t, showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 	log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))
 
 	err = GenerateNumpyFile(c.chunkManager.RootPath()+"/"+"embeddings.npy", 100, schemapb.DataType_FloatVector, []*commonpb.KeyValuePair{
@@ -102,14 +100,14 @@ func TestBulkInsert(t *testing.T) {
 			Value: strconv.Itoa(Dim),
 		},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	err = GenerateNumpyFile(c.chunkManager.RootPath()+"/"+"image_path.npy", 100, schemapb.DataType_VarChar, []*commonpb.KeyValuePair{
 		{
 			Key:   common.MaxLengthKey,
 			Value: strconv.Itoa(65535),
 		},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	bulkInsertFiles := []string{
 		c.chunkManager.RootPath() + "/" + "embeddings.npy",
@@ -117,13 +115,13 @@ func TestBulkInsert(t *testing.T) {
 	}
 
 	health1, err := c.dataCoord.CheckHealth(ctx, &milvuspb.CheckHealthRequest{})
-	assert.NoError(t, err)
+	s.NoError(err)
 	log.Info("dataCoord health", zap.Any("health1", health1))
 	importResp, err := c.proxy.Import(ctx, &milvuspb.ImportRequest{
 		CollectionName: collectionName,
 		Files:          bulkInsertFiles,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	log.Info("Import result", zap.Any("importResp", importResp), zap.Int64s("tasks", importResp.GetTasks()))
 
 	tasks := importResp.GetTasks()
@@ -133,7 +131,7 @@ func TestBulkInsert(t *testing.T) {
 		importTaskState, err := c.proxy.GetImportState(ctx, &milvuspb.GetImportStateRequest{
 			Task: task,
 		})
-		assert.NoError(t, err)
+		s.NoError(err)
 		switch importTaskState.GetState() {
 		case commonpb.ImportState_ImportCompleted:
 			break loop
@@ -150,12 +148,12 @@ func TestBulkInsert(t *testing.T) {
 	}
 
 	health2, err := c.dataCoord.CheckHealth(ctx, &milvuspb.CheckHealthRequest{})
-	assert.NoError(t, err)
+	s.NoError(err)
 	log.Info("dataCoord health", zap.Any("health2", health2))
 
 	segments, err := c.metaWatcher.ShowSegments()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, segments)
+	s.NoError(err)
+	s.NotEmpty(segments)
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
@@ -170,21 +168,21 @@ func TestBulkInsert(t *testing.T) {
 	if createIndexStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createIndexStatus fail reason", zap.String("reason", createIndexStatus.GetReason()))
 	}
-	assert.NoError(t, err)
-	assert.Equal(t, commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
 
-	waitingForIndexBuilt(ctx, c, t, collectionName, "embeddings")
+	waitingForIndexBuilt(ctx, c, s.T(), collectionName, "embeddings")
 
 	// load
 	loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
 		DbName:         dbName,
 		CollectionName: collectionName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if loadStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("loadStatus fail reason", zap.String("reason", loadStatus.GetReason()))
 	}
-	assert.Equal(t, commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
+	s.Equal(commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
 	waitingForLoad(ctx, c, collectionName)
 
 	// search
@@ -202,8 +200,8 @@ func TestBulkInsert(t *testing.T) {
 	if searchResult.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("searchResult fail reason", zap.String("reason", searchResult.GetStatus().GetReason()))
 	}
-	assert.NoError(t, err)
-	assert.Equal(t, commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())
 
 	log.Info("======================")
 	log.Info("======================")
@@ -212,6 +210,10 @@ func TestBulkInsert(t *testing.T) {
 	log.Info("======================")
 }
 
+func TestBulkInsert(t *testing.T) {
+	suite.Run(t, new(BulkInsertSuite))
+}
+
 func GenerateNumpyFile(filePath string, rowCount int, dType schemapb.DataType, typeParams []*commonpb.KeyValuePair) error {
 	if dType == schemapb.DataType_VarChar {
 		var data []string
@@ -2,10 +2,8 @@ package integration
 
 import (
 	"context"
-	"testing"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/stretchr/testify/assert"
 	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
@@ -16,14 +14,14 @@ import (
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
 )
 
-func TestGetIndexStatistics(t *testing.T) {
-	ctx := context.Background()
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer c.Stop()
-	assert.NoError(t, err)
+type GetIndexStatisticsSuite struct {
+	MiniClusterSuite
+}
+
+func (s *GetIndexStatisticsSuite) TestGetIndexStatistics() {
+	c := s.Cluster
+	ctx, cancel := context.WithCancel(c.GetContext())
+	defer cancel()
 
 	prefix := "TestGetIndexStatistics"
 	dbName := ""
@@ -33,7 +31,7 @@ func TestGetIndexStatistics(t *testing.T) {
 
 	schema := constructSchema(collectionName, dim, true)
 	marshaledSchema, err := proto.Marshal(schema)
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		DbName: dbName,
@@ -41,11 +39,11 @@ func TestGetIndexStatistics(t *testing.T) {
 		Schema:    marshaledSchema,
 		ShardsNum: 2,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
 	}
-	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)
 	hashKeys := generateHashKeys(rowNum)
@@ -56,19 +54,19 @@ func TestGetIndexStatistics(t *testing.T) {
 		HashKeys:       hashKeys,
 		NumRows:        uint32(rowNum),
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	// flush
 	flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
 		DbName:          dbName,
 		CollectionNames: []string{collectionName},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	assert.NotEmpty(t, segmentIDs)
-	assert.Equal(t, true, has)
+	s.NotEmpty(segmentIDs)
+	s.Equal(true, has)
 	waitingForFlush(ctx, c, ids)
 
 	// create index
@@ -82,20 +80,20 @@ func TestGetIndexStatistics(t *testing.T) {
 	if createIndexStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createIndexStatus fail reason", zap.String("reason", createIndexStatus.GetReason()))
 	}
-	assert.NoError(t, err)
-	assert.Equal(t, commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
 
-	waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
+	waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)
 
 	getIndexStatisticsResponse, err := c.proxy.GetIndexStatistics(ctx, &milvuspb.GetIndexStatisticsRequest{
 		CollectionName: collectionName,
 		IndexName:      indexName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	indexInfos := getIndexStatisticsResponse.GetIndexDescriptions()
-	assert.Equal(t, 1, len(indexInfos))
-	assert.Equal(t, int64(3000), indexInfos[0].IndexedRows)
-	assert.Equal(t, int64(3000), indexInfos[0].TotalRows)
+	s.Equal(1, len(indexInfos))
+	s.Equal(int64(3000), indexInfos[0].IndexedRows)
+	s.Equal(int64(3000), indexInfos[0].TotalRows)
 
 	// skip second insert case for now
 	// the result is not certain
@@ -107,32 +105,32 @@ func TestGetIndexStatistics(t *testing.T) {
 		HashKeys:       hashKeys,
 		NumRows:        uint32(rowNum),
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, insertResult2.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(insertResult2.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	_, err = c.proxy.Flush(ctx, &milvuspb.FlushRequest{
 		DbName:          dbName,
 		CollectionNames: []string{collectionName},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	segmentIDs2, has2 := flushResp.GetCollSegIDs()[collectionName]
 	ids2 := segmentIDs2.GetData()
-	assert.NotEmpty(t, segmentIDs)
-	assert.Equal(t, true, has2)
+	s.NotEmpty(segmentIDs)
+	s.Equal(true, has2)
 	waitingForFlush(ctx, c, ids2)
 
 	loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
 		DbName:         dbName,
 		CollectionName: collectionName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if loadStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("loadStatus fail reason", zap.String("reason", loadStatus.GetReason()))
 	}
-	assert.Equal(t, commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
+	s.Equal(commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
 	waitingForLoad(ctx, c, collectionName)
 
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
 
@@ -140,11 +138,11 @@ func TestGetIndexStatistics(t *testing.T) {
 		CollectionName: collectionName,
 		IndexName:      indexName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	indexInfos2 := getIndexStatisticsResponse2.GetIndexDescriptions()
-	assert.Equal(t, 1, len(indexInfos2))
-	assert.Equal(t, int64(6000), indexInfos2[0].IndexedRows)
-	assert.Equal(t, int64(6000), indexInfos2[0].TotalRows)
+	s.Equal(1, len(indexInfos2))
+	s.Equal(int64(6000), indexInfos2[0].IndexedRows)
+	s.Equal(int64(6000), indexInfos2[0].TotalRows)
 	*/
 
 	log.Info("TestGetIndexStatistics succeed")
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"strconv"
 	"testing"
-	"time"
 
 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/suite"
@@ -36,11 +35,7 @@ import (
 )
 
 type TestGetVectorSuite struct {
-	suite.Suite
-
-	ctx     context.Context
-	cancel  context.CancelFunc
-	cluster *MiniCluster
+	MiniClusterSuite
 
 	// test params
 	nq int
@@ -51,25 +46,12 @@ type TestGetVectorSuite struct {
 	vecType schemapb.DataType
 }
 
-func (suite *TestGetVectorSuite) SetupTest() {
-	suite.ctx, suite.cancel = context.WithTimeout(context.Background(), time.Second*180)
-
-	var err error
-	suite.cluster, err = StartMiniCluster(suite.ctx)
-	suite.Require().NoError(err)
-	err = suite.cluster.Start()
-	suite.Require().NoError(err)
-}
-
-func (suite *TestGetVectorSuite) TearDownTest() {
-	err := suite.cluster.Stop()
-	suite.Require().NoError(err)
-	suite.cancel()
-}
-
-func (suite *TestGetVectorSuite) run() {
+func (s *TestGetVectorSuite) run() {
+	ctx, cancel := context.WithCancel(s.Cluster.ctx)
+	defer cancel()
+
 	collection := fmt.Sprintf("TestGetVector_%d_%d_%s_%s_%s",
-		suite.nq, suite.topK, suite.indexType, suite.metricType, funcutil.GenRandomStr())
+		s.nq, s.topK, s.indexType, s.metricType, funcutil.GenRandomStr())
 
 	const (
 		NB = 10000
@@ -83,7 +65,7 @@ func (suite *TestGetVectorSuite) run() {
 		Name:         pkFieldName,
 		IsPrimaryKey: true,
 		Description:  "",
-		DataType:     suite.pkType,
+		DataType:     s.pkType,
 		TypeParams: []*commonpb.KeyValuePair{
 			{
 				Key:   common.MaxLengthKey,
@@ -98,7 +80,7 @@ func (suite *TestGetVectorSuite) run() {
 		Name:         vecFieldName,
 		IsPrimaryKey: false,
 		Description:  "",
-		DataType:     suite.vecType,
+		DataType:     s.vecType,
 		TypeParams: []*commonpb.KeyValuePair{
 			{
 				Key: common.DimKey,
@@ -109,96 +91,96 @@ func (suite *TestGetVectorSuite) run() {
 	}
 	schema := constructSchema(collection, dim, false, pk, fVec)
 	marshaledSchema, err := proto.Marshal(schema)
-	suite.Require().NoError(err)
+	s.Require().NoError(err)
 
-	createCollectionStatus, err := suite.cluster.proxy.CreateCollection(suite.ctx, &milvuspb.CreateCollectionRequest{
+	createCollectionStatus, err := s.Cluster.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		CollectionName: collection,
 		Schema:         marshaledSchema,
 		ShardsNum:      2,
 	})
-	suite.Require().NoError(err)
-	suite.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Require().NoError(err)
+	s.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	fieldsData := make([]*schemapb.FieldData, 0)
-	if suite.pkType == schemapb.DataType_Int64 {
+	if s.pkType == schemapb.DataType_Int64 {
 		fieldsData = append(fieldsData, newInt64FieldData(pkFieldName, NB))
 	} else {
 		fieldsData = append(fieldsData, newStringFieldData(pkFieldName, NB))
 	}
 	var vecFieldData *schemapb.FieldData
-	if suite.vecType == schemapb.DataType_FloatVector {
+	if s.vecType == schemapb.DataType_FloatVector {
 		vecFieldData = newFloatVectorFieldData(vecFieldName, NB, dim)
 	} else {
 		vecFieldData = newBinaryVectorFieldData(vecFieldName, NB, dim)
 	}
 	fieldsData = append(fieldsData, vecFieldData)
 	hashKeys := generateHashKeys(NB)
-	_, err = suite.cluster.proxy.Insert(suite.ctx, &milvuspb.InsertRequest{
+	_, err = s.Cluster.proxy.Insert(ctx, &milvuspb.InsertRequest{
 		CollectionName: collection,
 		FieldsData:     fieldsData,
 		HashKeys:       hashKeys,
 		NumRows:        uint32(NB),
 	})
-	suite.Require().NoError(err)
-	suite.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Require().NoError(err)
+	s.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	// flush
-	flushResp, err := suite.cluster.proxy.Flush(suite.ctx, &milvuspb.FlushRequest{
+	flushResp, err := s.Cluster.proxy.Flush(ctx, &milvuspb.FlushRequest{
 		CollectionNames: []string{collection},
 	})
-	suite.Require().NoError(err)
+	s.Require().NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collection]
 	ids := segmentIDs.GetData()
-	suite.Require().NotEmpty(segmentIDs)
-	suite.Require().True(has)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
 
-	segments, err := suite.cluster.metaWatcher.ShowSegments()
-	suite.Require().NoError(err)
-	suite.Require().NotEmpty(segments)
+	segments, err := s.Cluster.metaWatcher.ShowSegments()
+	s.Require().NoError(err)
+	s.Require().NotEmpty(segments)
 
-	waitingForFlush(suite.ctx, suite.cluster, ids)
+	waitingForFlush(ctx, s.Cluster, ids)
 
 	// create index
-	_, err = suite.cluster.proxy.CreateIndex(suite.ctx, &milvuspb.CreateIndexRequest{
+	_, err = s.Cluster.proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
 		CollectionName: collection,
 		FieldName:      vecFieldName,
 		IndexName:      "_default",
-		ExtraParams:    constructIndexParam(dim, suite.indexType, suite.metricType),
+		ExtraParams:    constructIndexParam(dim, s.indexType, s.metricType),
 	})
-	suite.Require().NoError(err)
-	suite.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Require().NoError(err)
+	s.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
-	waitingForIndexBuilt(suite.ctx, suite.cluster, suite.T(), collection, vecFieldName)
+	waitingForIndexBuilt(ctx, s.Cluster, s.T(), collection, vecFieldName)
 
 	// load
-	_, err = suite.cluster.proxy.LoadCollection(suite.ctx, &milvuspb.LoadCollectionRequest{
+	_, err = s.Cluster.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
 		CollectionName: collection,
 	})
-	suite.Require().NoError(err)
-	suite.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
-	waitingForLoad(suite.ctx, suite.cluster, collection)
+	s.Require().NoError(err)
+	s.Require().Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	waitingForLoad(ctx, s.Cluster, collection)
 
 	// search
-	nq := suite.nq
-	topk := suite.topK
+	nq := s.nq
+	topk := s.topK
 
 	outputFields := []string{vecFieldName}
-	params := getSearchParams(suite.indexType, suite.metricType)
+	params := getSearchParams(s.indexType, s.metricType)
 	searchReq := constructSearchRequest("", collection, "",
-		vecFieldName, suite.vecType, outputFields, suite.metricType, params, nq, dim, topk, -1)
+		vecFieldName, s.vecType, outputFields, s.metricType, params, nq, dim, topk, -1)
 
-	searchResp, err := suite.cluster.proxy.Search(suite.ctx, searchReq)
-	suite.Require().NoError(err)
-	suite.Require().Equal(searchResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	searchResp, err := s.Cluster.proxy.Search(ctx, searchReq)
+	s.Require().NoError(err)
+	s.Require().Equal(searchResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	result := searchResp.GetResults()
-	if suite.pkType == schemapb.DataType_Int64 {
-		suite.Require().Len(result.GetIds().GetIntId().GetData(), nq*topk)
+	if s.pkType == schemapb.DataType_Int64 {
+		s.Require().Len(result.GetIds().GetIntId().GetData(), nq*topk)
 	} else {
-		suite.Require().Len(result.GetIds().GetStrId().GetData(), nq*topk)
+		s.Require().Len(result.GetIds().GetStrId().GetData(), nq*topk)
 	}
-	suite.Require().Len(result.GetScores(), nq*topk)
-	suite.Require().GreaterOrEqual(len(result.GetFieldsData()), 1)
+	s.Require().Len(result.GetScores(), nq*topk)
+	s.Require().GreaterOrEqual(len(result.GetFieldsData()), 1)
 	var vecFieldIndex = -1
 	for i, fieldData := range result.GetFieldsData() {
 		if typeutil.IsVectorType(fieldData.GetType()) {
@@ -206,162 +188,162 @@ func (suite *TestGetVectorSuite) run() {
 			break
 		}
 	}
-	suite.Require().EqualValues(nq, result.GetNumQueries())
-	suite.Require().EqualValues(topk, result.GetTopK())
+	s.Require().EqualValues(nq, result.GetNumQueries())
+	s.Require().EqualValues(topk, result.GetTopK())
 
 	// check output vectors
-	if suite.vecType == schemapb.DataType_FloatVector {
-		suite.Require().Len(result.GetFieldsData()[vecFieldIndex].GetVectors().GetFloatVector().GetData(), nq*topk*dim)
+	if s.vecType == schemapb.DataType_FloatVector {
+		s.Require().Len(result.GetFieldsData()[vecFieldIndex].GetVectors().GetFloatVector().GetData(), nq*topk*dim)
 		rawData := vecFieldData.GetVectors().GetFloatVector().GetData()
 		resData := result.GetFieldsData()[vecFieldIndex].GetVectors().GetFloatVector().GetData()
-		if suite.pkType == schemapb.DataType_Int64 {
+		if s.pkType == schemapb.DataType_Int64 {
 			for i, id := range result.GetIds().GetIntId().GetData() {
 				expect := rawData[int(id)*dim : (int(id)+1)*dim]
 				actual := resData[i*dim : (i+1)*dim]
-				suite.Require().ElementsMatch(expect, actual)
+				s.Require().ElementsMatch(expect, actual)
 			}
 		} else {
 			for i, idStr := range result.GetIds().GetStrId().GetData() {
 				id, err := strconv.Atoi(idStr)
-				suite.Require().NoError(err)
+				s.Require().NoError(err)
 				expect := rawData[id*dim : (id+1)*dim]
 				actual := resData[i*dim : (i+1)*dim]
-				suite.Require().ElementsMatch(expect, actual)
+				s.Require().ElementsMatch(expect, actual)
 			}
 		}
 	} else {
-		suite.Require().Len(result.GetFieldsData()[vecFieldIndex].GetVectors().GetBinaryVector(), nq*topk*dim/8)
+		s.Require().Len(result.GetFieldsData()[vecFieldIndex].GetVectors().GetBinaryVector(), nq*topk*dim/8)
 		rawData := vecFieldData.GetVectors().GetBinaryVector()
 		resData := result.GetFieldsData()[vecFieldIndex].GetVectors().GetBinaryVector()
-		if suite.pkType == schemapb.DataType_Int64 {
+		if s.pkType == schemapb.DataType_Int64 {
 			for i, id := range result.GetIds().GetIntId().GetData() {
 				dataBytes := dim / 8
 				for j := 0; j < dataBytes; j++ {
 					expect := rawData[int(id)*dataBytes+j]
 					actual := resData[i*dataBytes+j]
-					suite.Require().Equal(expect, actual)
+					s.Require().Equal(expect, actual)
 				}
 			}
 		} else {
 			for i, idStr := range result.GetIds().GetStrId().GetData() {
 				dataBytes := dim / 8
 				id, err := strconv.Atoi(idStr)
-				suite.Require().NoError(err)
+				s.Require().NoError(err)
 				for j := 0; j < dataBytes; j++ {
 					expect := rawData[id*dataBytes+j]
 					actual := resData[i*dataBytes+j]
-					suite.Require().Equal(expect, actual)
+					s.Require().Equal(expect, actual)
 				}
 			}
 		}
 	}
 
-	status, err := suite.cluster.proxy.DropCollection(suite.ctx, &milvuspb.DropCollectionRequest{
+	status, err := s.Cluster.proxy.DropCollection(ctx, &milvuspb.DropCollectionRequest{
 		CollectionName: collection,
 	})
-	suite.Require().NoError(err)
-	suite.Require().Equal(status.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Require().NoError(err)
+	s.Require().Equal(status.GetErrorCode(), commonpb.ErrorCode_Success)
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_FLAT() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexFaissIDMap
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_FLAT() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexFaissIDMap
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_IVF_FLAT() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexFaissIvfFlat
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_IVF_FLAT() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexFaissIvfFlat
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_IVF_PQ() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexFaissIvfPQ
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_IVF_PQ() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexFaissIvfPQ
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_IVF_SQ8() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexFaissIvfSQ8
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_IVF_SQ8() {
	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexFaissIvfSQ8
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_HNSW() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexHNSW
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_HNSW() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexHNSW
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_IP() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexHNSW
-	suite.metricType = distance.IP
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_IP() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexHNSW
+	s.metricType = distance.IP
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_StringPK() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexHNSW
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_VarChar
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_StringPK() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexHNSW
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_VarChar
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_BinaryVector() {
-	suite.nq = 10
-	suite.topK = 10
-	suite.indexType = IndexFaissBinIvfFlat
-	suite.metricType = distance.JACCARD
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_BinaryVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_BinaryVector() {
+	s.nq = 10
+	s.topK = 10
+	s.indexType = IndexFaissBinIvfFlat
+	s.metricType = distance.JACCARD
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_BinaryVector
+	s.run()
 }
 
-func (suite *TestGetVectorSuite) TestGetVector_Big_NQ_TOPK() {
-	suite.T().Skip("skip big NQ Top due to timeout")
-	suite.nq = 10000
-	suite.topK = 200
-	suite.indexType = IndexHNSW
-	suite.metricType = distance.L2
-	suite.pkType = schemapb.DataType_Int64
-	suite.vecType = schemapb.DataType_FloatVector
-	suite.run()
+func (s *TestGetVectorSuite) TestGetVector_Big_NQ_TOPK() {
+	s.T().Skip("skip big NQ Top due to timeout")
+	s.nq = 10000
+	s.topK = 200
+	s.indexType = IndexHNSW
+	s.metricType = distance.L2
+	s.pkType = schemapb.DataType_Int64
+	s.vecType = schemapb.DataType_FloatVector
+	s.run()
 }
 
-//func (suite *TestGetVectorSuite) TestGetVector_DISKANN() {
-//	suite.nq = 10
-//	suite.topK = 10
-//	suite.indexType = IndexDISKANN
-//	suite.metricType = distance.L2
-//	suite.pkType = schemapb.DataType_Int64
-//	suite.vecType = schemapb.DataType_FloatVector
-//	suite.run()
+//func (s *TestGetVectorSuite) TestGetVector_DISKANN() {
+//	s.nq = 10
+//	s.topK = 10
+//	s.indexType = IndexDISKANN
+//	s.metricType = distance.L2
+//	s.pkType = schemapb.DataType_Int64
+//	s.vecType = schemapb.DataType_FloatVector
+//	s.run()
 //}
 
 func TestGetVector(t *testing.T) {
@@ -20,10 +20,9 @@ import (
 	"context"
 	"fmt"
 	"testing"
-	"time"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
 	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
@@ -35,18 +34,14 @@ import (
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
 )
 
-func TestHelloMilvus(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
+type HelloMilvusSuite struct {
+	MiniClusterSuite
+}
+
+func (s *HelloMilvusSuite) TestHelloMilvus() {
+	ctx, cancel := context.WithCancel(s.Cluster.ctx)
 	defer cancel()
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer func() {
-		err = c.Stop()
-		assert.NoError(t, err)
-		cancel()
-	}()
+	c := s.Cluster
 
 	const (
 		dim    = 128
@@ -58,7 +53,7 @@ func TestHelloMilvus(t *testing.T) {
 
 	schema := constructSchema(collectionName, dim, true)
 	marshaledSchema, err := proto.Marshal(schema)
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		DbName: dbName,
@@ -66,16 +61,16 @@ func TestHelloMilvus(t *testing.T) {
 		Schema:    marshaledSchema,
 		ShardsNum: common.DefaultShardsNum,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
 	}
-	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
 	showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
-	assert.NoError(t, err)
-	assert.Equal(t, showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 	log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))
 
 	fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)
@@ -87,23 +82,23 @@ func TestHelloMilvus(t *testing.T) {
 		HashKeys:       hashKeys,
 		NumRows:        uint32(rowNum),
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	// flush
 	flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
 		DbName:          dbName,
 		CollectionNames: []string{collectionName},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	assert.NotEmpty(t, segmentIDs)
-	assert.True(t, has)
+	s.NotEmpty(segmentIDs)
+	s.True(has)
 
 	segments, err := c.metaWatcher.ShowSegments()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, segments)
+	s.NoError(err)
+	s.NotEmpty(segments)
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
@@ -119,21 +114,21 @@ func TestHelloMilvus(t *testing.T) {
 	if createIndexStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createIndexStatus fail reason", zap.String("reason", createIndexStatus.GetReason()))
 	}
-	assert.NoError(t, err)
-	assert.Equal(t, commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
 
-	waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
+	waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)
 
 	// load
 	loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
 		DbName:         dbName,
 		CollectionName: collectionName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if loadStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("loadStatus fail reason", zap.String("reason", loadStatus.GetReason()))
 	}
-	assert.Equal(t, commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
+	s.Equal(commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
 	waitingForLoad(ctx, c, collectionName)
 
 	// search
@@ -151,8 +146,13 @@ func TestHelloMilvus(t *testing.T) {
 	if searchResult.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("searchResult fail reason", zap.String("reason", searchResult.GetStatus().GetReason()))
 	}
-	assert.NoError(t, err)
-	assert.Equal(t, commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())
 
 	log.Info("TestHelloMilvus succeed")
+}
+
+func TestHelloMilvus(t *testing.T) {
+	suite.Run(t, new(HelloMilvusSuite))
 }
File diff suppressed because it is too large
@@ -24,7 +24,7 @@ import (
 
 	"github.com/cockroachdb/errors"
 	"github.com/golang/protobuf/proto"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
 	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
@@ -36,32 +36,23 @@ import (
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
 )
 
-func TestShowSessions(t *testing.T) {
-	ctx := context.Background()
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer c.Stop()
-	assert.NoError(t, err)
+type MetaWatcherSuite struct {
+	MiniClusterSuite
+}
 
-	sessions, err := c.metaWatcher.ShowSessions()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, sessions)
+func (s *MetaWatcherSuite) TestShowSessions() {
+	sessions, err := s.Cluster.metaWatcher.ShowSessions()
+	s.NoError(err)
+	s.NotEmpty(sessions)
 	for _, session := range sessions {
 		log.Info("ShowSessions result", zap.String("session", session.String()))
 	}
 }
 
-func TestShowSegments(t *testing.T) {
-	ctx := context.Background()
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer c.Stop()
-	assert.NoError(t, err)
+func (s *MetaWatcherSuite) TestShowSegments() {
+	c := s.Cluster
+	ctx, cancel := context.WithCancel(c.GetContext())
+	defer cancel()
 
 	prefix := "TestShowSegments"
 	dbName := ""
@@ -109,7 +100,7 @@ func TestShowSegments(t *testing.T) {
 	}
 	schema := constructCollectionSchema()
 	marshaledSchema, err := proto.Marshal(schema)
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		DbName: dbName,
@@ -117,13 +108,13 @@ func TestShowSegments(t *testing.T) {
 		Schema:    marshaledSchema,
 		ShardsNum: common.DefaultShardsNum,
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
 	showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
-	assert.NoError(t, err)
-	assert.Equal(t, showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 	log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))
 
 	fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)
@@ -135,25 +126,21 @@ func TestShowSegments(t *testing.T) {
 		HashKeys:       hashKeys,
 		NumRows:        uint32(rowNum),
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	segments, err := c.metaWatcher.ShowSegments()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, segments)
+	s.NoError(err)
+	s.NotEmpty(segments)
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
 }
 
-func TestShowReplicas(t *testing.T) {
-	ctx := context.Background()
-	c, err := StartMiniCluster(ctx)
-	assert.NoError(t, err)
-	err = c.Start()
-	assert.NoError(t, err)
-	defer c.Stop()
-	assert.NoError(t, err)
+func (s *MetaWatcherSuite) TestShowReplicas() {
+	c := s.Cluster
+	ctx, cancel := context.WithCancel(c.GetContext())
+	defer cancel()
 
 	prefix := "TestShowReplicas"
 	dbName := ""
@@ -201,7 +188,7 @@ func TestShowReplicas(t *testing.T) {
 	}
 	schema := constructCollectionSchema()
 	marshaledSchema, err := proto.Marshal(schema)
-	assert.NoError(t, err)
+	s.NoError(err)
 
 	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		DbName: dbName,
@@ -209,16 +196,16 @@ func TestShowReplicas(t *testing.T) {
 		Schema:    marshaledSchema,
 		ShardsNum: common.DefaultShardsNum,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
 	}
-	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
+	s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
 
 	log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
 	showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
-	assert.NoError(t, err)
-	assert.Equal(t, showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 	log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))
 
 	fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)
@@ -230,22 +217,22 @@ func TestShowReplicas(t *testing.T) {
 		HashKeys:       hashKeys,
 		NumRows:        uint32(rowNum),
 	})
-	assert.NoError(t, err)
-	assert.Equal(t, insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
+	s.NoError(err)
+	s.Equal(insertResult.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
 
 	// flush
 	flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
 		DbName:          dbName,
 		CollectionNames: []string{collectionName},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	assert.NotEmpty(t, segmentIDs)
+	s.NotEmpty(segmentIDs)
 
 	segments, err := c.metaWatcher.ShowSegments()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, segments)
+	s.NoError(err)
+	s.NotEmpty(segments)
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
@@ -296,24 +283,24 @@ func TestShowReplicas(t *testing.T) {
 		},
 	},
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if createIndexStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("createIndexStatus fail reason", zap.String("reason", createIndexStatus.GetReason()))
 	}
-	assert.Equal(t, commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
+	s.Equal(commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
 
-	waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
+	waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)
 
 	// load
 	loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
 		DbName:         dbName,
 		CollectionName: collectionName,
 	})
-	assert.NoError(t, err)
+	s.NoError(err)
 	if loadStatus.GetErrorCode() != commonpb.ErrorCode_Success {
 		log.Warn("loadStatus fail reason", zap.String("reason", loadStatus.GetReason()))
 	}
-	assert.Equal(t, commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
+	s.Equal(commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
 	for {
 		loadProgress, err := c.proxy.GetLoadingProgress(ctx, &milvuspb.GetLoadingProgressRequest{
 			CollectionName: collectionName,
@@ -328,11 +315,15 @@ func TestShowReplicas(t *testing.T) {
 	}
 
 	replicas, err := c.metaWatcher.ShowReplicas()
-	assert.NoError(t, err)
-	assert.NotEmpty(t, replicas)
+	s.NoError(err)
+	s.NotEmpty(replicas)
 	for _, replica := range replicas {
 		log.Info("ShowReplicas result", zap.String("replica", PrettyReplica(replica)))
 	}
 
 	log.Info("TestShowReplicas succeed")
 }
+
+func TestMetaWatcher(t *testing.T) {
+	suite.Run(t, new(MetaWatcherSuite))
+}
@@ -310,6 +310,10 @@ func StartMiniCluster(ctx context.Context, opts ...Option) (cluster *MiniCluster
 	return cluster, nil
 }
 
+func (cluster *MiniCluster) GetContext() context.Context {
+	return cluster.ctx
+}
+
 func (cluster *MiniCluster) Start() error {
 	log.Info("mini cluster start")
 	err := cluster.rootCoord.Init()
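The new GetContext accessor is what lets the migrated tests drop their per-test StartMiniCluster/WithTimeout scaffolding. Every suite method now follows the same three lines (identifiers exactly as in this commit):

	c := s.Cluster
	ctx, cancel := context.WithCancel(c.GetContext())
	defer cancel()

Cancelling the derived context cleans up one test's work without tearing down the shared cluster that later tests in the suite still need.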
@ -20,183 +20,170 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/datanode"
|
||||
"github.com/milvus-io/milvus/internal/indexnode"
|
||||
"github.com/milvus-io/milvus/internal/querynodev2"
|
||||
)
|
||||
|
||||
func TestMiniClusterStartAndStop(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, err := StartMiniCluster(ctx)
|
||||
assert.NoError(t, err)
|
||||
err = c.Start()
|
||||
assert.NoError(t, err)
|
||||
err = c.Stop()
|
||||
assert.NoError(t, err)
|
||||
type MiniClusterMethodsSuite struct {
|
||||
MiniClusterSuite
|
||||
}
|
||||
|
||||
func TestAddRemoveDataNode(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, err := StartMiniCluster(ctx)
|
||||
assert.NoError(t, err)
|
||||
err = c.Start()
|
||||
assert.NoError(t, err)
|
||||
defer c.Stop()
|
||||
assert.NoError(t, err)
|
||||
func (s *MiniClusterMethodsSuite) TestStartAndStop() {
|
||||
//Do nothing
|
||||
}
|
||||
|
||||
func (s *MiniClusterMethodsSuite) TestRemoveDataNode() {
|
||||
c := s.Cluster
|
||||
ctx, cancel := context.WithCancel(c.GetContext())
|
||||
defer cancel()
|
||||
|
||||
datanode := datanode.NewDataNode(ctx, c.factory)
|
||||
datanode.SetEtcdClient(c.etcdCli)
|
||||
//datanode := c.CreateDefaultDataNode()
|
||||
|
||||
err = c.AddDataNode(datanode)
|
||||
assert.NoError(t, err)
|
||||
err := c.AddDataNode(datanode)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 2, c.clusterConfig.DataNodeNum)
|
||||
assert.Equal(t, 2, len(c.dataNodes))
|
||||
s.Equal(2, c.clusterConfig.DataNodeNum)
|
||||
s.Equal(2, len(c.dataNodes))
|
||||
|
||||
err = c.RemoveDataNode(datanode)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 1, c.clusterConfig.DataNodeNum)
|
||||
assert.Equal(t, 1, len(c.dataNodes))
|
||||
s.Equal(1, c.clusterConfig.DataNodeNum)
|
||||
s.Equal(1, len(c.dataNodes))
|
||||
|
||||
// add default node and remove randomly
|
||||
err = c.AddDataNode(nil)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 2, c.clusterConfig.DataNodeNum)
|
||||
assert.Equal(t, 2, len(c.dataNodes))
|
||||
s.Equal(2, c.clusterConfig.DataNodeNum)
|
||||
s.Equal(2, len(c.dataNodes))
|
||||
|
||||
err = c.RemoveDataNode(nil)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 1, c.clusterConfig.DataNodeNum)
|
||||
assert.Equal(t, 1, len(c.dataNodes))
|
||||
s.Equal(1, c.clusterConfig.DataNodeNum)
|
||||
s.Equal(1, len(c.dataNodes))
|
||||
}
|
||||
|
||||
func TestAddRemoveQueryNode(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, err := StartMiniCluster(ctx)
|
||||
assert.NoError(t, err)
|
||||
err = c.Start()
|
||||
assert.NoError(t, err)
|
||||
defer c.Stop()
|
||||
assert.NoError(t, err)
|
||||
func (s *MiniClusterMethodsSuite) TestRemoveQueryNode() {
|
||||
c := s.Cluster
|
||||
ctx, cancel := context.WithCancel(c.GetContext())
|
||||
defer cancel()
|
||||
|
||||
queryNode := querynodev2.NewQueryNode(ctx, c.factory)
|
||||
queryNode.SetEtcdClient(c.etcdCli)
|
||||
//queryNode := c.CreateDefaultQueryNode()
|
||||
|
||||
err = c.AddQueryNode(queryNode)
|
||||
assert.NoError(t, err)
|
||||
err := c.AddQueryNode(queryNode)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 2, c.clusterConfig.QueryNodeNum)
|
||||
assert.Equal(t, 2, len(c.queryNodes))
|
||||
s.Equal(2, c.clusterConfig.QueryNodeNum)
|
||||
s.Equal(2, len(c.queryNodes))
|
||||
|
||||
err = c.RemoveQueryNode(queryNode)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 1, c.clusterConfig.QueryNodeNum)
|
||||
assert.Equal(t, 1, len(c.queryNodes))
|
||||
s.Equal(1, c.clusterConfig.QueryNodeNum)
|
||||
s.Equal(1, len(c.queryNodes))
|
||||
|
||||
// add default node and remove randomly
|
||||
err = c.AddQueryNode(nil)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
assert.Equal(t, 2, c.clusterConfig.QueryNodeNum)
|
||||
assert.Equal(t, 2, len(c.queryNodes))
|
||||
s.Equal(2, c.clusterConfig.QueryNodeNum)
|
||||
s.Equal(2, len(c.queryNodes))
|
||||
|
||||
err = c.RemoveQueryNode(nil)
|
||||
assert.NoError(t, err)
|
||||
s.NoError(err)
|
||||
|
||||
s.Equal(1, c.clusterConfig.QueryNodeNum)
|
||||
s.Equal(1, len(c.queryNodes))
|
||||
|
||||
assert.Equal(t, 1, c.clusterConfig.QueryNodeNum)
|
||||
assert.Equal(t, 1, len(c.queryNodes))
|
||||
}
|
||||
|
||||
func TestAddRemoveIndexNode(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, err := StartMiniCluster(ctx)
|
||||
    assert.NoError(t, err)
    err = c.Start()
    assert.NoError(t, err)
    defer c.Stop()
    assert.NoError(t, err)
func (s *MiniClusterMethodsSuite) TestRemoveIndexNode() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()

    indexNode := indexnode.NewIndexNode(ctx, c.factory)
    indexNode.SetEtcdClient(c.etcdCli)
    //indexNode := c.CreateDefaultIndexNode()

    err = c.AddIndexNode(indexNode)
    assert.NoError(t, err)
    err := c.AddIndexNode(indexNode)
    s.NoError(err)

    assert.Equal(t, 2, c.clusterConfig.IndexNodeNum)
    assert.Equal(t, 2, len(c.indexNodes))
    s.Equal(2, c.clusterConfig.IndexNodeNum)
    s.Equal(2, len(c.indexNodes))

    err = c.RemoveIndexNode(indexNode)
    assert.NoError(t, err)
    s.NoError(err)

    assert.Equal(t, 1, c.clusterConfig.IndexNodeNum)
    assert.Equal(t, 1, len(c.indexNodes))
    s.Equal(1, c.clusterConfig.IndexNodeNum)
    s.Equal(1, len(c.indexNodes))

    // add default node and remove randomly
    err = c.AddIndexNode(nil)
    assert.NoError(t, err)
    s.NoError(err)

    assert.Equal(t, 2, c.clusterConfig.IndexNodeNum)
    assert.Equal(t, 2, len(c.indexNodes))
    s.Equal(2, c.clusterConfig.IndexNodeNum)
    s.Equal(2, len(c.indexNodes))

    err = c.RemoveIndexNode(nil)
    assert.NoError(t, err)
    s.NoError(err)

    s.Equal(1, c.clusterConfig.IndexNodeNum)
    s.Equal(1, len(c.indexNodes))

    assert.Equal(t, 1, c.clusterConfig.IndexNodeNum)
    assert.Equal(t, 1, len(c.indexNodes))
}

func TestUpdateClusterSize(t *testing.T) {
    ctx := context.Background()
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)
    err = c.Start()
    assert.NoError(t, err)
    defer c.Stop()
    assert.NoError(t, err)
func (s *MiniClusterMethodsSuite) TestUpdateClusterSize() {

    err = c.UpdateClusterSize(ClusterConfig{
    c := s.Cluster

    err := c.UpdateClusterSize(ClusterConfig{
        QueryNodeNum: -1,
        DataNodeNum:  -1,
        IndexNodeNum: -1,
    })
    assert.Error(t, err)
    s.Error(err)

    err = c.UpdateClusterSize(ClusterConfig{
        QueryNodeNum: 2,
        DataNodeNum:  2,
        IndexNodeNum: 2,
    })
    assert.NoError(t, err)
    s.NoError(err)

    assert.Equal(t, 2, c.clusterConfig.DataNodeNum)
    assert.Equal(t, 2, c.clusterConfig.QueryNodeNum)
    assert.Equal(t, 2, c.clusterConfig.IndexNodeNum)
    s.Equal(2, c.clusterConfig.DataNodeNum)
    s.Equal(2, c.clusterConfig.QueryNodeNum)
    s.Equal(2, c.clusterConfig.IndexNodeNum)

    assert.Equal(t, 2, len(c.dataNodes))
    assert.Equal(t, 2, len(c.queryNodes))
    assert.Equal(t, 2, len(c.indexNodes))
    s.Equal(2, len(c.dataNodes))
    s.Equal(2, len(c.queryNodes))
    s.Equal(2, len(c.indexNodes))

    err = c.UpdateClusterSize(ClusterConfig{
        DataNodeNum:  3,
        QueryNodeNum: 2,
        IndexNodeNum: 1,
    })
    assert.NoError(t, err)
    s.NoError(err)

    assert.Equal(t, 3, c.clusterConfig.DataNodeNum)
    assert.Equal(t, 2, c.clusterConfig.QueryNodeNum)
    assert.Equal(t, 1, c.clusterConfig.IndexNodeNum)
    s.Equal(3, c.clusterConfig.DataNodeNum)
    s.Equal(2, c.clusterConfig.QueryNodeNum)
    s.Equal(1, c.clusterConfig.IndexNodeNum)

    assert.Equal(t, 3, len(c.dataNodes))
    assert.Equal(t, 2, len(c.queryNodes))
    assert.Equal(t, 1, len(c.indexNodes))
    s.Equal(3, len(c.dataNodes))
    s.Equal(2, len(c.queryNodes))
    s.Equal(1, len(c.indexNodes))
}

func TestMiniCluster(t *testing.T) {
    suite.Run(t, new(MiniClusterMethodsSuite))
}
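The conversion above is the template for the rest of this change: the per-test StartMiniCluster/Start/Stop boilerplate is replaced by testify's suite lifecycle hooks, and assert.*(t, ...) calls become methods on the suite receiver. A minimal sketch of the resulting shape, with illustrative names that are not taken from the repository:

package integration

import (
    "testing"

    "github.com/stretchr/testify/suite"
)

// ExampleSuite is a hypothetical suite; the real suites in this change
// embed MiniClusterSuite to inherit the shared cluster lifecycle.
type ExampleSuite struct {
    suite.Suite
}

// SetupTest runs before each Test* method of the suite.
func (s *ExampleSuite) SetupTest() {
    s.T().Log("per-test setup runs here")
}

// Suite methods assert through the receiver instead of a *testing.T.
func (s *ExampleSuite) TestSomething() {
    s.NoError(nil)  // was: assert.NoError(t, err)
    s.Equal(2, 1+1) // was: assert.Equal(t, 2, 1+1)
}

// One go-test entry point runs every Test* method of the suite.
func TestExample(t *testing.T) {
    suite.Run(t, new(ExampleSuite))
}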
@ -20,10 +20,9 @@ import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/golang/protobuf/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "go.uber.org/zap"

    "github.com/milvus-io/milvus-proto/go-api/milvuspb"

@ -35,17 +34,14 @@ import (
    "github.com/milvus-io/milvus/pkg/util/merr"
)

func TestRangeSearchIP(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)
    err = c.Start()
    assert.NoError(t, err)
    defer func() {
        err = c.Stop()
        assert.NoError(t, err)
        cancel()
    }()
type RangeSearchSuite struct {
    MiniClusterSuite
}

func (s *RangeSearchSuite) TestRangeSearchIP() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()

    prefix := "TestRangeSearchIP"
    dbName := ""

@ -55,7 +51,7 @@ func TestRangeSearchIP(t *testing.T) {
    schema := constructSchema(collectionName, dim, true)
    marshaledSchema, err := proto.Marshal(schema)
    assert.NoError(t, err)
    s.NoError(err)

    createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
        DbName: dbName,

@ -63,7 +59,7 @@ func TestRangeSearchIP(t *testing.T) {
        Schema:    marshaledSchema,
        ShardsNum: common.DefaultShardsNum,
    })
    assert.NoError(t, err)
    s.NoError(err)

    err = merr.Error(createCollectionStatus)
    if err != nil {

@ -72,8 +68,8 @@ func TestRangeSearchIP(t *testing.T) {
    log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
    showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
    assert.NoError(t, err)
    assert.True(t, merr.Ok(showCollectionsResp.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(showCollectionsResp.GetStatus()))
    log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))

    fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)

@ -85,23 +81,23 @@ func TestRangeSearchIP(t *testing.T) {
        HashKeys: hashKeys,
        NumRows:  uint32(rowNum),
    })
    assert.NoError(t, err)
    assert.True(t, merr.Ok(insertResult.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(insertResult.GetStatus()))

    // flush
    flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
        DbName:          dbName,
        CollectionNames: []string{collectionName},
    })
    assert.NoError(t, err)
    s.NoError(err)
    segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
    assert.True(t, has)
    s.True(has)
    ids := segmentIDs.GetData()
    assert.NotEmpty(t, segmentIDs)
    s.NotEmpty(segmentIDs)

    segments, err := c.metaWatcher.ShowSegments()
    assert.NoError(t, err)
    assert.NotEmpty(t, segments)
    s.NoError(err)
    s.NotEmpty(segments)
    for _, segment := range segments {
        log.Info("ShowSegments result", zap.String("segment", segment.String()))
    }

@ -114,19 +110,19 @@ func TestRangeSearchIP(t *testing.T) {
        IndexName:   "_default",
        ExtraParams: constructIndexParam(dim, IndexFaissIvfFlat, distance.IP),
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(createIndexStatus)
    if err != nil {
        log.Warn("createIndexStatus fail reason", zap.Error(err))
    }
    waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
    waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)

    // load
    loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
        DbName:         dbName,
        CollectionName: collectionName,
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(loadStatus)
    if err != nil {
        log.Warn("LoadCollection fail reason", zap.Error(err))

@ -153,7 +149,7 @@ func TestRangeSearchIP(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.NoError(t, err)
    s.NoError(err)

    // pass in radius and range_filter when range search
    params["range_filter"] = filter

@ -166,7 +162,7 @@ func TestRangeSearchIP(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.NoError(t, err)
    s.NoError(err)

    // pass in illegal radius and range_filter when range search
    params["radius"] = filter

@ -180,26 +176,20 @@ func TestRangeSearchIP(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.Error(t, err)
    s.Error(err)

    log.Info("=========================")
    log.Info("=========================")
    log.Info("TestRangeSearchIP succeed")
    log.Info("=========================")
    log.Info("=========================")

}

func TestRangeSearchL2(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)
    err = c.Start()
    assert.NoError(t, err)
    defer func() {
        err = c.Stop()
        assert.NoError(t, err)
        cancel()
    }()
func (s *RangeSearchSuite) TestRangeSearchL2() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()

    prefix := "TestRangeSearchL2"
    dbName := ""

@ -209,7 +199,7 @@ func TestRangeSearchL2(t *testing.T) {
    schema := constructSchema(collectionName, dim, true)
    marshaledSchema, err := proto.Marshal(schema)
    assert.NoError(t, err)
    s.NoError(err)

    createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
        DbName: dbName,

@ -217,7 +207,7 @@ func TestRangeSearchL2(t *testing.T) {
        Schema:    marshaledSchema,
        ShardsNum: common.DefaultShardsNum,
    })
    assert.NoError(t, err)
    s.NoError(err)

    err = merr.Error(createCollectionStatus)
    if err != nil {

@ -226,8 +216,8 @@ func TestRangeSearchL2(t *testing.T) {
    log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
    showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
    assert.NoError(t, err)
    assert.True(t, merr.Ok(showCollectionsResp.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(showCollectionsResp.GetStatus()))
    log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))

    fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)

@ -239,23 +229,23 @@ func TestRangeSearchL2(t *testing.T) {
        HashKeys: hashKeys,
        NumRows:  uint32(rowNum),
    })
    assert.NoError(t, err)
    assert.True(t, merr.Ok(insertResult.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(insertResult.GetStatus()))

    // flush
    flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
        DbName:          dbName,
        CollectionNames: []string{collectionName},
    })
    assert.NoError(t, err)
    s.NoError(err)
    segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
    assert.True(t, has)
    s.True(has)
    ids := segmentIDs.GetData()
    assert.NotEmpty(t, segmentIDs)
    s.NotEmpty(segmentIDs)

    segments, err := c.metaWatcher.ShowSegments()
    assert.NoError(t, err)
    assert.NotEmpty(t, segments)
    s.NoError(err)
    s.NotEmpty(segments)
    for _, segment := range segments {
        log.Info("ShowSegments result", zap.String("segment", segment.String()))
    }

@ -268,19 +258,19 @@ func TestRangeSearchL2(t *testing.T) {
        IndexName:   "_default",
        ExtraParams: constructIndexParam(dim, IndexFaissIvfFlat, distance.L2),
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(createIndexStatus)
    if err != nil {
        log.Warn("createIndexStatus fail reason", zap.Error(err))
    }
    waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
    waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)

    // load
    loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
        DbName:         dbName,
        CollectionName: collectionName,
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(loadStatus)
    if err != nil {
        log.Warn("LoadCollection fail reason", zap.Error(err))

@ -306,7 +296,7 @@ func TestRangeSearchL2(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.NoError(t, err)
    s.NoError(err)

    // pass in radius and range_filter when range search
    params["range_filter"] = filter

@ -319,7 +309,7 @@ func TestRangeSearchL2(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.NoError(t, err)
    s.NoError(err)

    // pass in illegal radius and range_filter when range search
    params["radius"] = filter

@ -333,11 +323,16 @@ func TestRangeSearchL2(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.Error(t, err)
    s.Error(err)

    log.Info("=========================")
    log.Info("=========================")
    log.Info("TestRangeSearchL2 succeed")
    log.Info("=========================")
    log.Info("=========================")

}

func TestRangeSearch(t *testing.T) {
    suite.Run(t, new(RangeSearchSuite))
}
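Both range-search tests drive the same two knobs: adding "radius" to the search params turns a plain top-k search into a range search, and "range_filter" optionally tightens the band, while the "illegal" case puts the two bounds on the wrong side of each other for the chosen metric. A rough sketch of the params map the tests mutate; the key names come from the test bodies above, but the helper and the bound values are illustrative:

// buildRangeParams sketches the search-parameter map the tests mutate.
// Which bound is the near side depends on the metric: IP scores grow as
// vectors get closer while L2 distances shrink, so radius and range_filter
// swap roles between the two tests (an inference from the metric
// direction, not from code shown in this diff).
func buildRangeParams(radius, filter float64) map[string]interface{} {
    params := map[string]interface{}{
        "nprobe": 10, // ordinary ANN knob, unchanged by range search
    }
    params["radius"] = radius       // legal on its own
    params["range_filter"] = filter // optional tighter bound
    return params
}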
@ -29,62 +29,50 @@ import (
    "github.com/milvus-io/milvus/pkg/log"
    "github.com/milvus-io/milvus/pkg/util/distance"
    "github.com/milvus-io/milvus/pkg/util/paramtable"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "go.uber.org/zap"
)

func TestRefreshPasswordLength(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
type RefreshConfigSuite struct {
    MiniClusterSuite
}

func (s *RefreshConfigSuite) TestRefreshPasswordLength() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)

    err = c.Start()
    assert.NoError(t, err)
    defer func() {
        err = c.Stop()
        assert.NoError(t, err)
        cancel()
    }()

    s, err := c.proxy.CreateCredential(ctx, &milvuspb.CreateCredentialRequest{
    resp, err := c.proxy.CreateCredential(ctx, &milvuspb.CreateCredentialRequest{
        Username: "test",
        Password: "1234",
    })
    log.Debug("first create result", zap.Any("state", s))
    assert.Equal(t, commonpb.ErrorCode_IllegalArgument, s.GetErrorCode())
    log.Debug("first create result", zap.Any("state", resp))
    s.Require().NoError(err)
    s.Equal(commonpb.ErrorCode_IllegalArgument, resp.GetErrorCode())

    params := paramtable.Get()
    c.etcdCli.KV.Put(ctx, fmt.Sprintf("%s/config/proxy/minpasswordlength", params.EtcdCfg.RootPath.GetValue()), "3")
    key := fmt.Sprintf("%s/config/proxy/minpasswordlength", params.EtcdCfg.RootPath.GetValue())
    c.etcdCli.KV.Put(ctx, key, "3")

    assert.Eventually(t, func() bool {
        s, err = c.proxy.CreateCredential(ctx, &milvuspb.CreateCredentialRequest{
    s.Eventually(func() bool {
        resp, err = c.proxy.CreateCredential(ctx, &milvuspb.CreateCredentialRequest{
            Username: "test",
            Password: "1234",
        })
        log.Debug("second create result", zap.Any("state", s))
        return commonpb.ErrorCode_Success == s.GetErrorCode()
        log.Debug("second create result", zap.Any("state", resp))
        return commonpb.ErrorCode_Success == resp.GetErrorCode()
    }, time.Second*20, time.Millisecond*500)

}

func TestRefreshDefaultIndexName(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
func (s *RefreshConfigSuite) TestRefreshDefaultIndexName() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)

    err = c.Start()
    assert.NoError(t, err)
    defer func() {
        err = c.Stop()
        assert.NoError(t, err)
        cancel()
    }()

    params := paramtable.Get()
    c.etcdCli.KV.Put(ctx, fmt.Sprintf("%s/config/common/defaultIndexName", params.EtcdCfg.RootPath.GetValue()), "a_index")

    assert.Eventually(t, func() bool {
    s.Eventually(func() bool {
        return params.CommonCfg.DefaultIndexName.GetValue() == "a_index"
    }, time.Second*10, time.Millisecond*500)

@ -95,6 +83,7 @@ func TestRefreshDefaultIndexName(t *testing.T) {
    schema := constructSchema("test", 128, true)
    marshaledSchema, err := proto.Marshal(schema)
    s.Require().NoError(err)

    createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
        DbName: "default",

@ -102,11 +91,11 @@ func TestRefreshDefaultIndexName(t *testing.T) {
        Schema:    marshaledSchema,
        ShardsNum: 1,
    })
    assert.NoError(t, err)
    s.NoError(err)
    if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
        log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
    }
    assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)
    s.Equal(createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)

    fVecColumn := newFloatVectorFieldData(floatVecField, rowNum, dim)
    hashKeys := generateHashKeys(rowNum)

@ -117,19 +106,25 @@ func TestRefreshDefaultIndexName(t *testing.T) {
        HashKeys: hashKeys,
        NumRows:  uint32(rowNum),
    })
    assert.NoError(t, err)
    s.NoError(err)

    _, err = c.proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
        CollectionName: collectionName,
        FieldName:      floatVecField,
        ExtraParams:    constructIndexParam(dim, IndexFaissIvfFlat, distance.L2),
    })
    s.NoError(err)

    s, err := c.proxy.DescribeIndex(ctx, &milvuspb.DescribeIndexRequest{
    resp, err := c.proxy.DescribeIndex(ctx, &milvuspb.DescribeIndexRequest{
        DbName:         dbName,
        CollectionName: collectionName,
    })
    assert.Equal(t, commonpb.ErrorCode_Success, s.Status.GetErrorCode())
    assert.Equal(t, 1, len(s.IndexDescriptions))
    assert.Equal(t, "a_index_101", s.IndexDescriptions[0].GetIndexName())
    s.NoError(err)
    s.Equal(commonpb.ErrorCode_Success, resp.Status.GetErrorCode())
    s.Equal(1, len(resp.IndexDescriptions))
    s.Equal("a_index_101", resp.IndexDescriptions[0].GetIndexName())
}

func TestRefreshConfig(t *testing.T) {
    suite.Run(t, new(RefreshConfigSuite))
}
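Two details in the RefreshConfigSuite conversion are worth calling out. First, the old local variable s had to become resp: inside a suite method the receiver is already named s, so the old name would shadow it and break every s.NoError(...) call. Second, both tests share one refresh recipe: write a key under <rootPath>/config/... into etcd, then poll with s.Eventually until the dynamically refreshed value is observed. A condensed, hypothetical sketch of that recipe, assuming the same imports as the test file; the key mirrors the test above, and the polling condition is a placeholder:

func (s *RefreshConfigSuite) refreshExample(ctx context.Context) {
    params := paramtable.Get()
    // dynamic config keys live under <etcd root path>/config/...,
    // the prefix the etcd config source watches
    key := fmt.Sprintf("%s/config/proxy/minpasswordlength", params.EtcdCfg.RootPath.GetValue())
    _, err := s.Cluster.etcdCli.KV.Put(ctx, key, "3")
    s.Require().NoError(err)

    // the refresher picks the change up asynchronously, so poll
    s.Eventually(func() bool {
        return true // placeholder: the real tests retry the previously failing RPC
    }, time.Second*20, time.Millisecond*500)
}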
@ -0,0 +1,103 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
    "context"
    "os"
    "strings"
    "time"

    "github.com/milvus-io/milvus/pkg/util/etcd"
    "github.com/milvus-io/milvus/pkg/util/paramtable"
    "github.com/stretchr/testify/suite"
    "go.etcd.io/etcd/server/v3/embed"
)

// EmbedEtcdSuite contains embed setup & teardown related logic
type EmbedEtcdSuite struct {
    EtcdServer *embed.Etcd
    EtcdDir    string
}

func (s *EmbedEtcdSuite) SetupEmbedEtcd() error {
    server, folder, err := etcd.StartTestEmbedEtcdServer()
    if err != nil {
        return err
    }

    s.EtcdServer = server
    s.EtcdDir = folder

    return nil
}

func (s *EmbedEtcdSuite) TearDownEmbedEtcd() {
    if s.EtcdServer != nil {
        s.EtcdServer.Server.Stop()
    }
    if s.EtcdDir != "" {
        os.RemoveAll(s.EtcdDir)
    }
}

type MiniClusterSuite struct {
    suite.Suite
    EmbedEtcdSuite

    Cluster    *MiniCluster
    cancelFunc context.CancelFunc
}

func (s *MiniClusterSuite) SetupSuite() {
    s.Require().NoError(s.SetupEmbedEtcd())
}

func (s *MiniClusterSuite) TearDownSuite() {
    s.TearDownEmbedEtcd()
}

func (s *MiniClusterSuite) SetupTest() {
    s.T().Log("Setup test...")
    // setup mini cluster to use embed etcd
    endpoints := etcd.GetEmbedEtcdEndpoints(s.EtcdServer)
    val := strings.Join(endpoints, ",")
    // setup env value to init etcd source
    s.T().Setenv("etcd.endpoints", val)

    params = paramtable.Get()

    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
    s.cancelFunc = cancel
    c, err := StartMiniCluster(ctx, func(c *MiniCluster) {
        // change config etcd endpoints
        c.params[params.EtcdCfg.Endpoints.Key] = val
    })
    s.Require().NoError(err)
    s.Cluster = c

    // start mini cluster
    s.Require().NoError(s.Cluster.Start())
}

func (s *MiniClusterSuite) TearDownTest() {
    s.T().Log("Tear Down test...")
    defer s.cancelFunc()
    if s.Cluster != nil {
        s.Cluster.Stop()
    }
}
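With this file in place, a test file opts into the shared cluster lifecycle simply by embedding MiniClusterSuite: SetupTest starts a fresh MiniCluster against the embedded etcd before every test, and TearDownTest stops it. A minimal hypothetical example of a new suite built on top of it (RangeSearchSuite and UpsertSuite in this change follow exactly this shape; only the names below are invented):

package integration

import (
    "context"
    "testing"

    "github.com/stretchr/testify/suite"
)

// MyFeatureSuite is illustrative; the embedding is what matters.
type MyFeatureSuite struct {
    MiniClusterSuite
}

func (s *MyFeatureSuite) TestFeature() {
    c := s.Cluster // freshly started by SetupTest
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()
    _ = ctx // exercise c.proxy, c.metaWatcher, etc. here
}

func TestMyFeature(t *testing.T) {
    suite.Run(t, new(MyFeatureSuite))
}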
@ -20,7 +20,6 @@ import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/golang/protobuf/proto"
    "github.com/milvus-io/milvus-proto/go-api/milvuspb"

@ -30,22 +29,18 @@ import (
    "github.com/milvus-io/milvus/pkg/util/distance"
    "github.com/milvus-io/milvus/pkg/util/funcutil"
    "github.com/milvus-io/milvus/pkg/util/merr"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "go.uber.org/zap"
)

func TestUpsert(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
    c, err := StartMiniCluster(ctx)
    assert.NoError(t, err)
    err = c.Start()
    assert.NoError(t, err)
    defer func() {
        err = c.Stop()
        assert.NoError(t, err)
        cancel()
    }()
    assert.NoError(t, err)
type UpsertSuite struct {
    MiniClusterSuite
}

func (s *UpsertSuite) TestUpsert() {
    c := s.Cluster
    ctx, cancel := context.WithCancel(c.GetContext())
    defer cancel()

    prefix := "TestUpsert"
    dbName := ""

@ -55,7 +50,7 @@ func TestUpsert(t *testing.T) {
    schema := constructSchema(collectionName, dim, false)
    marshaledSchema, err := proto.Marshal(schema)
    assert.NoError(t, err)
    s.NoError(err)

    createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
        DbName: dbName,

@ -63,7 +58,7 @@ func TestUpsert(t *testing.T) {
        Schema:    marshaledSchema,
        ShardsNum: common.DefaultShardsNum,
    })
    assert.NoError(t, err)
    s.NoError(err)

    err = merr.Error(createCollectionStatus)
    if err != nil {

@ -72,8 +67,8 @@ func TestUpsert(t *testing.T) {
    log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
    showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
    assert.NoError(t, err)
    assert.True(t, merr.Ok(showCollectionsResp.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(showCollectionsResp.GetStatus()))
    log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))

    pkFieldData := newInt64FieldData(int64Field, rowNum)

@ -86,23 +81,23 @@ func TestUpsert(t *testing.T) {
        HashKeys: hashKeys,
        NumRows:  uint32(rowNum),
    })
    assert.NoError(t, err)
    assert.True(t, merr.Ok(upsertResult.GetStatus()))
    s.NoError(err)
    s.True(merr.Ok(upsertResult.GetStatus()))

    // flush
    flushResp, err := c.proxy.Flush(ctx, &milvuspb.FlushRequest{
        DbName:          dbName,
        CollectionNames: []string{collectionName},
    })
    assert.NoError(t, err)
    s.NoError(err)
    segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
    assert.True(t, has)
    s.True(has)
    ids := segmentIDs.GetData()
    assert.NotEmpty(t, segmentIDs)
    s.NotEmpty(segmentIDs)

    segments, err := c.metaWatcher.ShowSegments()
    assert.NoError(t, err)
    assert.NotEmpty(t, segments)
    s.NoError(err)
    s.NotEmpty(segments)
    for _, segment := range segments {
        log.Info("ShowSegments result", zap.String("segment", segment.String()))
    }

@ -115,20 +110,20 @@ func TestUpsert(t *testing.T) {
        IndexName:   "_default",
        ExtraParams: constructIndexParam(dim, IndexFaissIvfFlat, distance.IP),
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(createIndexStatus)
    if err != nil {
        log.Warn("createIndexStatus fail reason", zap.Error(err))
    }

    waitingForIndexBuilt(ctx, c, t, collectionName, floatVecField)
    waitingForIndexBuilt(ctx, c, s.T(), collectionName, floatVecField)

    // load
    loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
        DbName:         dbName,
        CollectionName: collectionName,
    })
    assert.NoError(t, err)
    s.NoError(err)
    err = merr.Error(loadStatus)
    if err != nil {
        log.Warn("LoadCollection fail reason", zap.Error(err))

@ -150,11 +145,16 @@ func TestUpsert(t *testing.T) {
    if err != nil {
        log.Warn("searchResult fail reason", zap.Error(err))
    }
    assert.NoError(t, err)
    s.NoError(err)

    log.Info("==================")
    log.Info("==================")
    log.Info("TestUpsert succeed")
    log.Info("==================")
    log.Info("==================")

}

func TestUpsert(t *testing.T) {
    suite.Run(t, new(UpsertSuite))
}
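A final note on the conversions: helpers written against *testing.T, such as waitingForIndexBuilt above, keep working unchanged inside suite methods because testify exposes the underlying test through s.T(). A tiny hypothetical sketch, assuming the usual "testing" import:

// legacyHelper stands in for any existing helper that takes *testing.T.
func legacyHelper(t *testing.T) {
    t.Log("still callable from a suite")
}

// Inside any suite method, s.T() bridges back to the plain testing API.
func (s *UpsertSuite) exampleBridge() {
    legacyHelper(s.T())
}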