mirror of https://github.com/milvus-io/milvus.git
Add bulkinsert integration test (#22621)
Signed-off-by: wayblink <anyang.wang@zilliz.com>
pull/22681/head
parent 3ffa6d5302
commit 28390dfe09
@@ -937,6 +937,7 @@ func (s *Server) Stop() error {
		s.stopCompactionTrigger()
		s.stopCompactionHandler()
	}
	s.indexBuilder.Stop()
	return nil
}

@@ -43,7 +43,6 @@ import (
	"github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/util/commonpbutil"
	"github.com/milvus-io/milvus/internal/util/metricsinfo"
	"github.com/milvus-io/milvus/internal/util/paramtable"
	"github.com/milvus-io/milvus/internal/util/sessionutil"
	"github.com/milvus-io/milvus/internal/util/timerecord"
	"github.com/milvus-io/milvus/internal/util/typeutil"
@@ -279,7 +278,7 @@ func (node *QueryNode) getStatisticsWithDmlChannel(ctx context.Context, req *que

// WatchDmChannels creates consumers on dmChannels to receive incremental data, which is an important part of real-time query
func (node *QueryNode) WatchDmChannels(ctx context.Context, in *querypb.WatchDmChannelsRequest) (*commonpb.Status, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	// check node healthy
	if !node.lifetime.Add(commonpbutil.IsHealthy) {
		err := fmt.Errorf("query node %d is not ready", nodeID)
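Note: this hunk and the similar ones below replace the process-wide ID from paramtable with the ID held in the node's own session. Presumably this is what the new single-process integration cluster needs, since every component in that process shares one global paramtable while each keeps its own session. A minimal sketch with hypothetical types (not the Milvus API) to show the difference:

package main

import "fmt"

// globalNodeID stands in for a process-wide value such as paramtable.GetNodeID().
var globalNodeID int64 = 1

type session struct{ ServerID int64 }

type queryNode struct{ session *session }

func main() {
	// Two query nodes living in one process, as in the integration mini cluster.
	nodes := []*queryNode{
		{session: &session{ServerID: 1}},
		{session: &session{ServerID: 2}},
	}
	for _, n := range nodes {
		// The global ID cannot tell the nodes apart; the per-node session ID can.
		fmt.Printf("global=%d session=%d\n", globalNodeID, n.session.ServerID)
	}
}
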
@@ -374,7 +373,7 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC
		zap.String("channel", req.GetChannelName()),
	)
	// check node healthy
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
		err := fmt.Errorf("query node %d is not ready", nodeID)
		status := &commonpb.Status{
@@ -434,7 +433,8 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC

// LoadSegments loads historical data into the query node; historical data can be vector data or an index
func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegmentsRequest) (*commonpb.Status, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
+	log.Info("wayblink", zap.Int64("nodeID", nodeID))
	// check node healthy
	if !node.lifetime.Add(commonpbutil.IsHealthy) {
		err := fmt.Errorf("query node %d is not ready", nodeID)
@@ -615,7 +615,7 @@ func (node *QueryNode) ReleasePartitions(ctx context.Context, in *querypb.Releas

// ReleaseSegments removes the specified segments from the query node according to segmentIDs, partitionIDs, and collectionID
func (node *QueryNode) ReleaseSegments(ctx context.Context, in *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
		err := fmt.Errorf("query node %d is not ready", nodeID)
		status := &commonpb.Status{
@@ -712,7 +712,7 @@ func filterSegmentInfo(segmentInfos []*querypb.SegmentInfo, segmentIDs map[int64

// Search performs replica search tasks.
func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if !node.IsStandAlone && req.GetReq().GetBase().GetTargetID() != nodeID {
		return &internalpb.SearchResults{
			Status: &commonpb.Status{
@@ -794,7 +794,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
}

func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.SearchRequest, dmlChannel string) (*internalpb.SearchResults, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(nodeID), metrics.SearchLabel, metrics.TotalLabel).Inc()
	failRet := &internalpb.SearchResults{
		Status: &commonpb.Status{
@@ -919,7 +919,7 @@ func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.Se
}

func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.QueryRequest, dmlChannel string) (*internalpb.RetrieveResults, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(nodeID), metrics.QueryLabel, metrics.TotalLabel).Inc()
	failRet := &internalpb.RetrieveResults{
		Status: &commonpb.Status{
@@ -1055,7 +1055,7 @@ func (node *QueryNode) Query(ctx context.Context, req *querypb.QueryRequest) (*i
		zap.Uint64("guaranteeTimestamp", req.Req.GetGuaranteeTimestamp()),
		zap.Uint64("timeTravel", req.GetReq().GetTravelTimestamp()))

-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if req.GetReq().GetBase().GetTargetID() != nodeID {
		return &internalpb.RetrieveResults{
			Status: &commonpb.Status{
@@ -1158,7 +1158,7 @@ func (node *QueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.Syn

// ShowConfigurations returns the configurations of queryNode matching req.Pattern
func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
		log.Warn("QueryNode.ShowConfigurations failed",
			zap.Int64("nodeId", nodeID),
@@ -1195,7 +1195,7 @@ func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.S

// GetMetrics returns system info of the query node, such as total memory, memory usage, cpu usage, ...
func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
		log.Ctx(ctx).Warn("QueryNode.GetMetrics failed",
			zap.Int64("nodeId", nodeID),
@@ -1260,7 +1260,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
}

func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.GetDataDistributionRequest) (*querypb.GetDataDistributionResponse, error) {
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	log := log.With(
		zap.Int64("msgID", req.GetBase().GetMsgID()),
		zap.Int64("nodeID", nodeID),
@@ -1355,7 +1355,7 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get

func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDistributionRequest) (*commonpb.Status, error) {
	log := log.Ctx(ctx).With(zap.Int64("collectionID", req.GetCollectionID()), zap.String("channel", req.GetChannel()))
-	nodeID := paramtable.GetNodeID()
+	nodeID := node.session.ServerID
	// check node healthy
	if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) {
		err := fmt.Errorf("query node %d is not ready", nodeID)

@@ -696,6 +696,9 @@ func (c *Core) Stop() error {
	c.stopExecutor()
	c.stopScheduler()
	c.cancelIfNotNil()
	if c.quotaCenter != nil {
		c.quotaCenter.stop()
	}
	c.wg.Wait()
	c.revokeSession()
	return nil

@@ -0,0 +1,337 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"context"
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/milvus-io/milvus-proto/go-api/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/milvuspb"
	"github.com/milvus-io/milvus-proto/go-api/schemapb"
	"github.com/milvus-io/milvus/internal/common"
	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/util/distance"
	"github.com/milvus-io/milvus/internal/util/funcutil"
	"github.com/milvus-io/milvus/internal/util/importutil"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"
)

const (
	TempFilesPath = "/tmp/integration_test/import/"
	Dim           = 128
)

// test bulk insert E2E
// 1, create collection with a vector column and a varchar column
// 2, generate numpy files
// 3, import
// 4, create index
// 5, load
// 6, search
func TestBulkInsert(t *testing.T) {
	ctx := context.Background()
	c, err := StartMiniCluster(ctx)
	assert.NoError(t, err)
	err = c.Start()
	assert.NoError(t, err)
	defer c.Stop()
	assert.NoError(t, err)

	prefix := "TestBulkInsert"
	dbName := ""
	collectionName := prefix + funcutil.GenRandomStr()
	int64Field := "int64"
	floatVecField := "embeddings"
	scalarField := "image_path"
	dim := 128

	constructCollectionSchema := func() *schemapb.CollectionSchema {
		pk := &schemapb.FieldSchema{
			Name:         int64Field,
			IsPrimaryKey: true,
			Description:  "pk",
			DataType:     schemapb.DataType_Int64,
			TypeParams:   nil,
			IndexParams:  nil,
			AutoID:       true,
		}
		fVec := &schemapb.FieldSchema{
			Name:         floatVecField,
			IsPrimaryKey: false,
			Description:  "",
			DataType:     schemapb.DataType_FloatVector,
			TypeParams: []*commonpb.KeyValuePair{
				{
					Key:   "dim",
					Value: strconv.Itoa(dim),
				},
			},
			IndexParams: nil,
			AutoID:      false,
		}
		scalar := &schemapb.FieldSchema{
			Name:         scalarField,
			IsPrimaryKey: false,
			Description:  "",
			DataType:     schemapb.DataType_VarChar,
			TypeParams: []*commonpb.KeyValuePair{
				{
					Key:   "max_length",
					Value: "65535",
				},
			},
			IndexParams: nil,
			AutoID:      false,
		}
		return &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				pk,
				fVec,
				scalar,
			},
		}
	}
	schema := constructCollectionSchema()
	marshaledSchema, err := proto.Marshal(schema)
	assert.NoError(t, err)

	createCollectionStatus, err := c.proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
		DbName:         dbName,
		CollectionName: collectionName,
		Schema:         marshaledSchema,
		ShardsNum:      2,
	})
	assert.NoError(t, err)
	if createCollectionStatus.GetErrorCode() != commonpb.ErrorCode_Success {
		log.Warn("createCollectionStatus fail reason", zap.String("reason", createCollectionStatus.GetReason()))
	}
	assert.Equal(t, createCollectionStatus.GetErrorCode(), commonpb.ErrorCode_Success)

	log.Info("CreateCollection result", zap.Any("createCollectionStatus", createCollectionStatus))
	showCollectionsResp, err := c.proxy.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{})
	assert.NoError(t, err)
	assert.Equal(t, showCollectionsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
	log.Info("ShowCollections result", zap.Any("showCollectionsResp", showCollectionsResp))

	err = GenerateNumpyFile(c.chunkManager.RootPath()+"/"+"embeddings.npy", 100, schemapb.DataType_FloatVector, []*commonpb.KeyValuePair{
		{
			Key:   "dim",
			Value: strconv.Itoa(Dim),
		},
	})
	assert.NoError(t, err)
	err = GenerateNumpyFile(c.chunkManager.RootPath()+"/"+"image_path.npy", 100, schemapb.DataType_VarChar, []*commonpb.KeyValuePair{
		{
			Key:   "max_length",
			Value: strconv.Itoa(65535),
		},
	})
	assert.NoError(t, err)

	bulkInsertFiles := []string{
		c.chunkManager.RootPath() + "/" + "embeddings.npy",
		c.chunkManager.RootPath() + "/" + "image_path.npy",
	}

	health1, err := c.dataCoord.CheckHealth(ctx, &milvuspb.CheckHealthRequest{})
	assert.NoError(t, err)
	log.Info("dataCoord health", zap.Any("health1", health1))
	importResp, err := c.proxy.Import(ctx, &milvuspb.ImportRequest{
		CollectionName: collectionName,
		Files:          bulkInsertFiles,
	})
	assert.NoError(t, err)
	log.Info("Import result", zap.Any("importResp", importResp), zap.Int64s("tasks", importResp.GetTasks()))

	tasks := importResp.GetTasks()
	for _, task := range tasks {
	loop:
		for {
			importTaskState, err := c.proxy.GetImportState(ctx, &milvuspb.GetImportStateRequest{
				Task: task,
			})
			assert.NoError(t, err)
			switch importTaskState.GetState() {
			case commonpb.ImportState_ImportCompleted:
				break loop
			case commonpb.ImportState_ImportFailed:
				break loop
			case commonpb.ImportState_ImportFailedAndCleaned:
				break loop
			default:
				log.Info("import task state", zap.Int64("id", task), zap.String("state", importTaskState.GetState().String()))
				time.Sleep(time.Second * time.Duration(3))
				continue
			}
		}
	}

	health2, err := c.dataCoord.CheckHealth(ctx, &milvuspb.CheckHealthRequest{})
	assert.NoError(t, err)
	log.Info("dataCoord health", zap.Any("health2", health2))

	segments, err := c.metaWatcher.ShowSegments()
	assert.NoError(t, err)
	assert.NotEmpty(t, segments)
	for _, segment := range segments {
		log.Info("ShowSegments result", zap.String("segment", segment.String()))
	}

	// create index
	createIndexStatus, err := c.proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
		CollectionName: collectionName,
		FieldName:      floatVecField,
		IndexName:      "_default",
		ExtraParams: []*commonpb.KeyValuePair{
			{
				Key:   "dim",
				Value: strconv.Itoa(dim),
			},
			{
				Key:   common.MetricTypeKey,
				Value: distance.L2,
			},
			{
				Key:   "index_type",
				Value: "HNSW",
			},
			{
				Key:   "M",
				Value: "64",
			},
			{
				Key:   "efConstruction",
				Value: "512",
			},
		},
	})
	if createIndexStatus.GetErrorCode() != commonpb.ErrorCode_Success {
		log.Warn("createIndexStatus fail reason", zap.String("reason", createIndexStatus.GetReason()))
	}
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())

	// load
	loadStatus, err := c.proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
		DbName:         dbName,
		CollectionName: collectionName,
	})
	assert.NoError(t, err)
	if loadStatus.GetErrorCode() != commonpb.ErrorCode_Success {
		log.Warn("loadStatus fail reason", zap.String("reason", loadStatus.GetReason()))
	}
	assert.Equal(t, commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
	for {
		loadProgress, err := c.proxy.GetLoadingProgress(ctx, &milvuspb.GetLoadingProgressRequest{
			CollectionName: collectionName,
		})
		if err != nil {
			panic("GetLoadingProgress fail")
		}
		if loadProgress.GetProgress() == 100 {
			break
		}
		time.Sleep(500 * time.Millisecond)
	}

	// search
	expr := fmt.Sprintf("%s > 0", int64Field)
	nq := 10
	topk := 10
	roundDecimal := -1
	nprobe := 10

	searchReq := constructSearchRequest("", collectionName, expr,
		floatVecField, nq, dim, nprobe, topk, roundDecimal)

	searchResult, err := c.proxy.Search(ctx, searchReq)

	if searchResult.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
		log.Warn("searchResult fail reason", zap.String("reason", searchResult.GetStatus().GetReason()))
	}
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())

	log.Info("======================")
	log.Info("======================")
	log.Info("TestBulkInsert succeed")
	log.Info("======================")
	log.Info("======================")
}

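The labeled polling loop in TestBulkInsert above could be factored into a reusable helper. The sketch below is only an illustration: waitForImportState is a hypothetical name, getState is meant to receive a method value such as c.proxy.GetImportState, and it assumes the context, time, commonpb, and milvuspb imports this file already uses.

// waitForImportState polls getState every pollInterval until the task reaches a
// terminal state (completed, failed, or failed-and-cleaned) or ctx expires.
func waitForImportState(
	ctx context.Context,
	getState func(context.Context, *milvuspb.GetImportStateRequest) (*milvuspb.GetImportStateResponse, error),
	taskID int64,
	pollInterval time.Duration,
) (commonpb.ImportState, error) {
	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()
	for {
		resp, err := getState(ctx, &milvuspb.GetImportStateRequest{Task: taskID})
		if err != nil {
			return commonpb.ImportState_ImportFailed, err
		}
		switch resp.GetState() {
		case commonpb.ImportState_ImportCompleted,
			commonpb.ImportState_ImportFailed,
			commonpb.ImportState_ImportFailedAndCleaned:
			return resp.GetState(), nil
		}
		select {
		case <-ctx.Done():
			return resp.GetState(), ctx.Err()
		case <-ticker.C:
		}
	}
}

It would be called once per task ID returned by Import, with a context carrying the overall test deadline, replacing the labeled loop above.
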
func GenerateNumpyFile(filePath string, rowCount int, dType schemapb.DataType, typeParams []*commonpb.KeyValuePair) error {
	if dType == schemapb.DataType_VarChar {
		var data []string
		for i := 0; i < rowCount; i++ {
			data = append(data, "str")
		}
		err := importutil.CreateNumpyFile(filePath, data)
		if err != nil {
			return err
		}
	}
	if dType == schemapb.DataType_FloatVector {
		dimStr, ok := funcutil.KeyValuePair2Map(typeParams)["dim"]
		if !ok {
			return errors.New("FloatVector field needs dim parameter")
		}
		dim, err := strconv.Atoi(dimStr)
		if err != nil {
			return err
		}
		// vectors are written as fixed-size [Dim]float32 arrays, so the dim
		// type param must match the package-level Dim constant
		if dim != Dim {
			return fmt.Errorf("dim %d does not match the fixed vector size %d", dim, Dim)
		}
		var data [][Dim]float32
		for i := 0; i < rowCount; i++ {
			vec := [Dim]float32{}
			for j := 0; j < dim; j++ {
				vec[j] = 1.1
			}
			data = append(data, vec)
		}
		err = importutil.CreateNumpyFile(filePath, data)
		if err != nil {
			return err
		}
	}
	return nil
}

func TestGenerateNumpyFile(t *testing.T) {
	err := GenerateNumpyFile(TempFilesPath+"embeddings.npy", 100, schemapb.DataType_FloatVector, []*commonpb.KeyValuePair{
		{
			Key:   "dim",
			Value: strconv.Itoa(Dim),
		},
	})
	assert.NoError(t, err)
	log.Error("err", zap.Error(err))
}

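The bulkInsertFiles list in TestBulkInsert above is written by hand: one numpy column file per field that expects data, named after the field ("embeddings.npy", "image_path.npy"), and no file for the AutoID primary key. A hypothetical helper could derive the same list from the schema; buildBulkInsertFiles is not part of the Milvus code, it assumes a "path" import in addition to the file's existing ones, and it assumes the AutoID primary key needs no input file, as in the test.

// buildBulkInsertFiles returns one "<field name>.npy" path per field that expects
// data from the import, mirroring the file naming used in TestBulkInsert.
func buildBulkInsertFiles(root string, schema *schemapb.CollectionSchema) []string {
	var files []string
	for _, field := range schema.GetFields() {
		if field.GetAutoID() {
			// the AutoID primary key is generated server side, so no column file is supplied
			continue
		}
		files = append(files, path.Join(root, field.GetName()+".npy"))
	}
	return files
}
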
@@ -50,7 +50,7 @@ func (watcher *EtcdMetaWatcher) ShowSessions() ([]*sessionutil.Session, error) {
}

func (watcher *EtcdMetaWatcher) ShowSegments() ([]*datapb.SegmentInfo, error) {
-	metaBasePath := path.Join(watcher.rootPath, "/meta/datacoord-meta/s/")
+	metaBasePath := path.Join(watcher.rootPath, "/meta/datacoord-meta/s/") + "/"
	return listSegments(watcher.etcdCli, metaBasePath, func(s *datapb.SegmentInfo) bool {
		return true
	})

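The appended "/" above matters because path.Join cleans its result and drops any trailing separator, while the segment metadata keys are listed by prefix and presumably need the separator so the scan does not match sibling key ranges. A quick standard-library illustration ("by-dev" is only a stand-in root path):

package main

import (
	"fmt"
	"path"
)

func main() {
	p := path.Join("by-dev", "/meta/datacoord-meta/s/")
	fmt.Println(p)       // by-dev/meta/datacoord-meta/s   (Join strips the trailing slash)
	fmt.Println(p + "/") // by-dev/meta/datacoord-meta/s/  (separator restored for the prefix scan)
}
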
@@ -33,6 +33,7 @@ import (
	querycoord "github.com/milvus-io/milvus/internal/querycoordv2"
	"github.com/milvus-io/milvus/internal/querynode"
	"github.com/milvus-io/milvus/internal/rootcoord"
+	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/types"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/internal/util/etcd"
@@ -97,7 +98,8 @@ type MiniCluster struct {
	params        map[string]string
	clusterConfig ClusterConfig

-	factory dependency.Factory
+	factory      dependency.Factory
+	chunkManager storage.ChunkManager

	etcdCli *clientv3.Client
@@ -135,6 +137,11 @@ func StartMiniCluster(ctx context.Context, opts ...Option) (cluster *MiniCluster

	if cluster.factory == nil {
		cluster.factory = dependency.NewDefaultFactory(true)
+		chunkManager, err := cluster.factory.NewPersistentStorageChunkManager(cluster.ctx)
+		if err != nil {
+			return nil, err
+		}
+		cluster.chunkManager = chunkManager
	}

	if cluster.etcdCli == nil {
@@ -417,29 +424,40 @@ func (cluster *MiniCluster) Start() error {
func (cluster *MiniCluster) Stop() error {
+	log.Info("mini cluster stop")
	cluster.proxy.Stop()
+	log.Info("mini cluster proxy stopped")
	cluster.rootCoord.Stop()
+	log.Info("mini cluster rootCoord stopped")
	cluster.dataCoord.Stop()
+	log.Info("mini cluster dataCoord stopped")
	//cluster.indexCoord.Stop()
	cluster.queryCoord.Stop()
+	log.Info("mini cluster queryCoord stopped")

	for _, dataNode := range cluster.dataNodes {
		dataNode.Stop()
	}
+	log.Info("mini cluster datanodes stopped")
	for _, queryNode := range cluster.queryNodes {
		queryNode.Stop()
	}
+	log.Info("mini cluster querynodes stopped")
	for _, indexNode := range cluster.indexNodes {
		indexNode.Stop()
	}
+	log.Info("mini cluster indexnodes stopped")

	cluster.etcdCli.KV.Delete(cluster.ctx, Params.EtcdCfg.RootPath.GetValue(), clientv3.WithPrefix())
	defer cluster.etcdCli.Close()
-	chunkManager, err := cluster.factory.NewPersistentStorageChunkManager(cluster.ctx)
-	if err != nil {
-		log.Warn("fail to create chunk manager to clean test data", zap.Error(err))
-	} else {
-		chunkManager.RemoveWithPrefix(cluster.ctx, chunkManager.RootPath())
-	}
+	if cluster.chunkManager == nil {
+		chunkManager, err := cluster.factory.NewPersistentStorageChunkManager(cluster.ctx)
+		if err != nil {
+			log.Warn("fail to create chunk manager to clean test data", zap.Error(err))
+		} else {
+			cluster.chunkManager = chunkManager
+		}
+	}
+	cluster.chunkManager.RemoveWithPrefix(cluster.ctx, cluster.chunkManager.RootPath())
	return nil
}

@@ -451,6 +469,7 @@ func DefaultParams() map[string]string {
		//"runtime.role": typeutil.StandaloneRole,
		Params.IntegrationTestCfg.IntegrationMode.Key: "true",
		Params.CommonCfg.StorageType.Key:              "local",
		Params.DataNodeCfg.MemoryForceSyncEnable.Key:  "false", // local execution will print too many logs
	}
}

@@ -26,6 +26,16 @@ import (
	"github.com/stretchr/testify/assert"
)

+func TestMiniClusterStartAndStop(t *testing.T) {
+	ctx := context.Background()
+	c, err := StartMiniCluster(ctx)
+	assert.NoError(t, err)
+	err = c.Start()
+	assert.NoError(t, err)
+	err = c.Stop()
+	assert.NoError(t, err)
+}
+
func TestAddRemoveDataNode(t *testing.T) {
	ctx := context.Background()
	c, err := StartMiniCluster(ctx)