test: removing deprecated code (#30484)

issue: https://github.com/milvus-io/milvus/issues/29507
Removing deprecated code from the integration test library.

Signed-off-by: yiwangdr <yiwangdr@gmail.com>
pull/30428/head
yiwangdr 2024-02-05 18:46:01 -08:00 committed by GitHub
parent 85246c1f5d
commit 98adbb3b6d
5 changed files with 37 additions and 1515 deletions

File diff suppressed because it is too large.


@@ -1,182 +0,0 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type MiniClusterMethodsSuite struct {
	MiniClusterSuite
}

func (s *MiniClusterMethodsSuite) TestStartAndStop() {
	// Do nothing
}
//func (s *MiniClusterMethodsSuite) TestRemoveDataNode() {
// c := s.Cluster
// ctx, cancel := context.WithCancel(c.GetContext())
// defer cancel()
//
// datanode := datanode.NewDataNode(ctx, c.factory)
// datanode.SetEtcdClient(c.EtcdCli)
// // datanode := c.CreateDefaultDataNode()
//
// err := c.AddDataNode(datanode)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.DataNodeNum)
// s.Equal(2, len(c.DataNodes))
//
// err = c.RemoveDataNode(datanode)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.DataNodeNum)
// s.Equal(1, len(c.DataNodes))
//
// // add default node and remove randomly
// err = c.AddDataNode(nil)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.DataNodeNum)
// s.Equal(2, len(c.DataNodes))
//
// err = c.RemoveDataNode(nil)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.DataNodeNum)
// s.Equal(1, len(c.DataNodes))
//}
//
//func (s *MiniClusterMethodsSuite) TestRemoveQueryNode() {
// c := s.Cluster
// ctx, cancel := context.WithCancel(c.GetContext())
// defer cancel()
//
// queryNode := querynodev2.NewQueryNode(ctx, c.factory)
// queryNode.SetEtcdClient(c.EtcdCli)
// // queryNode := c.CreateDefaultQueryNode()
//
// err := c.AddQueryNode(queryNode)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.QueryNodeNum)
// s.Equal(2, len(c.QueryNodes))
//
// err = c.RemoveQueryNode(queryNode)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.QueryNodeNum)
// s.Equal(1, len(c.QueryNodes))
//
// // add default node and remove randomly
// err = c.AddQueryNode(nil)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.QueryNodeNum)
// s.Equal(2, len(c.QueryNodes))
//
// err = c.RemoveQueryNode(nil)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.QueryNodeNum)
// s.Equal(1, len(c.QueryNodes))
//}
//
//func (s *MiniClusterMethodsSuite) TestRemoveIndexNode() {
// c := s.Cluster
// ctx, cancel := context.WithCancel(c.GetContext())
// defer cancel()
//
// indexNode := indexnode.NewIndexNode(ctx, c.factory)
// indexNode.SetEtcdClient(c.EtcdCli)
// // indexNode := c.CreateDefaultIndexNode()
//
// err := c.AddIndexNode(indexNode)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.IndexNodeNum)
// s.Equal(2, len(c.IndexNodes))
//
// err = c.RemoveIndexNode(indexNode)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.IndexNodeNum)
// s.Equal(1, len(c.IndexNodes))
//
// // add default node and remove randomly
// err = c.AddIndexNode(nil)
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.IndexNodeNum)
// s.Equal(2, len(c.IndexNodes))
//
// err = c.RemoveIndexNode(nil)
// s.NoError(err)
//
// s.Equal(1, c.clusterConfig.IndexNodeNum)
// s.Equal(1, len(c.IndexNodes))
//}
//
//func (s *MiniClusterMethodsSuite) TestUpdateClusterSize() {
// c := s.Cluster
//
// err := c.UpdateClusterSize(ClusterConfig{
// QueryNodeNum: -1,
// DataNodeNum: -1,
// IndexNodeNum: -1,
// })
// s.Error(err)
//
// err = c.UpdateClusterSize(ClusterConfig{
// QueryNodeNum: 2,
// DataNodeNum: 2,
// IndexNodeNum: 2,
// })
// s.NoError(err)
//
// s.Equal(2, c.clusterConfig.DataNodeNum)
// s.Equal(2, c.clusterConfig.QueryNodeNum)
// s.Equal(2, c.clusterConfig.IndexNodeNum)
//
// s.Equal(2, len(c.DataNodes))
// s.Equal(2, len(c.QueryNodes))
// s.Equal(2, len(c.IndexNodes))
//
// err = c.UpdateClusterSize(ClusterConfig{
// DataNodeNum: 3,
// QueryNodeNum: 2,
// IndexNodeNum: 1,
// })
// s.NoError(err)
//
// s.Equal(3, c.clusterConfig.DataNodeNum)
// s.Equal(2, c.clusterConfig.QueryNodeNum)
// s.Equal(1, c.clusterConfig.IndexNodeNum)
//
// s.Equal(3, len(c.DataNodes))
// s.Equal(2, len(c.QueryNodes))
// s.Equal(1, len(c.IndexNodes))
//}
func TestMiniCluster(t *testing.T) {
	t.Skip("Skip integration test, need to refactor integration test framework")
	suite.Run(t, new(MiniClusterMethodsSuite))
}
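Note: the removed file mostly consisted of commented-out tests against the deprecated MiniCluster's manual node add/remove helpers. For reference, a minimal sketch of the testify wiring that suites built on the remaining MiniClusterSuite keep using; the suite name is hypothetical and the setup/teardown behavior is an assumption, not something this diff shows:

package integration

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// ExampleMethodsSuite is a hypothetical name used only for illustration.
type ExampleMethodsSuite struct {
	// MiniClusterSuite is assumed to bring the mini cluster up in SetupSuite
	// and tear it down in TearDownSuite.
	MiniClusterSuite
}

func (s *ExampleMethodsSuite) TestStartAndStop() {
	// Intentionally empty: the embedded suite's setup and teardown already
	// cover cluster start and stop.
}

func TestExampleMethods(t *testing.T) {
	suite.Run(t, new(ExampleMethodsSuite))
}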


@@ -20,6 +20,7 @@ import (
	"context"
	"fmt"
	"net"
	"path"
	"sync"
	"time"
@@ -49,6 +50,42 @@ import (
	"github.com/milvus-io/milvus/pkg/util/paramtable"
)

var params *paramtable.ComponentParam = paramtable.Get()

type ClusterConfig struct {
	// ProxyNum int
	// todo coord num can be more than 1 if enable Active-Standby
	// RootCoordNum int
	// DataCoordNum int
	// IndexCoordNum int
	// QueryCoordNum int
	QueryNodeNum int
	DataNodeNum  int
	IndexNodeNum int
}

func DefaultParams() map[string]string {
	testPath := fmt.Sprintf("integration-test-%d", time.Now().Unix())
	return map[string]string{
		params.EtcdCfg.RootPath.Key:  testPath,
		params.MinioCfg.RootPath.Key: testPath,
		//"runtime.role": typeutil.StandaloneRole,
		//params.IntegrationTestCfg.IntegrationMode.Key: "true",
		params.LocalStorageCfg.Path.Key:              path.Join("/tmp", testPath),
		params.CommonCfg.StorageType.Key:             "local",
		params.DataNodeCfg.MemoryForceSyncEnable.Key: "false", // local execution will print too many logs
		params.CommonCfg.GracefulStopTimeout.Key:     "10",
	}
}

func DefaultClusterConfig() ClusterConfig {
	return ClusterConfig{
		QueryNodeNum: 1,
		DataNodeNum:  1,
		IndexNodeNum: 1,
	}
}

type MiniClusterV2 struct {
	ctx context.Context
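The new ClusterConfig, DefaultParams, and DefaultClusterConfig helpers centralize the defaults the integration tests start from. A hypothetical sketch (not part of this change) of how a test could layer overrides on top of them; mergedParams and twoQueryNodeConfig are illustrative names only:

// mergedParams copies the shared DefaultParams map and applies per-test
// overrides on top, so the override values win on key collisions.
func mergedParams(overrides map[string]string) map[string]string {
	merged := DefaultParams()
	for k, v := range overrides {
		merged[k] = v
	}
	return merged
}

// twoQueryNodeConfig starts from DefaultClusterConfig and only raises the
// query node count, e.g. for a balance or failover test.
func twoQueryNodeConfig() ClusterConfig {
	cfg := DefaultClusterConfig()
	cfg.QueryNodeNum = 2
	return cfg
}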


@@ -50,26 +50,6 @@ func (s *MiniClusterSuite) WaitForFlush(ctx context.Context, segIDs []int64, flu
}
}
func waitingForFlush(ctx context.Context, cluster *MiniCluster, segIDs []int64) {
	flushed := func() bool {
		resp, err := cluster.Proxy.GetFlushState(ctx, &milvuspb.GetFlushStateRequest{
			SegmentIDs: segIDs,
		})
		if err != nil {
			return false
		}
		return resp.GetFlushed()
	}
	for !flushed() {
		select {
		case <-ctx.Done():
			panic("flush timeout")
		default:
			time.Sleep(500 * time.Millisecond)
		}
	}
}
func NewInt64FieldData(fieldName string, numRows int) *schemapb.FieldData {
	return &schemapb.FieldData{
		Type: schemapb.DataType_Int64,
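The deleted waitingForFlush helper polled Proxy.GetFlushState in a loop and panicked on timeout; the suite-level WaitForFlush shown in the hunk header above remains the supported entry point. As a standalone sketch of the same polling pattern (pollUntil is a hypothetical helper, not the suite's API) that returns an error instead of panicking when the context expires:

import (
	"context"
	"fmt"
	"time"
)

// pollUntil re-evaluates done at the given interval until it reports true,
// and gives up with an error once ctx is cancelled or times out.
func pollUntil(ctx context.Context, interval time.Duration, done func() (bool, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if ok, err := done(); err == nil && ok {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("condition not met before deadline: %w", ctx.Err())
		case <-ticker.C:
			// try again on the next tick; transient errors from done are retried
		}
	}
}

A caller would wrap the flush-state check in the done closure, mirroring the body of the removed helper.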


@@ -75,26 +75,6 @@ func (s *MiniClusterSuite) waitForLoadInternal(ctx context.Context, dbName, coll
}
}
func waitingForLoad(ctx context.Context, cluster *MiniCluster, collection string) {
	getLoadingProgress := func() *milvuspb.GetLoadingProgressResponse {
		loadProgress, err := cluster.Proxy.GetLoadingProgress(ctx, &milvuspb.GetLoadingProgressRequest{
			CollectionName: collection,
		})
		if err != nil {
			panic("GetLoadingProgress fail")
		}
		return loadProgress
	}
	for getLoadingProgress().GetProgress() != 100 {
		select {
		case <-ctx.Done():
			panic("load timeout")
		default:
			time.Sleep(500 * time.Millisecond)
		}
	}
}
func ConstructSearchRequest(
	dbName, collectionName string,
	expr string,
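Like waitingForFlush, the deleted waitingForLoad helper spun on GetLoadingProgress and panicked on failure or timeout, while the suite's waitForLoadInternal shown above stays as the supported path. A hypothetical adaptation of the pollUntil sketch from the previous file, with getProgress standing in for whatever progress query the caller has (it is not an API from this diff); the 500 ms interval matches the removed helpers:

// waitForLoaded polls an arbitrary progress getter until it reports 100%,
// returning an error instead of panicking when ctx runs out.
func waitForLoaded(ctx context.Context, getProgress func(context.Context) (int64, error)) error {
	return pollUntil(ctx, 500*time.Millisecond, func() (bool, error) {
		progress, err := getProgress(ctx)
		if err != nil {
			return false, err
		}
		return progress == 100, nil
	})
}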