mirror of https://github.com/milvus-io/milvus.git
Open regression test
Signed-off-by: quicksilver <zhifeng.zhang@zilliz.com>
pull/4973/head^2
parent 707a6a366e
commit 0530fdf62f
.env
@@ -1,7 +1,7 @@
 REPO=milvusdb/milvus-distributed-dev
 ARCH=amd64
 UBUNTU=18.04
-DATE_VERSION=20201120-092740
+DATE_VERSION=20201202-085131
 LATEST_DATE_VERSION=latest
 PULSAR_ADDRESS=pulsar://pulsar:6650
 ETCD_ADDRESS=etcd:2379
@@ -10,7 +10,7 @@ try {
     dir ('build/docker/test') {
         sh 'docker pull ${SOURCE_REPO}/pytest:${SOURCE_TAG} || true'
         sh 'docker-compose build --force-rm regression'
-        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run --rm regression || true'
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run --rm regression'
         try {
             withCredentials([usernamePassword(credentialsId: "${env.DOCKER_CREDENTIALS_ID}", usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) {
                 sh 'docker login -u ${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} ${DOKCER_REGISTRY_URL}'
@@ -5,6 +5,7 @@ import (
     "log"
     "reflect"
     "sync"
+    "time"

     "github.com/apache/pulsar-client-go/pulsar"
     "github.com/golang/protobuf/proto"
@@ -83,18 +84,28 @@ func (ms *PulsarMsgStream) CreatePulsarConsumers(channels []string,
     pulsarBufSize int64) {
     ms.unmarshal = unmarshal
     for i := 0; i < len(channels); i++ {
-        receiveChannel := make(chan pulsar.ConsumerMessage, pulsarBufSize)
-        pc, err := (*ms.client).Subscribe(pulsar.ConsumerOptions{
-            Topic:                       channels[i],
-            SubscriptionName:            subName,
-            Type:                        pulsar.KeyShared,
-            SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
-            MessageChannel:              receiveChannel,
-        })
-        if err != nil {
-            log.Printf("Failed to subscribe topic, error = %v", err)
+        fn := func() error {
+            receiveChannel := make(chan pulsar.ConsumerMessage, pulsarBufSize)
+            pc, err := (*ms.client).Subscribe(pulsar.ConsumerOptions{
+                Topic:                       channels[i],
+                SubscriptionName:            subName,
+                Type:                        pulsar.KeyShared,
+                SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
+                MessageChannel:              receiveChannel,
+            })
+            if err != nil {
+                return err
+            }
+            if pc == nil {
+                return errors.New("pulsar is not ready, consumer is nil")
+            }
+            ms.consumers = append(ms.consumers, &pc)
+            return nil
+        }
+        err := Retry(10, time.Millisecond*200, fn)
+        if err != nil {
+            panic("create pulsar consumer timeout!")
         }
-        ms.consumers = append(ms.consumers, &pc)
     }
 }
@@ -228,6 +239,10 @@ func (ms *PulsarMsgStream) bufMsgPackToChannel() {

     cases := make([]reflect.SelectCase, len(ms.consumers))
     for i := 0; i < len(ms.consumers); i++ {
+        pc := *ms.consumers[i]
+        if pc == nil {
+            panic("pc is nil")
+        }
         ch := (*ms.consumers[i]).Chan()
         cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
     }
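For context on the hunk above: bufMsgPackToChannel fans in messages from a variable number of Pulsar consumers with a dynamic select built from reflect.SelectCase. Below is a minimal standalone Go sketch of that pattern; it is not part of this commit, and the channels and payloads are invented for illustration.

package main

import (
	"fmt"
	"reflect"
)

// Build one SelectCase per channel and wait on all of them with a single
// dynamic select, the same technique bufMsgPackToChannel uses for its
// consumer channels.
func main() {
	chans := make([]chan string, 3)
	for i := range chans {
		chans[i] = make(chan string, 1)
	}
	chans[1] <- "hello from consumer 1"

	cases := make([]reflect.SelectCase, len(chans))
	for i, ch := range chans {
		cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
	}

	// reflect.Select blocks until one case can proceed and reports which
	// channel fired, the value received, and whether the channel is still open.
	chosen, value, ok := reflect.Select(cases)
	fmt.Println(chosen, value.Interface(), ok) // 1 hello from consumer 1 true
}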
@@ -0,0 +1,32 @@
+package msgstream
+
+import (
+    "log"
+    "time"
+)
+
+// Reference: https://blog.cyeam.com/golang/2018/08/27/retry
+
+func Retry(attempts int, sleep time.Duration, fn func() error) error {
+    if err := fn(); err != nil {
+        if s, ok := err.(InterruptError); ok {
+            return s.error
+        }
+
+        if attempts--; attempts > 0 {
+            log.Printf("retry func error: %s. attempts #%d after %s.", err.Error(), attempts, sleep)
+            time.Sleep(sleep)
+            return Retry(attempts, 2*sleep, fn)
+        }
+        return err
+    }
+    return nil
+}
+
+type InterruptError struct {
+    error
+}
+
+func NoRetryError(err error) InterruptError {
+    return InterruptError{err}
+}
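A quick illustration of how the new Retry helper is meant to be used, mirroring its call in CreatePulsarConsumers above. This is a standalone sketch, not part of the commit: the flaky operation and its error messages are hypothetical, and the helper is restated (minus logging) only so the example compiles on its own.

package main

import (
	"errors"
	"fmt"
	"time"
)

// InterruptError and Retry mirror the helper added in retry.go above,
// trimmed of logging so the sketch stays self-contained.
type InterruptError struct{ error }

func NoRetryError(err error) InterruptError { return InterruptError{err} }

func Retry(attempts int, sleep time.Duration, fn func() error) error {
	if err := fn(); err != nil {
		if s, ok := err.(InterruptError); ok {
			return s.error // caller opted out of retrying
		}
		if attempts--; attempts > 0 {
			time.Sleep(sleep)
			return Retry(attempts, 2*sleep, fn) // exponential backoff
		}
		return err
	}
	return nil
}

func main() {
	calls := 0
	// Hypothetical flaky operation: fails twice, then succeeds.
	err := Retry(10, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println(err, calls) // <nil> 3

	// Wrapping the error in NoRetryError makes Retry give up immediately.
	err = Retry(10, 10*time.Millisecond, func() error {
		return NoRetryError(errors.New("permanent failure, retrying will not help"))
	})
	fmt.Println(err) // permanent failure, retrying will not help
}

Errors wrapped with NoRetryError bypass the backoff loop, which lets callers separate "try again later" failures from permanent ones.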
@@ -18,6 +18,7 @@ const (
 )

 func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
+    log.Println("insert into: ", in.CollectionName)
     it := &InsertTask{
         Condition: NewTaskCondition(ctx),
         BaseInsertTask: BaseInsertTask{
@@ -76,6 +77,7 @@ func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.
 }

 func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSchema) (*commonpb.Status, error) {
+    log.Println("create collection: ", req)
     cct := &CreateCollectionTask{
         Condition: NewTaskCondition(ctx),
         CreateCollectionRequest: internalpb.CreateCollectionRequest{
@@ -117,6 +119,7 @@ func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSc
 }

 func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
+    log.Println("search: ", req.CollectionName, req.Dsl)
     qt := &QueryTask{
         Condition: NewTaskCondition(ctx),
         SearchRequest: internalpb.SearchRequest{
@@ -164,6 +167,7 @@ func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.Qu
 }

 func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionName) (*commonpb.Status, error) {
+    log.Println("drop collection: ", req)
     dct := &DropCollectionTask{
         Condition: NewTaskCondition(ctx),
         DropCollectionRequest: internalpb.DropCollectionRequest{
@@ -204,6 +208,7 @@ func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionNam
 }

 func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.BoolResponse, error) {
+    log.Println("has collection: ", req)
     hct := &HasCollectionTask{
         Condition: NewTaskCondition(ctx),
         HasCollectionRequest: internalpb.HasCollectionRequest{
@@ -248,6 +253,7 @@ func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName
 }

 func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.CollectionDescription, error) {
+    log.Println("describe collection: ", req)
     dct := &DescribeCollectionTask{
         Condition: NewTaskCondition(ctx),
         DescribeCollectionRequest: internalpb.DescribeCollectionRequest{
@@ -292,6 +298,7 @@ func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.Collectio
 }

 func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*servicepb.StringListResponse, error) {
+    log.Println("show collections")
     sct := &ShowCollectionsTask{
         Condition: NewTaskCondition(ctx),
         ShowCollectionRequest: internalpb.ShowCollectionRequest{
@@ -335,6 +342,7 @@ func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*serv
 }

 func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
+    log.Println("create partition", in)
     cpt := &CreatePartitionTask{
         Condition: NewTaskCondition(ctx),
         CreatePartitionRequest: internalpb.CreatePartitionRequest{
@@ -380,6 +388,7 @@ func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName
 }

 func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
+    log.Println("drop partition: ", in)
     dpt := &DropPartitionTask{
         Condition: NewTaskCondition(ctx),
         DropPartitionRequest: internalpb.DropPartitionRequest{
@@ -426,6 +435,7 @@ func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName)
 }

 func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.BoolResponse, error) {
+    log.Println("has partition: ", in)
     hpt := &HasPartitionTask{
         Condition: NewTaskCondition(ctx),
         HasPartitionRequest: internalpb.HasPartitionRequest{
@@ -478,6 +488,7 @@ func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (
 }

 func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.PartitionDescription, error) {
+    log.Println("describe partition: ", in)
     dpt := &DescribePartitionTask{
         Condition: NewTaskCondition(ctx),
         DescribePartitionRequest: internalpb.DescribePartitionRequest{
@@ -532,6 +543,7 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
 }

 func (p *Proxy) ShowPartitions(ctx context.Context, req *servicepb.CollectionName) (*servicepb.StringListResponse, error) {
+    log.Println("show partitions: ", req)
     spt := &ShowPartitionsTask{
         Condition: NewTaskCondition(ctx),
         ShowPartitionRequest: internalpb.ShowPartitionRequest{
@@ -0,0 +1,19 @@
+[pytest]
+log_format = [%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)
+log_date_format = %Y-%m-%d %H:%M:%S
+
+# cli arguments. `-x`-stop test when error occurred;
+addopts = -x
+
+testpaths = .
+
+log_cli = true
+log_level = 10
+
+timeout = 360
+
+markers =
+    level: test level
+    serial
+
+; level = 1
@@ -4,5 +4,5 @@ numpy==1.18.1
 pytest==5.3.4
 pytest-cov==2.8.1
 pytest-timeout==1.3.4
-pymilvus-distributed==0.0.2
+pymilvus-distributed==0.0.3
 sklearn==0.0
@@ -101,7 +101,7 @@ class TestInsertBase:
         connect.flush([collection])
         connect.drop_collection(collection)

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index(self, connect, collection, get_simple_index):
         '''
@@ -119,7 +119,7 @@ class TestInsertBase:
             if field["name"] == field_name:
                 assert field["indexes"][0] == get_simple_index

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index_new(self, connect, collection, get_simple_index):
         '''
@@ -137,7 +137,7 @@ class TestInsertBase:
             if field["name"] == field_name:
                 assert field["indexes"][0] == get_simple_index

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_after_create_index(self, connect, collection, get_simple_index):
         '''
@@ -154,7 +154,7 @@ class TestInsertBase:
             if field["name"] == field_name:
                 assert field["indexes"][0] == get_simple_index

-    @pytest.mark.skip(" later ")
+    # @pytest.mark.skip(" later ")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_search(self, connect, collection):
         '''
@@ -168,7 +168,7 @@ class TestInsertBase:
         logging.getLogger().debug(res)
         assert res

-    @pytest.mark.skip
+    @pytest.mark.skip("segment row count")
     def test_insert_segment_row_count(self, connect, collection):
         nb = default_segment_row_limit + 1
         res_ids = connect.bulk_insert(collection, gen_entities(nb))
@@ -189,7 +189,7 @@ class TestInsertBase:
     def insert_count(self, request):
         yield request.param

-    @pytest.mark.skip(" laster need custom ids")
+    @pytest.mark.skip(" todo support count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_ids(self, connect, id_collection, insert_count):
         '''
@@ -206,7 +206,7 @@ class TestInsertBase:
         res_count = connect.count_entities(id_collection)
         assert res_count == nb

-    @pytest.mark.skip(" laster need custom ids")
+    @pytest.mark.skip(" todo support count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_the_same_ids(self, connect, id_collection, insert_count):
         '''
@@ -223,7 +223,7 @@ class TestInsertBase:
         res_count = connect.count_entities(id_collection)
         assert res_count == nb

-    @pytest.mark.skip(" not support count_entites")
+    @pytest.mark.skip(" todo support count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
         '''
@@ -250,7 +250,6 @@ class TestInsertBase:
         assert res_count == nb

     # TODO: assert exception && enable
-    @pytest.mark.skip(" todo support custom id")
     @pytest.mark.level(2)
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_twice_ids_no_ids(self, connect, id_collection):
@@ -288,7 +287,6 @@ class TestInsertBase:
         with pytest.raises(Exception) as e:
             res_ids = connect.bulk_insert(id_collection, default_entities, ids)

-    @pytest.mark.skip(" not suppport custom id")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_ids_length_not_match_single(self, connect, collection):
         '''
@@ -301,7 +299,7 @@ class TestInsertBase:
         with pytest.raises(Exception) as e:
             res_ids = connect.bulk_insert(collection, default_entity, ids)

-    @pytest.mark.skip(" not support count entities")
+    @pytest.mark.skip(" todo support count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
         '''
@@ -336,7 +334,6 @@ class TestInsertBase:
         assert len(ids) == default_nb
         assert connect.has_partition(collection, default_tag)

-    @pytest.mark.skip("not support custom id")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_tag_with_ids(self, connect, id_collection):
         '''
@@ -350,7 +347,7 @@ class TestInsertBase:
         assert res_ids == ids


-    @pytest.mark.skip(" not support custom id")
+    @pytest.mark.skip(" todo support count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_default_tag(self, connect, collection):
         '''
@@ -541,7 +538,7 @@ class TestInsertBase:
         with pytest.raises(Exception):
             connect.bulk_insert(collection, tmp_entity)

-    @pytest.mark.skip("to do add dim check ")
+    @pytest.mark.skip("support count entities")
     @pytest.mark.level(2)
     @pytest.mark.timeout(30)
     def test_collection_insert_rows_count_multi_threading(self, args, collection):
@@ -572,7 +569,7 @@ class TestInsertBase:
         assert res_count == thread_num * default_nb

     # TODO: unable to set config
-    @pytest.mark.skip
+    @pytest.mark.skip("get entity by id")
     @pytest.mark.level(2)
     def _test_insert_disable_auto_flush(self, connect, collection):
         '''
@@ -598,7 +595,7 @@ class TestInsertBinary:
         request.param["metric_type"] = "JACCARD"
         return request.param

-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     def test_insert_binary_entities(self, connect, binary_collection):
         '''
         target: test insert entities in binary collection
@@ -610,7 +607,7 @@ class TestInsertBinary:
         connect.flush()
         assert connect.count_entities(binary_collection) == default_nb

-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     def test_insert_binary_entities_new(self, connect, binary_collection):
         '''
         target: test insert entities in binary collection
@@ -622,7 +619,7 @@ class TestInsertBinary:
         connect.flush()
         assert connect.count_entities(binary_collection) == default_nb

-    @pytest.mark.skip
+    # @pytest.mark.skip
     def test_insert_binary_tag(self, connect, binary_collection):
         '''
         target: test insert entities and create partition tag
@@ -634,9 +631,8 @@ class TestInsertBinary:
         assert len(ids) == default_nb
         assert connect.has_partition(binary_collection, default_tag)

-    # TODO
+    @pytest.mark.skip("count entities")
     @pytest.mark.level(2)
-    @pytest.mark.skip
     def test_insert_binary_multi_times(self, connect, binary_collection):
         '''
         target: test insert entities multi times and final flush
@@ -649,7 +645,7 @@ class TestInsertBinary:
         connect.flush([binary_collection])
         assert connect.count_entities(binary_collection) == default_nb

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
         '''
         target: test insert binary entities after build index
@@ -666,7 +662,7 @@ class TestInsertBinary:
             if field["name"] == binary_field_name:
                 assert field["indexes"][0] == get_binary_index

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
         '''
@@ -684,7 +680,7 @@ class TestInsertBinary:
             if field["name"] == binary_field_name:
                 assert field["indexes"][0] == get_binary_index

-    @pytest.mark.skip
+    @pytest.mark.skip("binary search")
     def test_insert_binary_search(self, connect, binary_collection):
         '''
         target: test search vector after insert vector after a while
@@ -761,7 +757,7 @@ class TestInsertAsync:
         assert len(ids) == nb


-    @pytest.mark.skip(" not support count entites ")
+    @pytest.mark.skip("count entities")
     @pytest.mark.level(2)
     def test_insert_async_long(self, connect, collection):
         '''
@@ -778,7 +774,7 @@ class TestInsertAsync:
         logging.getLogger().info(count)
         assert count == nb

-    @pytest.mark.skip(" not support count entites ")
+    @pytest.mark.skip("count entities")
     @pytest.mark.level(2)
     def test_insert_async_callback_timeout(self, connect, collection):
         '''
@@ -793,7 +789,6 @@ class TestInsertAsync:
         count = connect.count_entities(collection)
         assert count == 0

-    @pytest.mark.skip(" later check")
     def test_insert_async_invalid_params(self, connect):
         '''
         target: test insert vectors with different length of vectors
@@ -835,7 +830,7 @@ class TestInsertMultiCollections:
             pytest.skip("sq8h not support in CPU mode")
         return request.param

-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     def test_insert_vector_multi_collections(self, connect):
         '''
         target: test insert entities
@@ -868,7 +863,7 @@ class TestInsertMultiCollections:
         connect.flush([collection_name])
         assert len(ids) == 1

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_create_index_insert_vector_another(self, connect, collection, get_simple_index):
         '''
@@ -882,7 +877,7 @@ class TestInsertMultiCollections:
         ids = connect.bulk_insert(collection, default_entity)
         connect.drop_collection(collection_name)

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_create_index_another(self, connect, collection, get_simple_index):
         '''
@@ -897,7 +892,7 @@ class TestInsertMultiCollections:
         count = connect.count_entities(collection_name)
         assert count == 0

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
         '''
@@ -913,7 +908,7 @@ class TestInsertMultiCollections:
         count = connect.count_entities(collection)
         assert count == 1

-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_search_vector_insert_vector_another(self, connect, collection):
         '''
@@ -930,7 +925,6 @@ class TestInsertMultiCollections:
         count = connect.count_entities(collection_name)
         assert count == 1

-    @pytest.mark.skip(" todo fix search ")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_search_vector_another(self, connect, collection):
         '''
@@ -943,7 +937,6 @@ class TestInsertMultiCollections:
         ids = connect.bulk_insert(collection, default_entity)
         result = connect.search(collection_name, default_single_query)

-    @pytest.mark.skip(" todo fix search ")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_sleep_search_vector_another(self, connect, collection):
         '''
@@ -1119,21 +1112,20 @@ class TestInsertInvalidBinary(object):
     def get_field_vectors_value(self, request):
         yield request.param

-    # @pytest.mark.skip
     @pytest.mark.level(2)
     def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
         tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
         with pytest.raises(Exception):
             connect.bulk_insert(binary_collection, tmp_entity)

-    @pytest.mark.skip
+    @pytest.mark.skip("todo support row data check")
     @pytest.mark.level(2)
     def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
         tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
         with pytest.raises(Exception):
             connect.bulk_insert(binary_collection, tmp_entity)

-    @pytest.mark.skip
+    @pytest.mark.skip("todo support row data check")
     @pytest.mark.level(2)
     def test_insert_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
         tmp_entity = copy.deepcopy(default_binary_entity)
@@ -1142,7 +1134,6 @@ class TestInsertInvalidBinary(object):
         with pytest.raises(Exception):
             connect.bulk_insert(binary_collection, tmp_entity)

-    @pytest.mark.skip
     @pytest.mark.level(2)
     def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
         '''
@@ -1155,7 +1146,7 @@ class TestInsertInvalidBinary(object):
         with pytest.raises(Exception):
             connect.bulk_insert(binary_id_collection, default_binary_entities, ids)

-    @pytest.mark.skip
+    @pytest.mark.skip("check filed")
     @pytest.mark.level(2)
     def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
         field_type = get_field_type
@@ -1163,7 +1154,7 @@ class TestInsertInvalidBinary(object):
         with pytest.raises(Exception):
             connect.bulk_insert(binary_collection, tmp_entity)

-    @pytest.mark.skip
+    @pytest.mark.skip("check field")
     @pytest.mark.level(2)
     def test_insert_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
         tmp_entity = copy.deepcopy(default_binary_entities)
@@ -101,7 +101,7 @@ class TestInsertBase:
         connect.flush([collection])
         connect.drop_collection(collection)

-    @pytest.mark.skip
+    @pytest.mark.skip("create_index")
    @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index(self, connect, collection, get_simple_index):
         '''
@@ -119,7 +119,7 @@ class TestInsertBase:
             if field["name"] == field_name:
                 assert field["indexes"][0] == get_simple_index

-    @pytest.mark.skip
+    @pytest.mark.skip("create_index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_after_create_index(self, connect, collection, get_simple_index):
         '''
@@ -150,7 +150,7 @@ class TestInsertBase:
         logging.getLogger().debug(res)
         assert res

-    @pytest.mark.skip
+    @pytest.mark.skip("segment row count")
     def test_insert_segment_row_count(self, connect, collection):
         nb = default_segment_row_limit + 1
         res_ids = connect.insert(collection, gen_entities_rows(nb))
@@ -182,7 +182,6 @@ class TestInsertBase:
         with pytest.raises(Exception) as e:
             res_ids = connect.insert(id_collection, gen_entities_rows(nb))

-    @pytest.mark.skip("todo fix")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_twice_ids_no_ids(self, connect, collection):
         '''
@@ -205,7 +204,6 @@ class TestInsertBase:
         assert len(ids) == default_nb
         assert connect.has_partition(collection, default_tag)

-    @pytest.mark.skip("todo support custom id")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_tag_with_ids(self, connect, id_collection):
         '''
@@ -254,7 +252,7 @@ class TestInsertBase:
         with pytest.raises(Exception) as e:
             ids = connect.insert(gen_unique_str("not_exist_collection"), default_entities_rows)

-    @pytest.mark.skip("todo support dim check")
+    @pytest.mark.skip("todo support row data check")
     def test_insert_dim_not_matched(self, connect, collection):
         '''
         target: test insert entities, the vector dimension is not equal to the collection dimension
@@ -277,7 +275,7 @@ class TestInsertBinary:
         request.param["metric_type"] = "JACCARD"
         return request.param

-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     def test_insert_binary_entities(self, connect, binary_collection):
         '''
         target: test insert entities in binary collection
@@ -289,7 +287,6 @@ class TestInsertBinary:
         connect.flush()
         assert connect.count_entities(binary_collection) == default_nb

-    @pytest.mark.skip
     def test_insert_binary_tag(self, connect, binary_collection):
         '''
         target: test insert entities and create partition tag
@@ -302,7 +299,7 @@ class TestInsertBinary:
         assert connect.has_partition(binary_collection, default_tag)

     # TODO
-    @pytest.mark.skip
+    @pytest.mark.skip("count entities")
     @pytest.mark.level(2)
     def test_insert_binary_multi_times(self, connect, binary_collection):
         '''
@@ -316,7 +313,7 @@ class TestInsertBinary:
         connect.flush([binary_collection])
         assert connect.count_entities(binary_collection) == default_nb

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
         '''
         target: test insert binary entities after build index
@@ -333,7 +330,7 @@ class TestInsertBinary:
             if field["name"] == binary_field_name:
                 assert field["indexes"][0] == get_binary_index

-    @pytest.mark.skip
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
         '''
@@ -406,7 +403,7 @@ class TestInsertInvalid(object):
     def get_field_vectors_value(self, request):
         yield request.param

-    @pytest.mark.skip("todo support field check")
+    @pytest.mark.skip("todo support row data check")
     def test_insert_field_name_not_match(self, connect, collection):
         '''
         target: test insert, with field name not matched
@@ -432,7 +429,7 @@ class TestInsertInvalid(object):
         else:
             connect.insert(collection, default_entity_row, partition_tag=tag_name)

-    @pytest.mark.skip("todo support field check")
+    @pytest.mark.skip("todo support row data check")
     def test_insert_with_less_field(self, connect, collection):
         tmp_entity = copy.deepcopy(default_entity_row)
         tmp_entity[0].pop(default_float_vec_field_name)
@@ -457,7 +454,7 @@ class TestInsertInvalid(object):
         with pytest.raises(Exception):
             connect.insert(collection, tmp_entity)

-    @pytest.mark.skip("todo support field check")
+    @pytest.mark.skip("todo support row data check")
     def test_insert_with_invalid_field_vector_value(self, connect, collection, get_field_vectors_value):
         tmp_entity = copy.deepcopy(default_entity_row)
         tmp_entity[0][default_float_vec_field_name][1] = get_field_vectors_value
@@ -258,7 +258,7 @@ def gen_default_fields(auto_id=True):
 def gen_binary_default_fields(auto_id=True):
     default_fields = {
         "fields": [
-            {"name": "int64", "type": DataType.INT64},
+            {"name": "int64", "type": DataType.INT64, "is_primary_key": not auto_id},
             {"name": "float", "type": DataType.FLOAT},
             {"name": default_binary_vec_field_name, "type": DataType.BINARY_VECTOR, "params": {"dim": default_dim}}
         ],