test: Remove useless common types and refine error assertions in negative cases (#33023)

Related issue: #32653 

1. Remove some meaningless common types
2. Refine error assertions in negative cases (see the sketch of the pattern below)
3. Remove some duplicate tests
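
To make items 1 and 2 concrete, the sketch below shows the general pattern the tests move to: a shared, purpose-built list of invalid names consumed through `@pytest.mark.parametrize`, with a specific error asserted per input instead of one catch-all check. This is a minimal, self-contained illustration only; `create_collection`, its error messages, and the trimmed `invalid_resource_names` list are hypothetical stand-ins, not the real `collection_wrap`/`common_type` code touched by this PR.

```python
import pytest

max_name_length = 255

# trimmed, illustrative version of ct.invalid_resource_names
invalid_resource_names = [
    None,                         # None
    " ",                          # space
    "",                           # empty
    "12name",                     # starts with a number
    "n-ame",                      # contains a hyphen
    "name中文",                   # contains Chinese characters
    "a" * (max_name_length + 1),  # exceeds max length
]


def create_collection(name):
    """Hypothetical stand-in that only validates a collection name."""
    if name is None or str(name).strip() == "":
        raise ValueError(f"`collection_name` value {name} is illegal")
    if not (name[0] == "_" or (name[0].isascii() and name[0].isalpha())):
        raise ValueError(f"Invalid collection name: {name}")
    if len(name) > max_name_length or not all(c == "_" or (c.isascii() and c.isalnum()) for c in name):
        raise ValueError(f"Invalid collection name: {name}")


@pytest.mark.parametrize("name", invalid_resource_names)
def test_create_collection_invalid_name(name):
    # refine the assertion: expect a specific message per kind of invalid input
    expected = "is illegal" if name is None or str(name).strip() == "" else "Invalid collection name"
    with pytest.raises(ValueError, match=expected):
        create_collection(name)
```

Running this with `pytest -q` exercises each invalid name as its own case, which is the same shape the refactored tests take when they parametrize over `ct.invalid_resource_names` and branch the expected `err_msg`.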

---------

Signed-off-by: yanliang567 <yanliang.qiao@zilliz.com>
pull/33048/head
yanliang567 2024-05-14 15:03:33 +08:00 committed by GitHub
parent 1d48d0aeb2
commit ba3b2a91a0
11 changed files with 260 additions and 1129 deletions


@ -105,36 +105,27 @@ default_count_output = "count(*)"
rows_all_data_type_file_path = "/tmp/rows_all_data_type"
"""" List of parameters used to pass """
get_invalid_strs = [
[],
1,
[1, "2", 3],
(1,),
{1: 1},
None,
"",
" ",
"12-s",
"12 s",
"(mn)",
"中文",
"%$#",
"".join("a" for i in range(max_name_length + 1))]
invalid_resource_names = [
None, # None
" ", # space
"", # empty
"12name", # start with number
"n12 ame", # contain space
"n-ame", # contain hyphen
"nam(e)", # contain special character
"name中文", # contain Chinese character
"name%$#", # contain special character
"".join("a" for i in range(max_name_length + 1))] # exceed max length
get_invalid_type_fields = [
1,
[1, "2", 3],
(1,),
{1: 1},
None,
"",
" ",
"12-s",
"12 s",
"(mn)",
"中文",
"%$#",
"".join("a" for i in range(max_name_length + 1))]
valid_resource_names = [
"name", # valid name
"_name", # start with underline
"_12name", # start with underline and contains number
"n12ame_", # end with letter and contains number and underline
"nam_e", # contains underline
"".join("a" for i in range(max_name_length))] # max length
invalid_dims = [min_dim-1, 32.1, -32, "vii", "十六", max_dim+1]
get_not_string = [
[],
@ -146,16 +137,6 @@ get_not_string = [
[1, "2", 3]
]
get_not_string_value = [
" ",
"12-s",
"12 s",
"(mn)",
"中文",
"%$#",
"a".join("a" for i in range(256))
]
get_invalid_vectors = [
"1*2",
[1],


@ -234,31 +234,11 @@ def initialize_env(request):
param_info.prepare_param_info(host, port, handler, replica_num, user, password, secure, uri, token)
@pytest.fixture(params=ct.get_invalid_strs)
def get_invalid_string(request):
yield request.param
@pytest.fixture(params=cf.gen_simple_index())
def get_index_param(request):
yield request.param
@pytest.fixture(params=ct.get_invalid_strs)
def get_invalid_collection_name(request):
yield request.param
@pytest.fixture(params=ct.get_invalid_strs)
def get_invalid_field_name(request):
yield request.param
@pytest.fixture(params=ct.get_invalid_strs)
def get_invalid_index_type(request):
yield request.param
# TODO: construct invalid index params for all index types
@pytest.fixture(params=[{"metric_type": "L3", "index_type": "IVF_FLAT"},
{"metric_type": "L2", "index_type": "IVF_FLAT", "err_params": {"nlist": 10}},
@ -267,11 +247,6 @@ def get_invalid_index_params(request):
yield request.param
@pytest.fixture(params=ct.get_invalid_strs)
def get_invalid_partition_name(request):
yield request.param
@pytest.fixture(params=ct.get_invalid_dict)
def get_invalid_vector_dict(request):
yield request.param


@ -55,28 +55,14 @@ max_vector_field_num = ct.max_vector_field_num
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_type_fields)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64 or request.param.dtype == DataType.VARCHAR:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
@pytest.fixture(scope="function", params=ct.invalid_dims)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
@ -95,37 +81,7 @@ class TestCollectionParams(TestcaseBase):
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_empty_name(self):
"""
target: test collection with empty name
method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, "qw$_o90", "1ns_", None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error1 = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
error2 = {ct.err_code: 1100, ct.err_msg: "Invalid collection name: 1ns_. the first character of a"
" collection name must be an underscore or letter: invalid"
" parameter".format(name)}
error = error1 if name not in ["1ns_", "qw$_o90"] else error2
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["_co11ection", "co11_ection"])
@pytest.mark.parametrize("name", ct.valid_resource_names)
def test_collection_naming_rules(self, name):
"""
target: test collection with valid name
@ -142,7 +98,7 @@ class TestCollectionParams(TestcaseBase):
check_items={exp_name: name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "".join("a" for i in range(ct.max_name_length + 1))])
@pytest.mark.parametrize("name", ct.invalid_resource_names)
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
@ -151,6 +107,8 @@ class TestCollectionParams(TestcaseBase):
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
if name is not None and name.strip() == "":
error = {ct.err_code: 1, ct.err_msg: "collection name should not be empty"}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@ -254,21 +212,34 @@ class TestCollectionParams(TestcaseBase):
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
def test_collection_invalid_schema_multi_pk(self):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
target: test collection schema with two primary key fields
method: create a collection schema in which two fields are marked as primary
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
field1, _ = self.field_schema_wrap.init_field_schema(name="field1", dtype=DataType.INT64, is_primary=True)
field2, _ = self.field_schema_wrap.init_field_schema(name="field2", dtype=DataType.INT64, is_primary=True)
vector_field = cf.gen_float_vec_field(dim=32)
error = {ct.err_code: 999, ct.err_msg: "Expected only one primary key field"}
self.collection_schema_wrap.init_collection_schema(fields=[field1, field2, vector_field],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_schema_type(self):
"""
target: test collection with an invalid schema type
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name="field_name", dtype=DataType.INT64, is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
self.collection_wrap.init_collection(c_name, schema=field,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
@ -300,46 +271,8 @@ class TestCollectionParams(TestcaseBase):
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 1, ct.err_msg: "The fields of schema must be type list."}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
@pytest.mark.parametrize("invalid_field_name", ct.invalid_resource_names)
def test_collection_invalid_field_name(self, invalid_field_name):
"""
target: test collection with invalid field name
method: invalid string name
@ -347,44 +280,14 @@ class TestCollectionParams(TestcaseBase):
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
field, _ = self.field_schema_wrap.init_field_schema(name=invalid_field_name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1701, ct.err_msg: f"bad argument type for built-in"}
error = {ct.err_code: 999, ct.err_msg: f"field name invalid"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1701, ct.err_msg: "field name should not be empty"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
@pytest.mark.parametrize("dtype", [6, [[]], "int64", 5.1, (), "", "a", DataType.UNKNOWN])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
@ -392,26 +295,10 @@ class TestCollectionParams(TestcaseBase):
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
error = {ct.err_code: 999, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue #19334")
def test_collection_field_dtype_float_value(self):
"""
target: test collection with float type
method: create field with float type
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_empty_fields(self):
"""
@ -420,7 +307,7 @@ class TestCollectionParams(TestcaseBase):
expected: exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Schema must have a primary key field."}
error = {ct.err_code: 999, ct.err_msg: "Schema must have a primary key field."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@ -527,7 +414,7 @@ class TestCollectionParams(TestcaseBase):
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
@pytest.mark.parametrize("is_primary", [None, 2, "string"])
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
@ -536,7 +423,7 @@ class TestCollectionParams(TestcaseBase):
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
error = {ct.err_code: 999, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@ -771,8 +658,8 @@ class TestCollectionParams(TestcaseBase):
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 24578")
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
# @pytest.mark.xfail(reason="issue 24578")
@pytest.mark.parametrize("auto_id", [None, 1, "string"])
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
@ -815,7 +702,7 @@ class TestCollectionParams(TestcaseBase):
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue #29796")
@pytest.mark.skip(reason="issue #29796")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
@ -824,6 +711,7 @@ class TestCollectionParams(TestcaseBase):
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 999, ct.err_msg: "invalid dimension"}
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 65535, ct.err_msg: "strconv.ParseInt: parsing \"[]\": invalid syntax"}
@ -1325,12 +1213,6 @@ class TestCollectionDataframe(TestcaseBase):
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
@ -1406,7 +1288,7 @@ class TestCollectionDataframe(TestcaseBase):
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_non_dataframe(self, get_non_df):
def test_construct_from_non_dataframe(self):
"""
target: test create collection by invalid dataframe
method: non-dataframe type create collection
@ -1415,7 +1297,7 @@ class TestCollectionDataframe(TestcaseBase):
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
df = cf.gen_default_list_data(nb=10)
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@ -2837,17 +2719,9 @@ class TestLoadCollection(TestcaseBase):
error = {ct.err_code: 0, ct.err_msg: "due to no partition specified"}
collection_w.load(partition_names=[], check_task=CheckTasks.err_res, check_items=error)
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_number_replicas(self, request):
if request.param == 1:
pytest.skip("1 is valid replica number")
if request.param is None:
pytest.skip("None is valid replica number")
yield request.param
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue #21618")
def test_load_replica_non_number(self, get_non_number_replicas):
@pytest.mark.parametrize("invalid_num_replica", [0.2, "not-int"])
def test_load_replica_non_number(self, invalid_num_replica):
"""
target: test load collection with non-number replicas
method: load with non-number replicas
@ -2861,8 +2735,8 @@ class TestLoadCollection(TestcaseBase):
collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
# load with non-number replicas
error = {ct.err_code: 0, ct.err_msg: f"but expected one of: int, long"}
collection_w.load(replica_number=get_non_number_replicas, check_task=CheckTasks.err_res, check_items=error)
error = {ct.err_code: 999, ct.err_msg: f"`replica_number` value {invalid_num_replica} is illegal"}
collection_w.load(replica_number=invalid_num_replica, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("replicas", [-1, 0])
@ -4400,10 +4274,8 @@ class TestCollectionMultipleVectorValid(TestcaseBase):
class TestCollectionMultipleVectorInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
@pytest.fixture(scope="function", params=ct.invalid_dims)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
"""


@ -46,15 +46,6 @@ class TestDatabaseParams(TestcaseBase):
super().teardown_method(method)
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_string(self, request):
"""
get invalid string
:param request:
:type request:
"""
yield request.param
def test_db_default(self):
"""
target: test normal db interface
@ -105,28 +96,18 @@ class TestDatabaseParams(TestcaseBase):
dbs_after_drop, _ = self.database_wrap.list_database()
assert db_name not in dbs_after_drop
@pytest.mark.parametrize("get_invalid_string", ct.get_invalid_strs[6:])
def test_create_db_invalid_name_value(self, get_invalid_string):
@pytest.mark.parametrize("db_name", ct.invalid_resource_names)
def test_create_db_invalid_name_value(self, db_name):
"""
target: test create db with invalid name
method: create db with invalid name
expected: error
"""
self._connect()
error = {ct.err_code: 802, ct.err_msg: "invalid database name[database=%s]" % get_invalid_string}
self.database_wrap.create_database(db_name=get_invalid_string, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.parametrize("get_invalid_string", ct.get_invalid_strs[:6])
def test_create_db_invalid_name_type(self, get_invalid_string):
"""
target: test create db with invalid name
method: create db with invalid name
expected: error
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "invalid database name[database=%s]" % get_invalid_string}
self.database_wrap.create_database(db_name=get_invalid_string, check_task=CheckTasks.err_res,
error = {ct.err_code: 802, ct.err_msg: "invalid database name[database=%s]" % db_name}
if db_name is None:
error = {ct.err_code: 999, ct.err_msg: f"`db_name` value {db_name} is illegal"}
self.database_wrap.create_database(db_name=db_name, check_task=CheckTasks.err_res,
check_items=error)
def test_create_db_without_connection(self):
@ -150,54 +131,26 @@ class TestDatabaseParams(TestcaseBase):
error = {ct.err_code: 1, ct.err_msg: "database already exist: default"}
self.database_wrap.create_database(ct.default_db, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.parametrize("get_invalid_string", ct.get_invalid_strs[6:])
def test_drop_db_invalid_name_value(self, get_invalid_string):
@pytest.mark.parametrize("invalid_name", ct.invalid_resource_names)
def test_drop_db_invalid_name(self, invalid_name):
"""
target: test drop db with invalid name
method: drop db with invalid name
expected: exception
"""
self._connect()
# create db
db_name = cf.gen_unique_str(prefix)
self.database_wrap.create_database(db_name)
# drop db
self.database_wrap.drop_database(db_name=get_invalid_string, check_task=CheckTasks.err_res,
check_items={ct.err_code: 802, ct.err_msg: "invalid database name"})
# created db is exist
error = {ct.err_code: 802, ct.err_msg: "invalid database name[database=%s]" % db_name}
if db_name is None:
error = {ct.err_code: 999, ct.err_msg: f"`db_name` value {db_name} is illegal"}
self.database_wrap.drop_database(db_name=invalid_name, check_task=CheckTasks.err_res, check_items=error)
# verify the created db still exists
self.database_wrap.create_database(db_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 65535,
ct.err_msg: "database already exist: %s" % db_name})
self.database_wrap.drop_database(db_name)
dbs, _ = self.database_wrap.list_database()
assert db_name not in dbs
@pytest.mark.parametrize("get_invalid_string", ct.get_invalid_strs[:6])
def test_drop_db_invalid_name_type(self, get_invalid_string):
"""
target: test drop db with invalid name
method: drop db with invalid name
expected: exception
"""
self._connect()
# create db
db_name = cf.gen_unique_str(prefix)
self.database_wrap.create_database(db_name)
# drop db
self.database_wrap.drop_database(db_name=get_invalid_string, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "is illegal"})
# created db is exist
self.database_wrap.create_database(db_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 65535,
ct.err_msg: "database already exist: %s" % db_name})
self.database_wrap.drop_database(db_name)
dbs, _ = self.database_wrap.list_database()
assert db_name not in dbs


@ -64,39 +64,23 @@ class TestIndexParams(TestcaseBase):
check_items={ct.err_code: 0, ct.err_msg: clem.CollectionType})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ct.get_invalid_strs)
def test_index_field_name_invalid(self, field_name):
def test_index_field_name_not_existed(self):
"""
target: test index with error field name
target: test index on non_existing field
method: create index with a non-existing field name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
log.error(iem.WrongFieldName % str(field_name))
self.index_wrap.init_index(collection_w.collection, field_name, default_index_params,
fieldname = "non_existing"
self.index_wrap.init_index(collection_w.collection, fieldname, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: iem.WrongFieldName % str(field_name)})
@pytest.mark.tags(CaseLabel.L1)
def test_index_field_name_not_existed(self):
"""
target: test index with error field name
method: input field name not created
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
f_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, f_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: f"cannot create index on non-existed field: {f_name}"})
check_items={ct.err_code: 999,
ct.err_msg: "cannot create index on non-existed field"})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("index_type", ct.get_invalid_strs)
@pytest.mark.parametrize("index_type", ["non_exiting_type", 100])
def test_index_type_invalid(self, index_type):
"""
target: test index with error index type
@ -128,7 +112,7 @@ class TestIndexParams(TestcaseBase):
index_params["index_type"] = "IVFFFFFFF"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
check_items={ct.err_code: 999, ct.err_msg: ""})
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_invalid(self, get_invalid_index_params):


@ -32,12 +32,6 @@ default_search_exp = "int64 >= 0"
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
@ -145,19 +139,20 @@ class TestInsertParams(TestcaseBase):
data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
def test_insert_invalid_field_name_dataframe(self):
"""
target: test insert with an invalid dataframe
method: insert a dataframe whose field name does not exist in the schema
expected: raise exception
"""
invalid_field_name = "non_existing"
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(
columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
columns={ct.default_int64_field_name: invalid_field_name}, inplace=True)
error = {ct.err_code: 999,
ct.err_msg: f"The name of field don't match, expected: int64, got {get_invalid_field_name}"}
ct.err_msg: f"The name of field don't match, expected: int64, got {invalid_field_name}"}
collection_w.insert(
data=df, check_task=CheckTasks.err_res, check_items=error)
@ -1973,8 +1968,8 @@ class TestUpsertInvalid(TestcaseBase):
collection_w.upsert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs[7:13])
def test_upsert_partition_name_invalid(self, partition_name):
@pytest.mark.parametrize("partition_name", ct.invalid_resource_names[4:])
def test_upsert_partition_name_non_existing(self, partition_name):
"""
target: test upsert with a non-existing partition name
method: 1. create a collection with partitions


@ -122,8 +122,7 @@ class TestPartitionParams(TestcaseBase):
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
def test_partition_special_chars_description(self, description):
def test_partition_special_chars_description(self):
"""
target: verify create a partition with special characters in description
method: create a partition with special characters in description
@ -134,6 +133,7 @@ class TestPartitionParams(TestcaseBase):
# create partition
partition_name = cf.gen_unique_str(prefix)
description = "@#¥%……&*"
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
@ -199,25 +199,26 @@ class TestPartitionParams(TestcaseBase):
check_items={"name": partition_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
@pytest.mark.parametrize("partition_name", ct.invalid_resource_names)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: create a partition with invalid names
expected: raise exception
"""
if partition_name == "12name":
pytest.skip(reason="won't fix issue #32998")
# create collection
collection_w = self.init_collection_wrap()
# create partition
error1 = {ct.err_code: 1, ct.err_msg: f"`partition_name` value {partition_name} is illegal"}
error2 = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. Partition name can"
f" only contain numbers, letters and underscores."}
error = error1 if partition_name in [None, [], 1, [1, "2", 3], (1,), {1: 1}] else error2
if partition_name is not None:
error = {ct.err_code: 999, ct.err_msg: f"Invalid partition name: {partition_name.strip()}"}
else:
error = {ct.err_code: 999, ct.err_msg: f"`partition_name` value {partition_name} is illegal"}
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items=error)
# TODO: need an error code issue #5144 and assert independently
@pytest.mark.tags(CaseLabel.L2)
def test_partition_none_collection(self):
@ -311,17 +312,9 @@ class TestPartitionParams(TestcaseBase):
partition_w1.release()
partition_w2.load()
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_number_replicas(self, request):
if request.param == 1:
pytest.skip("1 is valid replica number")
if request.param is None:
pytest.skip("None is valid replica number")
yield request.param
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue #21618")
def test_load_partition_replica_non_number(self, get_non_number_replicas):
@pytest.mark.parametrize("replicas", [1.2, "not-int"])
def test_load_partition_replica_non_number(self, replicas):
"""
target: test load partition with non-number replicas
method: load with non-number replicas
@ -334,17 +327,17 @@ class TestPartitionParams(TestcaseBase):
partition_w.insert(cf.gen_default_list_data(nb=100))
# load with non-number replicas
error = {ct.err_code: 0, ct.err_msg: f"but expected one of: int, long"}
error = {ct.err_code: 0, ct.err_msg: f"`replica_number` value {replicas} is illegal"}
collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
partition_w.load(replica_number=get_non_number_replicas, check_task=CheckTasks.err_res, check_items=error)
partition_w.load(replica_number=replicas, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("replicas", [0, -1])
def test_load_replica_invalid_number(self, replicas):
"""
target: test load partition with invalid replica number
method: load with invalid replica number
expected: raise exception
target: test load partition with replica number 0 or a negative number
method: load with replica_number 0 or -1
expected: load successfully
"""
# create, insert
self._connect()
@ -1317,9 +1310,9 @@ class TestHasBase(TestcaseBase):
expected: status ok
"""
collection_w = self.init_collection_wrap()
partition_name = ct.get_invalid_strs
partition_name = ct.invalid_resource_names[0]
collection_w.has_partition(partition_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"})
check_items={ct.err_code: 999, 'err_msg': "is illegal"})
class TestDropBase(TestcaseBase):
@ -1384,6 +1377,7 @@ class TestNameInvalid(TestcaseBase):
expected: status not ok
"""
collection_w = self.init_collection_wrap()
partition_name = ct.get_invalid_strs
partition_name = ct.invalid_resource_names[0]
collection_w.drop_partition(partition_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"})
check_items={ct.err_code: 999,
'err_msg': f"`partition_name` value {partition_name} is illegal"})


@ -988,16 +988,11 @@ class TestQueryParams(TestcaseBase):
expression = f"{expr_prefix}({json_field}['list'], {ids})"
collection_w.query(expression, check_task=CheckTasks.check_query_empty)
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_not_list(self, request):
if request.param == [1, "2", 3]:
pytest.skip('[1, "2", 3] is valid type for list')
yield request.param
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expr_prefix", ["json_contains_any", "JSON_CONTAINS_ANY",
"json_contains_all", "JSON_CONTAINS_ALL"])
def test_query_expr_json_contains_invalid_type(self, expr_prefix, enable_dynamic_field, get_not_list):
@pytest.mark.parametrize("not_list", ["str", {1, 2, 3}, (1, 2, 3), 10])
def test_query_expr_json_contains_invalid_type(self, expr_prefix, enable_dynamic_field, not_list):
"""
target: test query with expression using json_contains_any
method: query with expression using json_contains_any
@ -1007,8 +1002,9 @@ class TestQueryParams(TestcaseBase):
collection_w = self.init_collection_general(prefix, enable_dynamic_field=enable_dynamic_field)[0]
# 2. insert data
array = cf.gen_default_rows_data()
for i in range(ct.default_nb):
nb = 10
array = cf.gen_default_rows_data(nb=nb)
for i in range(nb):
array[i][json_field] = {"number": i,
"list": [m for m in range(i, i + 10)]}
@ -1016,7 +1012,7 @@ class TestQueryParams(TestcaseBase):
# 3. query
collection_w.load()
expression = f"{expr_prefix}({json_field}['list'], {get_not_list})"
expression = f"{expr_prefix}({json_field}['list'], {not_list})"
error = {ct.err_code: 1100, ct.err_msg: f"failed to create query plan: cannot parse expression: {expression}"}
collection_w.query(expression, check_task=CheckTasks.err_res, check_items=error)
@ -1713,7 +1709,7 @@ class TestQueryParams(TestcaseBase):
assert len(res2) == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("ignore_growing", ct.get_invalid_strs[:8])
@pytest.mark.parametrize("ignore_growing", [2.3, "str"])
def test_query_invalid_ignore_growing_param(self, ignore_growing):
"""
target: test query ignoring growing segment param invalid
@ -1722,17 +1718,15 @@ class TestQueryParams(TestcaseBase):
3. query with ignore_growing type invalid
expected: raise exception
"""
if ignore_growing == 1:
pytest.skip("number is valid")
# 1. create a collection
collection_w = self.init_collection_general(prefix, True)[0]
# 2. insert data again
data = cf.gen_default_dataframe_data(start=10000)
data = cf.gen_default_dataframe_data(start=100)
collection_w.insert(data)
# 3. query with param ignore_growing invalid
error = {ct.err_code: 1, ct.err_msg: "parse search growing failed"}
error = {ct.err_code: 999, ct.err_msg: "parse search growing failed"}
collection_w.query('int64 >= 0', ignore_growing=ignore_growing,
check_task=CheckTasks.err_res, check_items=error)


@ -11,12 +11,6 @@ config_nodes = 8
class TestResourceGroupParams(TestcaseBase):
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def invalid_names(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
def test_rg_default(self):
"""
@ -110,44 +104,21 @@ class TestResourceGroupParams(TestcaseBase):
check_items=error)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
@pytest.mark.parametrize("rg_name", ["", None])
def test_create_rg_empty(self, rg_name):
"""
method: create a rg with an empty or null name
verify: fail with error msg
"""
self._connect()
error = {ct.err_code: 999,
ct.err_msg: "`resource_group_name` value {} is illegal".format(rg_name)}
self.init_resource_group(name=rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
@pytest.mark.parametrize("rg_name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_create_n_drop_rg_illegal_names(self, rg_name):
@pytest.mark.parametrize("rg_name", ct.invalid_resource_names)
def test_create_n_drop_rg_invalid_name(self, rg_name):
"""
method: create a rg with an invalid name (wrong type, excessive length, Chinese characters, or symbols)
verify: fail with error msg
"""
self._connect()
error = {ct.err_code: 999,
ct.err_msg: "`resource_group_name` value {} is illegal".format(rg_name)}
self.init_resource_group(rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
# verify drop fail with error if illegal names
self.utility_wrap.drop_resource_group(rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
@pytest.mark.parametrize("rg_name", [" ", "12-s", "12 s", "(mn)", "中文", "%$#", "qw$_o90", "1ns_", "a".join("a" for i in range(256))])
def test_create_n_drop_rg_invalid_names(self, rg_name):
"""
method: create a rg with an invalid name(what are invalid names? types, length, chinese,symbols)
verify: fail with error msg
"""
self._connect()
error = {ct.err_code: 999,
ct.err_msg: "Invalid resource group name"}
self.init_resource_group(rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
# verify drop succ with invalid names
self.utility_wrap.drop_resource_group(rg_name)
error = {ct.err_code: 999, ct.err_msg: "Invalid resource group name"}
if rg_name is None or rg_name == "":
error = {ct.err_code: 999, ct.err_msg: "is illegal"}
self.init_resource_group(rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
else:
self.init_resource_group(rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
# verify drop succ with invalid names
self.utility_wrap.drop_resource_group(rg_name)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
def test_create_rg_max_length_name(self):
@ -260,18 +231,6 @@ class TestResourceGroupParams(TestcaseBase):
self.utility_wrap.drop_resource_group(name=rg_name)
assert rgs_count == len(self.utility_wrap.list_resource_groups()[0])
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
@pytest.mark.parametrize("rg_name", ["", None])
def test_drop_rg_empty_name(self, rg_name):
"""
method: drop a rg with empty or None name
verify: drop successfully
"""
self._connect()
error = {ct.err_code: 999,
ct.err_msg: "`resource_group_name` value {} is illegal".format(rg_name)}
self.utility_wrap.drop_resource_group(name=rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
def test_drop_rg_twice(self):
"""
@ -328,35 +287,17 @@ class TestResourceGroupParams(TestcaseBase):
check_items=default_rg_info)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
@pytest.mark.parametrize("rg_name", ["", None])
def test_describe_rg_empty_name(self, rg_name):
"""
method: describe a rg with an empty name
verify: fail with error msg
"""
self._connect()
error = {ct.err_code: 999,
ct.err_msg: "`resource_group_name` value {} is illegal".format(rg_name)}
self.utility_wrap.drop_resource_group(name=rg_name, check_task=ct.CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
def test_describe_rg_invalid_names(self):
@pytest.mark.parametrize("rg_name", ct.invalid_resource_names)
def test_describe_rg_invalid_name(self, rg_name):
"""
method: describe a rg with an invalid name (wrong type, excessive length, Chinese characters, or symbols)
verify: fail with error msg
"""
pass
@pytest.mark.tags(CaseLabel.MultiQueryNodes)
def test_describe_rg_non_existing(self):
"""
method: describe a non-existing rg
verify: fail with error msg
"""
self._connect()
non_existing_rg = 'non_existing'
error = {ct.err_code: 999, ct.err_msg: "failed to describe resource group, err=resource group doesn't exist"}
self.utility_wrap.describe_resource_group(name=non_existing_rg,
error = {ct.err_code: 999, ct.err_msg: f"resource group not found[rg={rg_name}]"}
if rg_name is None or rg_name == "":
error = {ct.err_code: 999, ct.err_msg: f"`resource_group_name` value {rg_name} is illegal"}
self.utility_wrap.describe_resource_group(name=rg_name,
check_task=ct.CheckTasks.err_res,
check_items=error)


@ -75,20 +75,6 @@ class TestCollectionSearchInvalid(TestcaseBase):
def get_invalid_vectors(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for field")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
if not isinstance(request.param, str):
pytest.skip("field value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_metric_type)
def get_invalid_metric_type(self, request):
yield request.param
@ -99,42 +85,6 @@ class TestCollectionSearchInvalid(TestcaseBase):
pytest.skip("positive int is valid type for limit")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for expr")
if request.param is None:
pytest.skip("None is valid for expr")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
if not isinstance(request.param, str):
pytest.skip("expression value only support string")
if request.param in ["", " "]:
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_bool_value(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
if request.param == []:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
if request.param == []:
pytest.skip("empty is valid for output_fields")
if request.param is None:
pytest.skip("None is valid for output_fields")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_guarantee_timestamp(self, request):
if request.param == 9999999999:
@ -143,12 +93,6 @@ class TestCollectionSearchInvalid(TestcaseBase):
pytest.skip("None is valid for guarantee_timestamp")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_range_search_paras(self, request):
if request.param == 1:
pytest.skip("number is valid for range search paras")
yield request.param
@pytest.fixture(scope="function", params=[True, False])
def enable_dynamic_field(self, request):
yield request.param
@ -232,7 +176,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
collection_w = self.init_collection_general(prefix, dim=32)[0]
# 2. search with invalid field
invalid_vectors = get_invalid_vectors
log.info("test_search_param_invalid_vectors: searching with "
@ -240,7 +184,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
collection_w.search(invalid_vectors, default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 999,
"err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})
@pytest.mark.tags(CaseLabel.L2)
@ -264,46 +208,24 @@ class TestCollectionSearchInvalid(TestcaseBase):
"err_msg": 'vector dimension mismatch'})
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
@pytest.mark.parametrize("invalid_field_name", ct.invalid_resource_names)
def test_search_param_invalid_field(self, invalid_field_name):
"""
target: test search with an invalid anns_field name
method: search with invalid field names
expected: raise exception and report the error
"""
if get_invalid_fields_type is None:
if invalid_field_name in [None, ""]:
pytest.skip("None is legal")
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_type
log.info("test_search_param_invalid_field_type: searching with invalid field: %s"
% invalid_search_field)
collection_w.load()
error = {"err_code": 1, "err_msg": f"`anns_field` value {get_invalid_fields_type} is illegal"}
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
error = {"err_code": 999, "err_msg": f"failed to create query plan: failed to get field schema by name"}
collection_w.search(vectors[:default_nq], invalid_field_name, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
"""
target: test search with invalid parameter values
method: search with invalid field value
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_value
log.info("test_search_param_invalid_field_value: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1100,
"err_msg": "failed to create query plan: failed to get field schema "
"by name: %s not found" % invalid_search_field})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="issue 30356")
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
@ -446,7 +368,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
@pytest.mark.parametrize("invalid_search_expr", ["'non_existing_field'==2", 1])
def test_search_param_invalid_expr_type(self, invalid_search_expr):
"""
target: test search with invalid parameter type
method: search with invalid search expressions
@ -454,17 +377,15 @@ class TestCollectionSearchInvalid(TestcaseBase):
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
collection_w.load()
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_type
log.info("test_search_param_invalid_expr_type: searching with "
"invalid expr: {}".format(invalid_search_expr))
error = {"err_code": 999, "err_msg": "failed to create query plan: cannot parse expression"}
if invalid_search_expr == 1:
error = {"err_code": 999, "err_msg": "The type of expr must be string"}
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The type of expr must be string ,"
"but {} is given".format(type(invalid_search_expr))})
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression", cf.gen_field_compare_expressions())
@ -497,12 +418,13 @@ class TestCollectionSearchInvalid(TestcaseBase):
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 999,
"err_msg": "failed to create query plan: "
"cannot parse expression: %s" % expression})
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
@pytest.mark.parametrize("invalid_expr_value", ["string", 1.2, None, [1, 2, 3]])
def test_search_param_invalid_expr_value(self, invalid_expr_value):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
@ -511,19 +433,20 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_value
invalid_search_expr = f"{ct.default_int64_field_name}=={invalid_expr_value}"
log.info("test_search_param_invalid_expr_value: searching with "
"invalid expr: %s" % invalid_search_expr)
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 65535,
check_items={"err_code": 999,
"err_msg": "failed to create query plan: cannot parse expression: %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_expr_bool(self, get_invalid_expr_bool_value):
@pytest.mark.parametrize("invalid_expr_bool_value", [1.2, 10, "string"])
def test_search_param_invalid_expr_bool(self, invalid_expr_bool_value):
"""
target: test search with invalid parameter values
method: search with invalid bool search expressions
@ -531,8 +454,9 @@ class TestCollectionSearchInvalid(TestcaseBase):
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, is_all_data_type=True)[0]
collection_w.load()
# 2 search with invalid bool expr
invalid_search_expr_bool = f"{default_bool_field_name} == {get_invalid_expr_bool_value}"
invalid_search_expr_bool = f"{default_bool_field_name} == {invalid_expr_bool_value}"
log.info("test_search_param_invalid_expr_bool: searching with "
"invalid expr: %s" % invalid_search_expr_bool)
collection_w.search(vectors[:default_nq], default_search_field,
@ -634,7 +558,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
default_search_params, nb, expression)
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
@pytest.mark.parametrize("invalid_partitions", [[None], [1, 2]])
def test_search_partitions_invalid_type(self, invalid_partitions):
"""
target: test search invalid partition
method: search with invalid partition type
@ -643,17 +568,33 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
partition_name = get_invalid_partition
err_msg = "`partition_name_array` value {} is illegal".format(
partition_name)
err_msg = "`partition_name_array` value {} is illegal".format(invalid_partitions)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, partition_name,
default_limit, default_search_exp, invalid_partitions,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 999,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
@pytest.mark.parametrize("invalid_partitions", [["non_existing"], [ct.default_partition_name, "non_existing"]])
def test_search_partitions_non_existing(self, invalid_partitions):
"""
target: test search with non-existing partition names
method: search with a partition name list containing a non-existing partition
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
err_msg = "partition name non_existing not found"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, invalid_partitions,
check_task=CheckTasks.err_res,
check_items={"err_code": 999, "err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("invalid_output_fields", [[None], [1, 2], ct.default_int64_field_name])
def test_search_with_output_fields_invalid_type(self, invalid_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
@ -662,15 +603,31 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
log.info("test_search_with_output_fields_invalid_type: Searching collection %s" %
collection_w.name)
output_fields = get_invalid_output_fields
err_msg = "`output_fields` value {} is illegal".format(output_fields)
err_msg = f"`output_fields` value {invalid_output_fields} is illegal"
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
default_search_exp, output_fields=invalid_output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
check_items={ct.err_code: 999,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("non_exiting_output_fields", [["non_exiting"], [ct.default_int64_field_name, "non_exiting"]])
def test_search_with_output_fields_non_existing(self, non_exiting_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
err_msg = f"field non_exiting not exist"
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=non_exiting_output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 999,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L1)
@ -1053,7 +1010,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
"err_msg": f"field {output_fields[-1]} not exist"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("ignore_growing", ct.get_invalid_strs[2:8])
@pytest.mark.parametrize("ignore_growing", [1.2, "string", [True]])
def test_search_invalid_ignore_growing_param(self, ignore_growing):
"""
target: test search ignoring growing segment
@ -1062,24 +1019,21 @@ class TestCollectionSearchInvalid(TestcaseBase):
3. search with param ignore_growing invalid
expected: raise exception
"""
if ignore_growing is None or ignore_growing == "":
pytest.skip("number is valid")
# 1. create a collection
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. insert data again
data = cf.gen_default_dataframe_data(start=10000)
data = cf.gen_default_dataframe_data(start=100)
collection_w.insert(data)
# 3. search with param ignore_growing=True
search_params = {"metric_type": "L2", "params": {
"nprobe": 10}, "ignore_growing": ignore_growing}
search_params = {"metric_type": "L2", "params": {"nprobe": 10}, "ignore_growing": ignore_growing}
vector = [[random.random() for _ in range(default_dim)]
for _ in range(nq)]
collection_w.search(vector[:default_nq], default_search_field, search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 999,
"err_msg": "parse search growing failed"})
@pytest.mark.tags(CaseLabel.L2)
@ -1125,7 +1079,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue 30365")
def test_range_search_invalid_radius(self, get_invalid_range_search_paras):
@pytest.mark.parametrize("invalid_radius", [[0.1], "str"])
def test_range_search_invalid_radius(self, invalid_radius):
"""
target: test range search with invalid radius
method: range search with invalid radius
@ -1136,19 +1091,18 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 2. range search
log.info("test_range_search_invalid_radius: Range searching collection %s" %
collection_w.name)
radius = get_invalid_range_search_paras
range_search_params = {"metric_type": "L2",
"params": {"nprobe": 10, "radius": radius, "range_filter": 0}}
"params": {"nprobe": 10, "radius": invalid_radius, "range_filter": 0}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "type must be number"})
check_items={"err_code": 999, "err_msg": "type must be number"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue 30365")
def test_range_search_invalid_range_filter(self, get_invalid_range_search_paras):
@pytest.mark.parametrize("invalid_range_filter", [[0.1], "str"])
def test_range_search_invalid_range_filter(self, invalid_range_filter):
"""
target: test range search with invalid range_filter
method: range search with invalid range_filter
@ -1164,15 +1118,13 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 2. range search
log.info("test_range_search_invalid_range_filter: Range searching collection %s" %
collection_w.name)
range_filter = get_invalid_range_search_paras
range_search_params = {"metric_type": "L2",
"params": {"nprobe": 10, "radius": 1, "range_filter": range_filter}}
"params": {"nprobe": 10, "radius": 1, "range_filter": invalid_range_filter}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "type must be number"})
check_items={"err_code": 999, "err_msg": "type must be number"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="issue 30365")
@ -1408,7 +1360,7 @@ class TestCollectionSearch(TestcaseBase):
"""
target: test search without specify metric type
method: create connection, collection, insert and search
expected: 1. search successfully with limit(topK)
expected: 1. search successfully with limit(topK)
"""
nq = 2
dim = 32
@ -1434,7 +1386,7 @@ class TestCollectionSearch(TestcaseBase):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: 1. search successfully with limit(topK)
expected: 1. search successfully with limit(topK)
"""
nq = 2
dim = 32
@ -6556,7 +6508,7 @@ class TestSearchDiskann(TestcaseBase):
"""
target: test search with invalid search_list params on diskann index
method: 1.create collection, insert data, primary_field is int field
2.create diskann index
2.create diskann index
3.search with invalid params, where topk <=20, search list [topk, 2147483647]
expected: search report an error
"""
@ -6588,7 +6540,7 @@ class TestSearchDiskann(TestcaseBase):
"""
target: test search with invalid search_list params on diskann index
method: 1.create collection, insert data, primary_field is int field
2.create diskann index
2.create diskann index
3.search with invalid params, [k, 200] when k <= 20
expected: search report an error
"""
@ -6617,7 +6569,7 @@ class TestSearchDiskann(TestcaseBase):
"""
target: test search on diskann index with string primary field
method: 1.create collection, insert data, primary_field is string field
2.create diskann index
2.create diskann index
3.search with invalid metric type
expected: search successfully
"""
@ -6655,8 +6607,8 @@ class TestSearchDiskann(TestcaseBase):
def test_search_with_delete_data(self, _async):
"""
target: test delete after creating index
method: 1.create collection , insert data,
2.create diskann index
method: 1.create collection , insert data,
2.create diskann index
 3.delete data, then search
expected: assert index and deleted id not in search result
"""
@ -9628,7 +9580,7 @@ class TestCollectionSearchJSON(TestcaseBase):
"""
target: test search case with default json expression
method: create connection, collection, insert and search
expected: 1. search successfully with limit(topK)
expected: 1. search successfully with limit(topK)
"""
# 1. initialize with data
dim = 64
@ -12509,7 +12461,7 @@ class TestCollectionHybridSearchValid(TestcaseBase):
"expr": "int64 > 0"}
req = AnnSearchRequest(**search_param)
req_list.append(req)
# 4. hybrid search
# 4. hybrid search
res = collection_w.hybrid_search(req_list, WeightedRanker(*weights), 10)[0]
is_sorted_descend = lambda lst: all(lst[i] >= lst[i + 1] for i in range(len(lst) - 1))
for i in range(nq):
@ -12541,7 +12493,7 @@ class TestCollectionHybridSearchValid(TestcaseBase):
"expr": "int64 > 0"}
req = AnnSearchRequest(**search_param)
req_list.append(req)
# 4. hybrid search
# 4. hybrid search
res = collection_w.hybrid_search(req_list, WeightedRanker(*weights), 10)[0]
is_sorted_descend = lambda lst: all(lst[i] >= lst[i+1] for i in range(len(lst)-1))
for i in range(nq):
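
# A condensed sketch of the hybrid-search pattern exercised above (pymilvus 2.4+):
# one AnnSearchRequest per vector field, re-ranked with WeightedRanker. Field name,
# weight and `collection` are placeholders.
from pymilvus import AnnSearchRequest, WeightedRanker

def hybrid_search_sketch(collection, query_vec):
    req = AnnSearchRequest(data=[query_vec], anns_field="float_vector",
                           param={"metric_type": "L2", "params": {"nprobe": 10}},
                           limit=10, expr="int64 > 0")
    # weights must match the number of requests; a single request is valid
    return collection.hybrid_search([req], WeightedRanker(1.0), limit=10)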

View File

@ -34,22 +34,6 @@ exp_schema = "schema"
class TestUtilityParams(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if isinstance(request.param, str):
pytest.skip("string is valid type for metric")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_value(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if not isinstance(request.param, str):
pytest.skip("Skip invalid type for metric")
yield request.param
@pytest.fixture(scope="function", params=["JACCARD", "Superstructure", "Substructure"])
def get_not_support_metric(self, request):
yield request.param
@ -58,20 +42,11 @@ class TestUtilityParams(TestcaseBase):
def get_support_metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition_names(self, request):
if isinstance(request.param, list):
if len(request.param) == 0:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_not_string)
def get_invalid_type_collection_name(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_not_string_value)
@pytest.fixture(scope="function", params=ct.invalid_resource_names)
def get_invalid_value_collection_name(self, request):
yield request.param
@ -157,32 +132,34 @@ class TestUtilityParams(TestcaseBase):
check_items={ct.err_code: 0, ct.err_msg: "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_invalid_name(self, get_invalid_collection_name):
@pytest.mark.parametrize("invalid_name", ct.invalid_resource_names)
def test_index_process_invalid_name(self, invalid_name):
"""
target: test building_process
method: input invalid name
expected: raise exception
"""
pass
# self._connect() c_name = get_invalid_collection_name ut = ApiUtilityWrapper() if isinstance(c_name,
# str) and c_name: ex, _ = ut.index_building_progress(c_name, check_items={ct.err_code: 1, ct.err_msg:
# "Invalid collection name"})
self._connect()
error = {ct.err_code: 999, ct.err_msg: f"Invalid collection name: {invalid_name}"}
if invalid_name in [None, "", " "]:
error = {ct.err_code: 999, ct.err_msg: "collection name should not be empty"}
self.utility_wrap.index_building_progress(collection_name=invalid_name,
check_task=CheckTasks.err_res, check_items=error)
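
# The positive counterpart as a hedged sketch: with an existing collection name the
# call returns a progress dict. The key names follow the pymilvus docs and are an
# assumption as far as this diff is concerned.
from pymilvus import utility
# progress = utility.index_building_progress("an_existing_collection")
# e.g. {'total_rows': 3000, 'indexed_rows': 3000}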
# TODO: index name is not supported yet
@pytest.mark.tags(CaseLabel.L1)
def _test_index_process_invalid_index_name(self, get_invalid_index_name):
@pytest.mark.parametrize("invalid_index_name", ct.invalid_resource_names)
def test_index_process_invalid_index_name(self, invalid_index_name):
"""
target: test building_process
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.index_building_progress(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
collection_w = self.init_collection_wrap()
error = {ct.err_code: 999, ct.err_msg: "index not found"}
self.utility_wrap.index_building_progress(collection_name=collection_w.name, index_name=invalid_index_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_wait_index_invalid_name(self, get_invalid_collection_name):
@ -249,16 +226,19 @@ class TestUtilityParams(TestcaseBase):
self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_invalid_partition_names(self, get_invalid_partition_names):
@pytest.mark.parametrize("partition_name", ct.invalid_resource_names)
def test_loading_progress_invalid_partition_names(self, partition_name):
"""
target: test loading progress with invalid partition names
method: input invalid partition names
expected: raise an exception
"""
collection_w = self.init_collection_general(prefix)[0]
partition_names = get_invalid_partition_names
err_msg = {ct.err_code: 0, ct.err_msg: "`partition_name_array` value {} is illegal".format(partition_names)}
collection_w = self.init_collection_general(prefix, nb=10)[0]
partition_names = [partition_name]
collection_w.load()
err_msg = {ct.err_code: 999, ct.err_msg: "partition not found"}
if partition_name is None:
err_msg = {ct.err_code: 999, ct.err_msg: "is illegal"}
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
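
# A hedged sketch of the positive path: loading_progress takes a list of existing
# partition names, and "_default" always exists. The collection name is a placeholder.
from pymilvus import utility
# utility.loading_progress("a_loaded_collection", partition_names=["_default"])
# -> a dict containing 'loading_progress', e.g. {'loading_progress': '100%', ...}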
@ -270,8 +250,7 @@ class TestUtilityParams(TestcaseBase):
method: input partition names where all or some do not exist
expected: raise exception
"""
collection_w = self.init_collection_general(prefix)[0]
log.debug(collection_w.num_entities)
collection_w = self.init_collection_general(prefix, nb=10)[0]
collection_w.load()
err_msg = {ct.err_code: 15, ct.err_msg: f"partition not found"}
self.utility_wrap.loading_progress(collection_w.name, partition_names,
@ -394,138 +373,6 @@ class TestUtilityParams(TestcaseBase):
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_invalid_metric_type(self, get_support_metric_field, get_invalid_metric_type):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_type
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "params value {{'metric': {}}} "
"is illegal".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_invalid_metric_value(self, get_support_metric_field, get_invalid_metric_value):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_value
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_not_support_metric(self, get_support_metric_field, get_not_support_metric):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_not_support_metric
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_invalid_using(self, get_support_metric_field):
"""
target: test calculated distance with invalid using
method: input invalid using
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
using = "empty"
self.utility_wrap.calc_distance(op_l, op_r, params, using=using,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_not_match_dim(self):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type and value
expected: raise exception
"""
self._connect()
dim = 129
vector_l = cf.gen_vectors(default_nb, default_dim)
vector_r = cf.gen_vectors(default_nb, dim)
op_l = {"float_vectors": vector_l}
op_r = {"float_vectors": vector_r}
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Cannot calculate distance between "
"vectors with different dimension"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_collection_before_load(self, get_support_metric_field):
"""
target: test calculated distance when entities is not ready
method: calculate distance before load
expected: raise exception
"""
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb,
is_index=True)
middle = len(insert_ids) // 2
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
@pytest.mark.tags(CaseLabel.L1)
def test_rename_collection_old_invalid_type(self, get_invalid_type_collection_name):
"""
@ -539,7 +386,7 @@ class TestUtilityParams(TestcaseBase):
new_collection_name = cf.gen_unique_str(prefix)
self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 999,
"err_msg": "`collection_name` value {} is illegal".format(
old_collection_name)})
@ -554,10 +401,12 @@ class TestUtilityParams(TestcaseBase):
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix)
old_collection_name = get_invalid_value_collection_name
new_collection_name = cf.gen_unique_str(prefix)
error = {"err_code": 4, "err_msg": "collection not found"}
if old_collection_name in [None, ""]:
error = {"err_code": 999, "err_msg": "is illegal"}
self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 4,
"err_msg": "collection not found"})
check_items=error)
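
# The same expectation written against the plain client, as a hypothetical sketch:
# renaming a collection that does not exist raises "collection not found", while an
# illegal old name (None/"") is rejected client-side. Names are placeholders.
import pytest
from pymilvus import utility, MilvusException

def rename_missing_collection_fails():
    with pytest.raises(MilvusException, match="collection not found"):
        utility.rename_collection("no_such_collection", "any_new_name")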
@pytest.mark.tags(CaseLabel.L2)
def test_rename_collection_new_invalid_type(self, get_invalid_type_collection_name):
@ -1014,7 +863,7 @@ class TestUtilityBase(TestcaseBase):
method: insert and flush data, call loading_progress after release
expected: return successfully with 0%
"""
collection_w = self.init_collection_general(prefix, insert_data=True)[0]
collection_w = self.init_collection_general(prefix, insert_data=True, nb=100)[0]
collection_w.release()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
exp_res = {loading_progress: '0%', num_loaded_partitions: 0, not_loaded_partitions: ['_default']}
@ -1181,355 +1030,6 @@ class TestUtilityBase(TestcaseBase):
assert not self.utility_wrap.has_collection(c_name)[0]
sleep(1)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_default(self):
"""
target: test calculated distance with default params
method: calculated distance between two random vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors")
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_default_sqrt(self, metric_field, metric):
"""
target: test calculated distance with default param
method: calculated distance with default sqrt
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default sqrt")
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_default_metric(self, sqrt):
"""
target: test calculated distance with default param
method: calculated distance with default metric
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default metric")
params = {"sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_binary_metric(self, metric_field, metric_binary):
"""
target: test calculate distance with binary vectors
method: calculate distance between binary vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
nb = 10
raw_vectors_l, vectors_l = cf.gen_binary_vectors(nb, default_dim)
raw_vectors_r, vectors_r = cf.gen_binary_vectors(nb, default_dim)
op_l = {"bin_vectors": vectors_l}
op_r = {"bin_vectors": vectors_r}
log.info("Calculating distance for binary vectors")
params = {metric_field: metric_binary}
vectors_l = raw_vectors_l
vectors_r = raw_vectors_r
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric_binary})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_from_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: both left and right vectors are from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
log.info("Creating vectors from collections for distance calculation")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
log.info("Creating vectors for entities")
params = {metric_field: metric, "sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_from_collections(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from collections
method: calculated distance between entities from two collections
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
prefix_1 = "utility_distance"
log.info("Creating two collections")
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
collection_w_1, vectors_1, _, insert_ids_1, _ = self.init_collection_general(prefix_1, True, nb)
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors_1[0].loc[:, default_field_name]
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids, "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids_1, "collection": collection_w_1.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance for entities from two collections")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_left_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set left vectors as random vectors, right vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = cf.gen_vectors(nb, default_dim)
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
op_l = {"float_vectors": vectors_l}
log.info("Extracting entities from collections for distance calculating")
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between vectors and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_right_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set right vectors as random vectors, left vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = cf.gen_vectors(nb, default_dim)
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between right vector and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_from_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from one partition entities
method: both left and right vectors are from partition
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
log.info("Extracting entities from partitions for distance calculating")
vectors_l = vectors[i].loc[:, default_field_name]
vectors_r = vectors[i].loc[:, default_field_name]
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculating distance between entities from one partition")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_from_partitions(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from partitions
method: calculate distance between entities from two partitions
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors[1].loc[:, default_field_name]
log.info("Extract entities from two partitions for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"partition": partitions[0].name, "field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"partition": partitions[1].name, "field": default_field_name}
log.info("Calculate distance between entities from two partitions")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_left_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set left vectors as random vectors, right vectors are entities
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_l = cf.gen_vectors(nb // 2, default_dim)
log.info("Extract entities from collection as right vectors")
op_l = {"float_vectors": vectors_l}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
log.info("Calculate distance between vector and entities")
for i in range(len(partitions)):
vectors_r = vectors[i].loc[:, default_field_name]
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="calc_distance interface is no longer supported")
def test_calc_distance_right_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set right vectors as random vectors, left vectors are entities
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_r = cf.gen_vectors(nb // 2, default_dim)
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
vectors_l = vectors[i].loc[:, default_field_name]
log.info("Extract entities from partition %d as left vector" % i)
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculate distance between vector and entities from partition %d" % i)
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L1)
def test_rename_collection(self):
"""
@ -1980,7 +1480,6 @@ class TestUtilityAdvanced(TestcaseBase):
check_items={ct.err_code: 1, ct.err_msg: "destination node not found in the same replica"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue: https://github.com/milvus-io/milvus/issues/19441")
def test_load_balance_with_one_sealed_segment_id_not_exist(self):
"""
target: test load balance of collection
@ -2015,7 +1514,7 @@ class TestUtilityAdvanced(TestcaseBase):
# load balance
self.utility_wrap.load_balance(collection_w.name, src_node_id, dst_node_ids, sealed_segment_ids,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "not found in source node"})
check_items={ct.err_code: 999, ct.err_msg: "not found in source node"})
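
# A hedged sketch of the underlying utility call: sealed segment ids that do not live
# on the source query node are rejected with "not found in source node", matching the
# refined assertion above. The node and segment ids are made-up placeholders.
from pymilvus import utility
# utility.load_balance("a_loaded_collection", src_node_id=1,
#                      dst_node_ids=[2], sealed_segment_ids=[999999999])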
@pytest.mark.tags(CaseLabel.L1)
def test_load_balance_with_all_sealed_segment_id_not_exist(self):
@ -4551,15 +4050,6 @@ class TestUtilityNegativeRbac(TestcaseBase):
super().teardown_method(method)
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_non_string(self, request):
"""
get invalid string without None
"""
if isinstance(request.param, str):
pytest.skip("skip string")
yield request.param
@pytest.mark.tags(CaseLabel.RBAC)
@pytest.mark.parametrize("name", ["longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong"
"longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong"