mirror of https://github.com/milvus-io/milvus.git
update pymilvus version and modify Error code & msg improvement (#27609)
Signed-off-by: nico <cheng.yuan@zilliz.com>
Branch: pull/27713/head
parent ff19cb2719
commit 76bb0a7bd8
@@ -12,7 +12,7 @@ allure-pytest==2.7.0
 pytest-print==0.2.1
 pytest-level==0.1.1
 pytest-xdist==2.5.0
-pymilvus==2.3.0.post1.dev13
+pymilvus==2.3.1.post1.dev6
 pytest-rerunfailures==9.1.1
 git+https://github.com/Projectplace/pytest-tags
 ndg-httpsclient
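The rest of the diff updates expected error codes and messages across the test suite; those assertions track the behavior shipped with the bumped pymilvus build above. A quick local sanity check (a sketch, not part of the commit; it assumes pymilvus exposes __version__ and that the packaging library is installed):

# Hedged sanity check for a local test environment: the assertions in the
# hunks below expect the error contract of pymilvus 2.3.1 dev builds or later.
from packaging.version import Version  # assumption: 'packaging' is available

import pymilvus

assert Version(pymilvus.__version__) >= Version("2.3.1"), \
    f"expected >= 2.3.1, got {pymilvus.__version__}"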
@@ -424,8 +424,9 @@ class TestAliasOperationInvalid(TestcaseBase):
         collection_2 = self.init_collection_wrap(name=c_2_name, schema=default_schema,
                                                  check_task=CheckTasks.check_collection_property,
                                                  check_items={exp_name: c_2_name, exp_schema: default_schema})
-        error = {ct.err_code: 1,
-                 ct.err_msg: "Create alias failed: duplicate collection alias"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: f"alias exists and already aliased to another collection, alias: {alias_a_name}, "
+                             f"collection: {c_1_name}, other collection: {c_2_name}"}
         self.utility_wrap.create_alias(collection_2.name, alias_a_name,
                                        check_task=CheckTasks.err_res,
                                        check_items=error)
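The pattern above repeats throughout this commit: the expected ct.err_code moves from legacy values (1, 4, 15, ...) to the new scheme (65535, 65538, 100, 200, 400, 1100), and ct.err_msg is rewritten to the server's new wording. A minimal sketch of how an err_res-style check can validate a raised error; this is an assumed shape only, not the framework's actual CheckTasks implementation:

# Minimal sketch of an err_res-style assertion. MilvusError and assert_err_res
# are illustrative stand-ins, not the test framework's real check utilities.
class MilvusError(Exception):
    def __init__(self, code: int, message: str):
        super().__init__(message)
        self.code, self.message = code, message

def assert_err_res(exc: MilvusError, err_code: int, err_msg: str):
    # exact match on the code, substring match on the message, since server
    # messages embed run-specific names and IDs
    assert exc.code == err_code, f"expected code {err_code}, got {exc.code}"
    assert err_msg in exc.message, f"{err_msg!r} not in {exc.message!r}"

try:
    raise MilvusError(65535, "alias exists and already aliased to another collection, "
                             "alias: a1, collection: c1, other collection: c2")
except MilvusError as e:
    assert_err_res(e, 65535, "alias exists and already aliased to another collection")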
@@ -453,8 +454,8 @@ class TestAliasOperationInvalid(TestcaseBase):
         # collection_w.create_alias(alias_name)

         alias_not_exist_name = cf.gen_unique_str(prefix)
-        error = {ct.err_code: 1,
-                 ct.err_msg: "Alter alias failed: alias does not exist"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: f"failed to alter alias, alias does not exist: {alias_not_exist_name}"}
         self.utility_wrap.alter_alias(collection_w.name, alias_not_exist_name,
                                       check_task=CheckTasks.err_res,
                                       check_items=error)
@@ -114,7 +114,11 @@ class TestCollectionParams(TestcaseBase):
         expected: raise exception
         """
         self._connect()
-        error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
+        error1 = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
+        error2 = {ct.err_code: 1100, ct.err_msg: "Invalid collection name: 1ns_. the first character of a"
+                                                 " collection name must be an underscore or letter: invalid"
+                                                 " parameter".format(name)}
+        error = error1 if name not in ["1ns_", "qw$_o90"] else error2
         self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
                                              check_items=error)

@@ -315,7 +319,7 @@ class TestCollectionParams(TestcaseBase):
         """
         self._connect()
         fields = get_invalid_type_fields
-        error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
+        error = {ct.err_code: 1, ct.err_msg: "The fields of schema must be type list."}
         self.collection_schema_wrap.init_collection_schema(fields=fields,
                                                            check_task=CheckTasks.err_res, check_items=error)

@@ -374,7 +378,7 @@ class TestCollectionParams(TestcaseBase):
         c_name = cf.gen_unique_str(prefix)
         field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
         schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
-        error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
+        error = {ct.err_code: 65535, ct.err_msg: "the partition key field must not be primary field"}
         self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -820,7 +824,7 @@ class TestCollectionParams(TestcaseBase):
         c_name = cf.gen_unique_str(prefix)
         float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
         schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
-        error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
+        error = {ct.err_code: 65535, ct.err_msg: "strconv.ParseInt: parsing \"[]\": invalid syntax"}
         self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -1088,6 +1092,7 @@ class TestCollectionOperation(TestcaseBase):
         assert self.utility_wrap.has_collection(c_name)[0]

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("already has same senario")
     def test_collection_all_datatype_fields(self):
         """
         target: test create collection with all dataType fields
@@ -1738,7 +1743,7 @@ class TestCreateCollectionInvalid(TestcaseBase):
             field_name_tmp = gen_unique_str("field_name")
             field_schema_temp = cf.gen_int64_field(field_name_tmp)
             field_schema_list.append(field_schema_temp)
-        error = {ct.err_code: 1, ct.err_msg: "'maximum field\'s number should be limited to 64'"}
+        error = {ct.err_code: 65535, ct.err_msg: "maximum field's number should be limited to 64"}
         schema, _ = self.collection_schema_wrap.init_collection_schema(fields=field_schema_list)
         self.init_collection_wrap(name=c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

@@ -2219,8 +2224,8 @@ class TestLoadCollection(TestcaseBase):
         c_name = cf.gen_unique_str()
         collection_wr = self.init_collection_wrap(name=c_name)
         collection_wr.drop()
-        error = {ct.err_code: 4,
-                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
+        error = {ct.err_code: 100,
+                 ct.err_msg: "collection= : collection not found"}
         collection_wr.load(check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -2234,8 +2239,8 @@ class TestLoadCollection(TestcaseBase):
         c_name = cf.gen_unique_str()
         collection_wr = self.init_collection_wrap(name=c_name)
         collection_wr.drop()
-        error = {ct.err_code: 4,
-                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
+        error = {ct.err_code: 100,
+                 ct.err_msg: "collection= : collection not found"}
         collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -2433,7 +2438,12 @@ class TestLoadCollection(TestcaseBase):
         self.init_partition_wrap(collection_w, partition2)
         collection_w.load()
         partition_w.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65538,
+                 ct.err_msg: 'failed to query: attempt #0: failed to search/query delegator 1'
+                             ' for channel by-dev-rootcoord-dml_0_444857573607352620v0: fail '
+                             'to Query, QueryNode ID = 1, reason=partition=[444857573607352660]:'
+                             ' partition not loaded: attempt #1: no available shard delegator '
+                             'found: service unavailable'}
         collection_w.query(default_term_expr, partition_names=[partition1],
                            check_task=CheckTasks.err_res, check_items=error)
         collection_w.release()
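Note that the new expected messages here embed values captured from one particular run: delegator IDs, channel names such as by-dev-rootcoord-dml_0_444857573607352620v0, and numeric partition IDs. Only the surrounding wording is stable, so a check that must survive re-runs would match the stable fragments, for example:

import re

# Illustrative: anchor on the stable shape of the message, not the run-specific IDs.
msg = ("failed to query: attempt #0: failed to search/query delegator 1 for channel "
       "by-dev-rootcoord-dml_0_444857573607352620v0: fail to Query, QueryNode ID = 1, "
       "reason=partition=[444857573607352660]: partition not loaded")
assert re.search(r"partition=\[\d+\]: partition not loaded", msg)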
@@ -2458,7 +2468,12 @@ class TestLoadCollection(TestcaseBase):
         partition_w1.release()
         collection_w.release()
         partition_w1.load()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65538,
+                 ct.err_msg: 'failed to query: attempt #0: failed to search/query delegator '
+                             '1 for channel by-dev-rootcoord-dml_14_444857573607352608v0: fail'
+                             ' to Query, QueryNode ID = 1, reason=partition=[444857573607352653]:'
+                             ' partition not loaded: attempt #1: no available shard delegator '
+                             'found: service unavailable'}
         collection_w.query(default_term_expr, partition_names=[partition2],
                            check_task=CheckTasks.err_res, check_items=error)
         partition_w2.load()
@@ -2516,7 +2531,10 @@ class TestLoadCollection(TestcaseBase):
         partition_w1.release()
         partition_w1.drop()
         partition_w2.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65538, ct.err_msg: 'failed to query: attempt #0: failed to search/query delegator 1 '
+                                                 'for channel by-dev-rootcoord-xx: fail to Query, QueryNode ID = 1,'
+                                                 ' reason=partition=[ ]: partition not loaded: attempt #1: no '
+                                                 'available shard delegator found: service unavailable'}
         collection_w.query(default_term_expr, partition_names=[partition2],
                            check_task=CheckTasks.err_res, check_items=error)
         collection_w.load()
@@ -2595,8 +2613,8 @@ class TestLoadCollection(TestcaseBase):
         collection_wr.load()
         collection_wr.release()
         collection_wr.drop()
-        error = {ct.err_code: 4,
-                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
+        error = {ct.err_code: 100,
+                 ct.err_msg: "collection=444857573607352784: collection not found"}
         collection_wr.load(check_task=CheckTasks.err_res, check_items=error)
         collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

@@ -2613,8 +2631,8 @@ class TestLoadCollection(TestcaseBase):
         collection_wr.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
         collection_wr.load()
         collection_wr.drop()
-        error = {ct.err_code: 4,
-                 ct.err_msg: "can't find collection"}
+        error = {ct.err_code: 100,
+                 ct.err_msg: "collection=444857573607351711: collection not found"}
         collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -2726,7 +2744,8 @@ class TestLoadCollection(TestcaseBase):
         assert collection_w.num_entities == ct.default_nb
         collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)

-        error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "failed to load collection: failed to spawn replica for collection: nodes not enough"}
         collection_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.ClusterOnly)
@@ -2994,8 +3013,9 @@ class TestLoadCollection(TestcaseBase):
         assert collection_w.num_entities == ct.default_nb

         collection_w.get_replicas(check_task=CheckTasks.err_res,
-                                  check_items={"err_code": 15,
-                                               "err_msg": "collection not found, maybe not loaded"})
+                                  check_items={"err_code": 400,
+                                               "err_msg": "failed to get replicas by collection: "
+                                                          "replica=444857573607352187: replica not found"})

     @pytest.mark.tags(CaseLabel.L3)
     def test_count_multi_replicas(self):
@@ -3054,13 +3074,15 @@ class TestDescribeCollection(TestcaseBase):
         description = \
             {'collection_name': c_name, 'auto_id': False, 'num_shards': ct.default_shards_num, 'description': '',
              'fields': [{'field_id': 100, 'name': 'int64', 'description': '', 'type': 5,
-                         'params': {}, 'is_primary': True},
-                        {'field_id': 101, 'name': 'float', 'description': '', 'type': 10, 'params': {}},
+                         'params': {}, 'is_primary': True, 'element_type': 0,},
+                        {'field_id': 101, 'name': 'float', 'description': '', 'type': 10, 'params': {},
+                         'element_type': 0,},
                         {'field_id': 102, 'name': 'varchar', 'description': '', 'type': 21,
-                         'params': {'max_length': 65535}},
-                        {'field_id': 103, 'name': 'json_field', 'description': '', 'type': 23, 'params': {}},
+                         'params': {'max_length': 65535}, 'element_type': 0,},
+                        {'field_id': 103, 'name': 'json_field', 'description': '', 'type': 23, 'params': {},
+                         'element_type': 0,},
                         {'field_id': 104, 'name': 'float_vector', 'description': '', 'type': 101,
-                         'params': {'dim': 128}}],
+                         'params': {'dim': 128}, 'element_type': 0}],
              'aliases': [], 'consistency_level': 0, 'properties': [], 'num_partitions': 1}
         res = collection_w.describe()[0]
         del res['collection_id']
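Every field in the expected describe() output now carries an element_type key (0 on non-array fields), which the bumped pymilvus version adds to the field schema. Since collection_id differs per run, the test deletes it before comparing; the same ignore-volatile-keys idea in isolation, with illustrative values:

# Illustrative: compare a describe()-style result against an expectation
# after dropping per-run keys such as 'collection_id'.
expected = {'collection_name': 'c1', 'auto_id': False}
res = {'collection_name': 'c1', 'auto_id': False, 'collection_id': 444857573607352784}
volatile = {'collection_id'}
assert {k: v for k, v in res.items() if k not in volatile} == expected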
@@ -3090,7 +3112,9 @@ class TestReleaseAdvanced(TestcaseBase):
         search_res, _ = collection_wr.search(vectors, default_search_field, default_search_params,
                                              default_limit, _async=True)
         collection_wr.release()
-        error = {ct.err_code: 1, ct.err_msg: 'collection %s was not loaded into memory' % c_name}
+        error = {ct.err_code: 65535, ct.err_msg: "failed to search: attempt #0: fail to get shard leaders from"
+                                                 " QueryCoord: collection=444818512783071471: collection not"
+                                                 " loaded: unrecoverable error"}
         collection_wr.search(vectors, default_search_field, default_search_params, default_limit,
                              check_task=CheckTasks.err_res, check_items=error)

@@ -3117,8 +3141,10 @@ class TestReleaseAdvanced(TestcaseBase):
                             default_search_params, limit, default_search_exp,
                             [par_name],
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 1,
-                                         "err_msg": "partition has been released"})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "failed to search: attempt #0: fail to get shard leaders "
+                                                    "from QueryCoord: collection=444857573607353390: collection "
+                                                    "not loaded: unrecoverable error"})

     @pytest.mark.tags(CaseLabel.L0)
     def test_release_indexed_collection_during_searching(self):
@@ -3139,7 +3165,9 @@ class TestReleaseAdvanced(TestcaseBase):
                             default_search_params, limit, default_search_exp,
                             [par_name], _async=True)
         collection_w.release()
-        error = {ct.err_code: 1, ct.err_msg: 'collection %s was not loaded into memory' % collection_w.name}
+        error = {ct.err_code: 65535, ct.err_msg: "failed to search: attempt #0: fail to get shard leaders from "
+                                                 "QueryCoord: collection=444818512783071824: collection not "
+                                                 "loaded: unrecoverable error"}
         collection_w.search(vectors, default_search_field,
                             default_search_params, limit, default_search_exp,
                             [par_name],
@@ -3189,8 +3217,8 @@ class TestLoadPartition(TestcaseBase):
         # for metric_type in ct.binary_metrics:
         binary_index["metric_type"] = metric_type
         if binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in ct.structure_metrics:
-            error = {ct.err_code: 1, ct.err_msg: 'Invalid metric_type: SUBSTRUCTURE, '
-                                                 'which does not match the index type: BIN_IVF_FLAT'}
+            error = {ct.err_code: 65535,
+                     ct.err_msg: "metric type not found or not supported, supported: [HAMMING JACCARD]"}
             collection_w.create_index(ct.default_binary_vec_field_name, binary_index,
                                       check_task=CheckTasks.err_res, check_items=error)
             collection_w.create_index(ct.default_binary_vec_field_name, ct.default_bin_flat_index)
@@ -3371,7 +3399,11 @@ class TestLoadPartition(TestcaseBase):
         partition_w2 = self.init_partition_wrap(collection_w, partition2)
         partition_w1.load()
         partition_w1.load()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65538,
+                 ct.err_msg: 'failed to query: attempt #0: failed to search/query delegator 1 for'
+                             ' channel by-dev-rootcoord-dml_10_444857573607353001v0: fail to Query, '
+                             'QueryNode ID = 1, reason=partition=[444857573607353015]: partition not '
+                             'loaded: attempt #1: no available shard delegator found: service unavailable'}
         collection_w.query(default_term_expr, partition_names=[partition2],
                            check_task=CheckTasks.err_res, check_items=error)
         collection_w.load()
@@ -3426,7 +3458,9 @@ class TestLoadPartition(TestcaseBase):
         partition_w2 = self.init_partition_wrap(collection_w, partition2)
         partition_w1.load()
         collection_w.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65535, ct.err_msg: "failed to query: attempt #0: fail to get shard leaders from "
+                                                 "QueryCoord: collection=444818512783073123: collection not"
+                                                 " loaded: unrecoverable error"}
         collection_w.query(default_term_expr, partition_names=[partition1],
                            check_task=CheckTasks.err_res, check_items=error)
         partition_w1.load()
@@ -3465,7 +3499,9 @@ class TestLoadPartition(TestcaseBase):
         partition_w2 = self.init_partition_wrap(collection_w, partition2)
         partition_w1.load()
         partition_w1.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: 'failed to query: attempt #0: fail to get shard leaders from QueryCoord: '
+                             'collection=444857573607352292: collection not loaded: unrecoverable error'}
         collection_w.query(default_term_expr, partition_names=[partition1],
                            check_task=CheckTasks.err_res, check_items=error)
         partition_w1.load()
@@ -3528,7 +3564,9 @@ class TestLoadPartition(TestcaseBase):
         partition_w1.load()
         partition_w1.release()
         partition_w2.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: 'failed to query: attempt #0: fail to get shard leaders from QueryCoord:'
+                             ' collection=444857573607353795: collection not loaded: unrecoverable error'}
         collection_w.query(default_term_expr, partition_names=[partition1, partition2],
                            check_task=CheckTasks.err_res, check_items=error)
         collection_w.load()
@@ -3571,7 +3609,7 @@ class TestLoadPartition(TestcaseBase):
         partition_w1.load()
         partition_w1.release()
         partition_w1.drop()
-        error = {ct.err_code: 1, ct.err_msg: 'name not found'}
+        error = {ct.err_code: 65535, ct.err_msg: f'partition name {partition1} not found'}
         collection_w.query(default_term_expr, partition_names=[partition1, partition2],
                            check_task=CheckTasks.err_res, check_items=error)
         partition_w2.drop()
@@ -3654,7 +3692,10 @@ class TestLoadPartition(TestcaseBase):
         partition_w1.load()
         partition_w2.drop()
         partition_w1.release()
-        error = {ct.err_code: 1, ct.err_msg: 'not loaded into memory'}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: 'failed to query: attempt #0: fail to get shard leaders from'
+                             ' QueryCoord: collection=444857573607353891: collection not'
+                             ' loaded: unrecoverable error'}
         collection_w.query(default_term_expr, partition_names=[partition1],
                            check_task=CheckTasks.err_res, check_items=error)
         partition_w1.load()
@@ -3767,7 +3808,7 @@ class TestCollectionString(TestcaseBase):
         max_length = 100000
         string_field = cf.gen_string_field(max_length=max_length)
         schema = cf.gen_collection_schema([int_field, string_field, vec_field])
-        error = {ct.err_code: 1, ct.err_msg: "invalid max_length: %s" % max_length}
+        error = {ct.err_code: 65535, ct.err_msg: "the maximum length specified for a VarChar should be in (0, 65535]"}
         self.collection_wrap.init_collection(name=c_name, schema=schema,
                                              check_task=CheckTasks.err_res, check_items=error)

@@ -583,8 +583,8 @@ class TestDeleteOperation(TestcaseBase):
         collection_w = self.init_collection_general(prefix, nb=tmp_nb, insert_data=True)[0]

         # raise exception
-        error = {ct.err_code: 15,
-                 ct.err_msg: f"partitionID of partitionName:{ct.default_tag} can not be find"}
+        error = {ct.err_code: 200,
+                 ct.err_msg: f"Failed to get partition id: partition={ct.default_tag}: partition not found"}
         collection_w.delete(tmp_expr, partition_name=ct.default_tag,
                             check_task=CheckTasks.err_res, check_items=error)

@@ -678,8 +678,9 @@ class TestDeleteOperation(TestcaseBase):
         assert res.delete_count == 1

         # query without loading and raise exception
-        error = {ct.err_code: 1,
-                 ct.err_msg: f"collection {collection_w.name} was not loaded into memory"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "failed to query: attempt #0: fail to get shard leaders from QueryCoord:"
+                             " collection=444857573607556205: collection not loaded: unrecoverable error"}
         collection_w.query(expr=tmp_expr, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
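Taken together, the hunks so far show the code space this commit asserts against; the notes below are a reading of this diff, not an official table:

# Error codes as they appear in this diff (descriptive notes only):
observed_codes = {
    1: "legacy generic error, still used for client-side (SDK) parameter checks",
    100: "collection not found",
    200: "partition not found",
    400: "replica not found",
    1100: "invalid parameter (e.g. a bad collection name)",
    65535: "generic server-side failure (unexpected error)",
    65538: "failure surfaced from a QueryNode/shard delegator after retries",
}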
@@ -86,7 +86,8 @@ class TestHighLevelApi(TestcaseBase):
         client = self._connect(enable_high_level_api=True)
         collection_name = cf.gen_unique_str(prefix)
         # 1. create collection
-        error = {ct.err_code: 1, ct.err_msg: f"The auto_id can only be specified on field with DataType.INT64"}
+        error = {ct.err_code: 65535, ct.err_msg: f"type param(max_length) should be specified for varChar "
+                                                 f"field of collection {collection_name}"}
         client_w.create_collection(client, collection_name, default_dim, id_type="string", auto_id=True,
                                    check_task=CheckTasks.err_res, check_items=error)

@@ -121,7 +122,8 @@ class TestHighLevelApi(TestcaseBase):
         client = self._connect(enable_high_level_api=True)
         collection_name = cf.gen_unique_str(prefix)
         # 1. create collection
-        error = {ct.err_code: 1, ct.err_msg: f"metric type not found or not supported, supported: [L2 IP COSINE]"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "metric type not found or not supported, supported: [L2 IP COSINE HAMMING JACCARD]"}
         client_w.create_collection(client, collection_name, default_dim, metric_type="invalid",
                                    check_task=CheckTasks.err_res, check_items=error)

@@ -109,10 +109,10 @@ class TestIndexParams(TestcaseBase):
         if not isinstance(index_params["index_type"], str):
             msg = "must be str"
         else:
-            msg = "Invalid index_type"
+            msg = "invalid index type"
         self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
                                    check_task=CheckTasks.err_res,
-                                   check_items={ct.err_code: 1, ct.err_msg: msg})
+                                   check_items={ct.err_code: 65535, ct.err_msg: msg})

     @pytest.mark.tags(CaseLabel.L1)
     def test_index_type_not_supported(self):
@@ -212,7 +212,8 @@ class TestIndexOperation(TestcaseBase):
         c_name = cf.gen_unique_str(prefix)
         collection_w = self.init_collection_wrap(name=c_name)
         self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
-        error = {ct.err_code: 1, ct.err_msg: f"CreateIndex failed: index already exists"}
+        error = {ct.err_code: 65535, ct.err_msg: "CreateIndex failed: at most one "
+                                                 "distinct index is allowed per field"}
         self.index_wrap.init_index(collection_w.collection, default_field_name, default_index,
                                    check_task=CheckTasks.err_res, check_items=error)

@@ -241,8 +242,9 @@ class TestIndexOperation(TestcaseBase):
         collection_w = self.init_collection_general(prefix, True, is_index=False)[0]
         collection_w.create_index(ct.default_int64_field_name, {})
         collection_w.load(check_task=CheckTasks.err_res,
-                          check_items={ct.err_code: 1, ct.err_msg: "there is no vector index on collection, "
-                                                                   "please create index firstly"})
+                          check_items={ct.err_code: 65535,
+                                       ct.err_msg: f"there is no vector index on collection: {collection_w.name}, "
+                                                   f"please create index firstly"})

     @pytest.mark.tags(CaseLabel.L1)
     def test_index_collection_empty(self):
@@ -365,6 +367,7 @@ class TestIndexOperation(TestcaseBase):
         pass

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("issue #27624")
     def test_index_drop_index(self):
         """
         target: test index.drop
@@ -398,6 +401,7 @@ class TestIndexAdvanced(TestcaseBase):
     """ Test case of index interface """

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_index_drop_multi_collections(self):
         """
         target: test index.drop
@@ -688,6 +692,7 @@ class TestNewIndexBase(TestcaseBase):
                                       ct.err_msg: "CreateIndex failed: creating multiple indexes on same field is not supported"})

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("issue #27624")
     def test_create_index_repeatedly_new(self):
         """
         target: check if index can be created repeatedly, with the different create_index params
@@ -857,6 +862,7 @@ class TestNewIndexBase(TestcaseBase):
                                       ct.err_msg: "CreateIndex failed: creating multiple indexes on same field is not supported"})

     @pytest.mark.tags(CaseLabel.L0)
+    @pytest.mark.skip("issue #27624")
     def test_create_different_index_repeatedly_ip(self):
         """
         target: check if index can be created repeatedly, with the different create_index params
@@ -883,6 +889,7 @@ class TestNewIndexBase(TestcaseBase):
         """

     @pytest.mark.tags(CaseLabel.L0)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index(self, get_simple_index):
         """
         target: test drop index interface
@@ -902,6 +909,7 @@ class TestNewIndexBase(TestcaseBase):

     @pytest.mark.tags(CaseLabel.L2)
     # TODO #7372
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_repeatedly(self, get_simple_index):
         """
         target: test drop index repeatedly
@@ -936,6 +944,7 @@ class TestNewIndexBase(TestcaseBase):
                                      check_items={ct.err_code: 0, ct.err_msg: "should create connect first."})

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_create_drop_index_repeatedly(self, get_simple_index):
         """
         target: test create / drop index repeatedly, use the same index params
@@ -953,6 +962,7 @@ class TestNewIndexBase(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_ip(self, get_simple_index):
         """
         target: test drop index interface
@@ -970,6 +980,7 @@ class TestNewIndexBase(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_repeatedly_ip(self, get_simple_index):
         """
         target: test drop index repeatedly
@@ -1036,6 +1047,7 @@ class TestNewIndexBase(TestcaseBase):
         assert len(collection_w.indexes) == 1

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_collection_not_create_ip(self):
         """
         target: test drop index interface when index not created
@@ -1149,8 +1161,9 @@ class TestNewIndexBinary(TestcaseBase):
         binary_index_params = {'index_type': 'BIN_IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 64}}
         collection_w.create_index(default_binary_vec_field_name, binary_index_params,
                                   index_name=binary_field_name, check_task=CheckTasks.err_res,
-                                  check_items={ct.err_code: 1,
-                                               ct.err_msg: "Invalid metric_type: L2, which does not match the index type: BIN_IVF_FLAT"})
+                                  check_items={ct.err_code: 65535,
+                                               ct.err_msg: "metric type not found or not supported, supported: "
+                                                           "[HAMMING JACCARD SUBSTRUCTURE SUPERSTRUCTURE]"})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("metric_type", ["L2", "IP", "COSINE", "JACCARD", "HAMMING"])
@@ -1186,6 +1199,7 @@ class TestNewIndexBinary(TestcaseBase):
         """

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index(self):
         """
         target: test drop index interface
@@ -1203,6 +1217,7 @@ class TestNewIndexBinary(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L0)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_partition(self):
         """
         target: test drop index interface
@@ -1220,10 +1235,10 @@ class TestNewIndexBinary(TestcaseBase):
         assert len(ins_res.primary_keys) == len(df)
         collection_w.create_index(default_binary_vec_field_name, default_binary_index_params,
                                   index_name=binary_field_name)
-        assert collection_w.has_index(index_name=binary_field_name)[0] == True
+        assert collection_w.has_index(index_name=binary_field_name)[0] is True
         assert len(collection_w.indexes) == 1
         collection_w.drop_index(index_name=binary_field_name)
-        assert collection_w.has_index(index_name=binary_field_name)[0] == False
+        assert collection_w.has_index(index_name=binary_field_name)[0] is False
         assert len(collection_w.indexes) == 0

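The two assertion tweaks above are unrelated to error codes: has_index() returns a bool in the first tuple slot, and PEP 8 prefers identity comparison with the True/False singletons:

# PEP 8: comparisons to True/False use 'is', not '=='.
flag = True
assert flag is True    # rather than flag == True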
@@ -1298,7 +1313,7 @@ class TestIndexInvalid(TestcaseBase):
                                          "loaded, please release it first"})

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.parametrize("n_trees", [-1, 1025, 'a', {34}])
+    @pytest.mark.parametrize("n_trees", [-1, 1025, 'a'])
     def test_annoy_index_with_invalid_params(self, n_trees):
         """
         target: test create index with invalid params
@@ -1310,8 +1325,8 @@ class TestIndexInvalid(TestcaseBase):
         index_annoy = {"index_type": "ANNOY", "params": {"n_trees": n_trees}, "metric_type": "L2"}
         collection_w.create_index("float_vector", index_annoy,
                                   check_task=CheckTasks.err_res,
-                                  check_items={"err_code": 1,
-                                               "err_msg": "invalid index params"})
+                                  check_items={"err_code": 65535,
+                                               "err_msg": "invalid index type: ANNOY"})

     @pytest.mark.tags(CaseLabel.L1)
     def test_create_index_json(self):
@@ -1362,6 +1377,7 @@ class TestNewIndexAsync(TestcaseBase):

     @pytest.mark.tags(CaseLabel.L0)
     # @pytest.mark.timeout(BUILD_TIMEOUT)
+    @pytest.mark.skip("issue #27624")
     def test_create_index_drop(self, _async):
         """
         target: test create index interface
@@ -1565,6 +1581,7 @@ class TestIndexString(TestcaseBase):
         assert len(collection_w.indexes) == 2

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("issue #27624")
     def test_drop_index_with_string_field(self):
         """
         target: test drop index with string field
@@ -1583,6 +1600,7 @@ class TestIndexString(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("issue #27624")
     def test_collection_drop_index_with_string(self):
         """
         target: test drop index with string field
@@ -1599,6 +1617,7 @@ class TestIndexString(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.skip("issue #27624")
     def test_index_with_string_field_empty(self):
         """
         target: test drop index with string field
@@ -1704,6 +1723,7 @@ class TestIndexDiskann(TestcaseBase):
                                          "limit": default_limit})

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_create_diskann_index_drop_with_async(self, _async):
         """
         target: test create index interface
@@ -1725,6 +1745,7 @@ class TestIndexDiskann(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_create_diskann_index_with_partition(self):
         """
         target: test create index with diskann
@@ -1752,6 +1773,7 @@ class TestIndexDiskann(TestcaseBase):
         assert len(collection_w.indexes) == 0

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_diskann_index_with_normal(self):
         """
         target: test drop diskann index normal
@@ -1772,6 +1794,7 @@ class TestIndexDiskann(TestcaseBase):
         assert collection_w.has_index(index_name=index_name1)[0] == False

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_diskann_index_and_create_again(self):
         """
         target: test drop diskann index normal
@@ -1814,6 +1837,7 @@ class TestIndexDiskann(TestcaseBase):
         assert collection_w.has_index(index_name="c")[0] == True

     @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.skip("issue #27624")
     def test_drop_diskann_index_with_partition(self):
         """
         target: test drop diskann index normal
@@ -1849,8 +1873,8 @@ class TestIndexDiskann(TestcaseBase):
         collection_w.insert(data=df)
         collection_w.create_index(default_binary_vec_field_name, ct.default_diskann_index, index_name=binary_field_name,
                                   check_task=CheckTasks.err_res,
-                                  check_items={ct.err_code: 1,
-                                               ct.err_msg: "field data type BinaryVector don't support the index build type DISKANN"})
+                                  check_items={ct.err_code: 65535,
+                                               ct.err_msg: "float or float16 vector are only supported"})

     @pytest.mark.tags(CaseLabel.L2)
     def test_create_diskann_index_multithread(self):
@@ -177,8 +177,10 @@ class TestPartitionParams(TestcaseBase):
         partition_name = cf.gen_str_by_length(256)
         self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                            check_task=CheckTasks.err_res,
-                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
-                                           )
+                                           check_items={ct.err_code: 65535,
+                                                        ct.err_msg: f"Invalid partition name: {partition_name}. "
+                                                                    f"The length of a partition name must be less "
+                                                                    f"than 255 characters."})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("partition_name", ["_Partiti0n", "pArt1_ti0n"])
@@ -208,10 +210,13 @@ class TestPartitionParams(TestcaseBase):
         collection_w = self.init_collection_wrap()

         # create partition
+        error1 = {ct.err_code: 1, ct.err_msg: f"`partition_name` value {partition_name} is illegal"}
+        error2 = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. Partition name can"
+                                                  f" only contain numbers, letters and underscores."}
+        error = error1 if partition_name in [None, [], 1, [1, "2", 3], (1,), {1: 1}] else error2
         self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                            check_task=CheckTasks.err_res,
-                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
-                                           )
+                                           check_items=error)
         # TODO: need an error code issue #5144 and assert independently

     @pytest.mark.tags(CaseLabel.L2)
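The error1/error2 branching above captures that validation now happens at two layers: clearly non-string names are rejected client-side by the SDK (legacy code 1), while well-typed but invalid names reach the server and come back with the new code 65535. The same decision in isolation, as an illustrative helper rather than framework code:

def expected_partition_error(partition_name):
    # Non-string values never leave the SDK (illustrative reading of the test above).
    rejected_client_side = [None, [], 1, [1, "2", 3], (1,), {1: 1}]
    if partition_name in rejected_client_side:
        return {"err_code": 1,
                "err_msg": f"`partition_name` value {partition_name} is illegal"}
    return {"err_code": 65535,
            "err_msg": f"Invalid partition name: {partition_name}."}

assert expected_partition_error(None)["err_code"] == 1
assert expected_partition_error("inv@lid")["err_code"] == 65535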
@@ -370,7 +375,8 @@ class TestPartitionParams(TestcaseBase):
         assert partition_w.num_entities == ct.default_nb

         # load with 2 replicas
-        error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "failed to load partitions: failed to spawn replica for collection: nodes not enough"}
         collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
         partition_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

@@ -499,7 +505,10 @@ class TestPartitionParams(TestcaseBase):
                                 anns_field=ct.default_float_vec_field_name,
                                 params={"nprobe": 32}, limit=1,
                                 check_task=ct.CheckTasks.err_res,
-                                check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
+                                check_items={ct.err_code: 65535,
+                                             ct.err_msg: "failed to search: attempt #0: fail to get shard "
+                                                         "leaders from QueryCoord: collection=4448185127832"
+                                                         "79866: collection not loaded: unrecoverable error"})

     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
@@ -665,8 +674,9 @@ class TestPartitionOperations(TestcaseBase):
             self.partition_wrap.init_partition(
                 collection_w.collection, p_name,
                 check_task=CheckTasks.err_res,
-                check_items={ct.err_code: 1,
-                             ct.err_msg: "maximum partition's number should be limit to 4096"})
+                check_items={ct.err_code: 65535,
+                             ct.err_msg: "partition number (4096) exceeds max configuration (4096), "
+                                         "collection: {}".format(collection_w.name)})

         # TODO: Try to verify load collection with a large number of partitions. #11651

@@ -61,7 +61,7 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w, entities = self.init_collection_general(prefix, insert_data=True, nb=10)[0:2]
         term_expr = f'{default_int_field_name} in {entities[:default_pos]}'
-        error = {ct.err_code: 1, ct.err_msg: "unexpected token Identifier"}
+        error = {ct.err_code: 65535, ct.err_msg: "cannot parse expression: int64 in .."}
         collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L0)
@@ -228,7 +228,7 @@ class TestQueryParams(TestcaseBase):
         expected: raise exception
         """
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
-        error = {ct.err_code: 1, ct.err_msg: "Invalid expression!"}
+        error = {ct.err_code: 65535, ct.err_msg: "cannot parse expression: 12-s, error: field s not exist"}
         exprs = ["12-s", "中文", "a", " "]
         for expr in exprs:
             collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
@@ -254,7 +254,8 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
         term_expr = 'field in [1, 2]'
-        error = {ct.err_code: 1, ct.err_msg: "fieldName(field) not found"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "cannot parse expression: field in [1, 2], error: field field not exist"}
         collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -309,26 +310,23 @@ class TestQueryParams(TestcaseBase):
         """
         self._connect()
         df = cf.gen_default_dataframe_data()
-        bool_values = pd.Series(
-            data=[True if i % 2 == 0 else False for i in range(ct.default_nb)], dtype="bool")
+        bool_values = pd.Series(data=[True if i % 2 == 0 else False for i in range(ct.default_nb)], dtype="bool")
         df.insert(2, ct.default_bool_field_name, bool_values)
         self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                       primary_field=ct.default_int64_field_name)
         assert self.collection_wrap.num_entities == ct.default_nb
-        self.collection_wrap.create_index(
-            ct.default_float_vec_field_name, index_params=ct.default_flat_index)
+        self.collection_wrap.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
         self.collection_wrap.load()

         # output bool field
-        res, _ = self.collection_wrap.query(default_term_expr, output_fields=[
-                                            ct.default_bool_field_name])
-        assert set(res[0].keys()) == {
-            ct.default_int64_field_name, ct.default_bool_field_name}
+        res, _ = self.collection_wrap.query(default_term_expr, output_fields=[ct.default_bool_field_name])
+        assert set(res[0].keys()) == {ct.default_int64_field_name, ct.default_bool_field_name}

         # not support filter bool field with expr 'bool in [0/ 1]'
         not_support_expr = f'{ct.default_bool_field_name} in [0]'
-        error = {ct.err_code: 1,
-                 ct.err_msg: 'error: value \"0\" in list cannot be casted to Bool'}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "cannot parse expression: bool in [0], error: value '0' in list cannot be casted to Bool"}
         self.collection_wrap.query(not_support_expr, output_fields=[ct.default_bool_field_name],
                                    check_task=CheckTasks.err_res, check_items=error)

@@ -417,11 +415,13 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
         expr_1 = f'{ct.default_int64_field_name} inn [1, 2]'
-        error_1 = {ct.err_code: 1, ct.err_msg: f'unexpected token Identifier("inn")'}
+        error_1 = {ct.err_code: 65535, ct.err_msg: "cannot parse expression: int64 inn [1, 2], "
+                                                   "error: invalid expression: int64 inn [1, 2]"}
         collection_w.query(expr_1, check_task=CheckTasks.err_res, check_items=error_1)

         expr_3 = f'{ct.default_int64_field_name} in not [1, 2]'
-        error_3 = {ct.err_code: 1, ct.err_msg: 'right operand of the InExpr must be array'}
+        error_3 = {ct.err_code: 65535, ct.err_msg: "cannot parse expression: int64 in not [1, 2], "
+                                                   "error: line 1:9 no viable alternative at input 'innot'"}
         collection_w.query(expr_3, check_task=CheckTasks.err_res, check_items=error_3)

     @pytest.mark.tags(CaseLabel.L1)
@@ -522,7 +522,8 @@ class TestQueryParams(TestcaseBase):
                  f'{ct.default_int64_field_name} in "in"',
                  f'{ct.default_int64_field_name} in (mn)']
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
-        error = {ct.err_code: 1, ct.err_msg: "right operand of the InExpr must be array"}
+        error = {ct.err_code: 65535, ct.err_msg: "cannot parse expression: int64 in 1, "
+                                                 "error: line 1:9 no viable alternative at input 'in1'"}
         for expr in exprs:
             collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)

@@ -548,7 +549,9 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
         int_values = [[1., 2.], [1, 2.]]
-        error = {ct.err_code: 1, ct.err_msg: "type mismatch"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "cannot parse expression: int64 in [1.0, 2.0], error: value '1.0' "
+                             "in list cannot be casted to Int64"}
         for values in int_values:
             term_expr = f'{ct.default_int64_field_name} in {values}'
             collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
@@ -562,7 +565,9 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
         constants = [[1], (), {}]
-        error = {ct.err_code: 1, ct.err_msg: "unsupported leaf node"}
+        error = {ct.err_code: 65535,
+                 ct.err_msg: "cannot parse expression: int64 in [[1]], error: value '[1]' in "
+                             "list cannot be casted to Int64"}
         for constant in constants:
             term_expr = f'{ct.default_int64_field_name} in [{constant}]'
             collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
@@ -982,8 +987,8 @@ class TestQueryParams(TestcaseBase):
         # 3. query
         collection_w.load()
         expression = f"{expr_prefix}({json_field}['list'], {get_not_list})"
-        error = {ct.err_code: 1, ct.err_msg: f"cannot parse expression {expression}, error: "
-                                             f"error: {expr_prefix} operation element must be an array"}
+        error = {ct.err_code: 65535, ct.err_msg: f"cannot parse expression: {expression}, "
+                                                 f"error: contains_any operation element must be an array"}
         collection_w.query(expression, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -1397,7 +1402,7 @@ class TestQueryParams(TestcaseBase):
         expected: raise exception
         """
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
-        error = {ct.err_code: 1, ct.err_msg: 'Field int not exist'}
+        error = {ct.err_code: 65535, ct.err_msg: 'field int not exist'}
         output_fields = [["int"], [ct.default_int64_field_name, "int"]]
         for fields in output_fields:
             collection_w.query(default_term_expr, output_fields=fields,
@@ -1470,7 +1475,7 @@ class TestQueryParams(TestcaseBase):
         collection_w.load()

         # query with invalid output_fields
-        error = {ct.err_code: 1, ct.err_msg: f"Field {output_fields[-1]} not exist"}
+        error = {ct.err_code: 65535, ct.err_msg: f"field {output_fields[-1]} not exist"}
         collection_w.query(default_term_expr, output_fields=output_fields,
                            check_task=CheckTasks.err_res, check_items=error)

@@ -1504,7 +1509,9 @@ class TestQueryParams(TestcaseBase):
         df = cf.gen_default_dataframe_data()
         partition_w.insert(df)
         assert partition_w.num_entities == ct.default_nb
-        error = {ct.err_code: 1, ct.err_msg: f'collection {collection_w.name} was not loaded into memory'}
+        error = {ct.err_code: 65535, ct.err_msg: "failed to query: attempt #0: fail to get shard leaders from "
+                                                 "QueryCoord: collection=444857573608181561: collection"
+                                                 " not loaded: unrecoverable error"}
         collection_w.query(default_term_expr, partition_names=[partition_w.name],
                            check_task=CheckTasks.err_res, check_items=error)

@@ -1563,7 +1570,7 @@ class TestQueryParams(TestcaseBase):
         collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
         collection_w.load()
         partition_names = cf.gen_unique_str()
-        error = {ct.err_code: 1, ct.err_msg: f'PartitionName: {partition_names} not found'}
+        error = {ct.err_code: 65535, ct.err_msg: f'partition name {partition_names} not found'}
         collection_w.query(default_term_expr, partition_names=[partition_names],
                            check_task=CheckTasks.err_res, check_items=error)

@@ -1818,9 +1825,9 @@ class TestQueryParams(TestcaseBase):
         term_expr = f'{ct.default_int64_field_name} in {int_values[10: pos + 10]}'
         collection_w.query(term_expr, offset=10, limit=limit,
                            check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1,
-                                        ct.err_msg: "limit [%s] is invalid, should be in range "
-                                                    "[1, 16384], but got %s" % (limit, limit)})
+                           check_items={ct.err_code: 65535,
+                                        ct.err_msg: f"invalid max query result window, (offset+limit) "
+                                                    f"should be in range [1, 16384], but got {limit}"})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("offset", ["12 s", " ", [0, 1], {2}])
@@ -1855,9 +1862,9 @@ class TestQueryParams(TestcaseBase):
         term_expr = f'{ct.default_int64_field_name} in {int_values[10: pos + 10]}'
         collection_w.query(term_expr, offset=offset, limit=10,
                            check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1,
-                                        ct.err_msg: "offset [%s] is invalid, should be in range "
-                                                    "[1, 16384], but got %s" % (offset, offset)})
+                           check_items={ct.err_code: 65535,
+                                        ct.err_msg: f"invalid max query result window, (offset+limit) "
+                                                    f"should be in range [1, 16384], but got {offset}"})

     @pytest.mark.tags(CaseLabel.L2)
     def test_query_during_upsert(self):
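Both hunks above reflect that offset and limit are no longer range-checked separately; the server validates the combined query result window, and each test substitutes whichever parameter it varies into the stable message. The check is effectively the following (assumed shape, illustrative constant name):

MAX_QUERY_RESULT_WINDOW = 16384  # illustrative name for the 16384 cap in the messages

def window_ok(offset: int, limit: int) -> bool:
    return 1 <= offset + limit <= MAX_QUERY_RESULT_WINDOW

assert window_ok(10, 100)
assert not window_ok(10, 16384)   # (offset+limit) = 16394 exceeds the window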
@@ -1933,7 +1940,10 @@ class TestQueryOperation(TestcaseBase):

         # query without load
         collection_w.query(default_term_expr, check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1, ct.err_msg: clem.CollNotLoaded % collection_name})
+                           check_items={ct.err_code: 65535,
+                                        ct.err_msg: "failed to query: attempt #0: fail to get shard leaders"
+                                                    " from QueryCoord: collection=444857573609193909: "
+                                                    "collection not loaded: unrecoverable error"})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("term_expr", [f'{ct.default_int64_field_name} in [0]'])
@@ -2342,7 +2352,9 @@ class TestQueryString(TestcaseBase):
         """
         collection_w = self.init_collection_general(prefix, insert_data=True)[0]
         collection_w.query(expression, check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1, ct.err_msg: "type mismatch"})
+                           check_items={ct.err_code: 65535,
+                                        ct.err_msg: f"cannot parse expression: {expression}, error: value "
+                                                    f"'0' in list cannot be casted to VarChar"})

     @pytest.mark.tags(CaseLabel.L1)
     def test_query_string_expr_with_binary(self):
@@ -2385,8 +2397,9 @@ class TestQueryString(TestcaseBase):
         expression = 'float like "0%"'
         collection_w.query(expression,
                            check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1,
-                                        ct.err_msg: "like operation on non-string field is unsupported"})
+                           check_items={ct.err_code: 65535,
+                                        ct.err_msg: f"cannot parse expression: {expression}, error: like "
+                                                    f"operation on non-string or no-json field is unsupported"})

     @pytest.mark.tags(CaseLabel.L1)
     def test_query_compare_two_fields(self):
@@ -2414,7 +2427,9 @@ class TestQueryString(TestcaseBase):
                                                  primary_field=ct.default_string_field_name)[0]
         expression = 'varchar == int64'
         collection_w.query(expression, check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 1, ct.err_msg: f' cannot parse expression:{expression}'})
+                           check_items={ct.err_code: 65535, ct.err_msg:
+                                        f"cannot parse expression: {expression}, error: comparisons between VarChar, "
+                                        f"element_type: None and Int64 elementType: None are not supported"})

     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.xfail(reason="issue 24637")
@@ -2664,8 +2679,10 @@ class TestQueryCount(TestcaseBase):
         collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
         collection_w.query(expr=default_term_expr, output_fields=[ct.default_count_output],
                            check_task=CheckTasks.err_res,
-                           check_items={"err_code": 1,
-                                        "err_msg": f"has not been loaded to memory or load failed"})
+                           check_items={"err_code": 65535,
+                                        "err_msg": "failed to query: attempt #0: fail to get shard leaders from "
+                                                   "QueryCoord: collection=444857573609396129: collection not loaded:"
+                                                   " unrecoverable error"})

     @pytest.mark.tags(CaseLabel.L1)
     def test_count_duplicate_ids(self):
@@ -2727,13 +2744,11 @@ class TestQueryCount(TestcaseBase):
         collection_w.query(expr=default_expr, output_fields=[ct.default_count_output],
                            partition_names=[ct.default_partition_name],
                            check_task=CheckTasks.check_query_results,
-                           check_items={exp_res: [{count: 0}]}
-                           )
+                           check_items={exp_res: [{count: 0}]})
         collection_w.query(expr=default_expr, output_fields=[ct.default_count_output],
                            partition_names=[p1.name, ct.default_partition_name],
                            check_task=CheckTasks.check_query_results,
-                           check_items={exp_res: [{count: half}]}
-                           )
+                           check_items={exp_res: [{count: half}]})

         # drop p1 partition
         p1.release()
@@ -2741,14 +2756,12 @@ class TestQueryCount(TestcaseBase):
         collection_w.query(expr=default_expr, output_fields=[ct.default_count_output],
                            partition_names=[p1.name],
                            check_task=CheckTasks.err_res,
-                           check_items={"err_code": 1,
-                                        "err_msg": f'partition name: {p1.name} not found'}
-                           )
+                           check_items={"err_code": 65535,
+                                        "err_msg": f'partition name {p1.name} not found'})
         collection_w.query(expr=default_expr, output_fields=[ct.default_count_output],
                            partition_names=[ct.default_partition_name],
                            check_task=CheckTasks.check_query_results,
-                           check_items={exp_res: [{count: 0}]}
-                           )
+                           check_items={exp_res: [{count: 0}]})

     @pytest.mark.tags(CaseLabel.L2)
     def test_count_partition_duplicate(self):
@@ -3070,8 +3083,9 @@ class TestQueryCount(TestcaseBase):

         # release collection and alias drop partition
         collection_w_alias.drop_partition(p_name, check_task=CheckTasks.err_res,
-                                          check_items={ct.err_code: 1,
-                                                       ct.err_msg: "cannot drop the collection via alias"})
+                                          check_items={ct.err_code: 65535,
+                                                       ct.err_msg: "partition cannot be dropped, "
+                                                                   "partition is loaded, please release it first"})
         self.partition_wrap.init_partition(collection_w_alias.collection, p_name)
         self.partition_wrap.release()

@@ -247,14 +247,18 @@ class TestCollectionSearchInvalid(TestcaseBase):
         # 2. search with invalid dim
         log.info("test_search_param_invalid_dim: searching with invalid dim")
         wrong_dim = 129
-        vectors = [[random.random() for _ in range(wrong_dim)]
-                   for _ in range(default_nq)]
+        vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
         collection_w.search(vectors[:default_nq], default_search_field,
                             default_search_params, default_limit, default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 1,
-                                         "err_msg": "The dimension of query entities "
-                                                    "is different from schema"})
+                            check_items={"err_code": 65538,
+                                         "err_msg": 'failed to search: attempt #0: failed to search/query'
+                                                    ' delegator 1 for channel by-dev-rootcoord-dml_1_4'
+                                                    '44857573610608343v0: fail to Search, QueryNode ID=1, '
+                                                    'reason=worker(1) query failed: UnknownError: Assert '
+                                                    '"field_meta.get_sizeof() == element.line_sizeof_" '
+                                                    'at /go/src/github.com/milvus-io/milvus/internal/core/'
+                                                    'src/query/Plan.cpp:52'})

     @pytest.mark.tags(CaseLabel.L2)
     def test_search_param_invalid_field_type(self, get_invalid_fields_type):
@@ -267,13 +271,16 @@ class TestCollectionSearchInvalid(TestcaseBase):
         collection_w = self.init_collection_general(prefix)[0]
         # 2. search with invalid field
         invalid_search_field = get_invalid_fields_type
-        log.info("test_search_param_invalid_field_type: searching with "
-                 "invalid field: %s" % invalid_search_field)
+        log.info("test_search_param_invalid_field_type: searching with invalid field: %s"
+                 % invalid_search_field)
+        error1 = {"err_code": 65535, "err_msg": "failed to search: attempt #0: fail to get shard leaders from "
+                                                "QueryCoord: collection=444857573608382363: collection not "
+                                                "loaded: unrecoverable error"}
+        error2 = {"err_code": 1, "err_msg": f"`anns_field` value {get_invalid_fields_type} is illegal"}
+        error = error2 if get_invalid_fields_type in [[], 1, [1, "2", 3], (1,), {1: 1}] else error1
         collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
                             default_limit, default_search_exp,
-                            check_task=CheckTasks.err_res,
-                            check_items={"err_code": 1,
-                                         "err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})
+                            check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
     def test_search_param_invalid_field_value(self, get_invalid_fields_value):
@@ -291,9 +298,9 @@ class TestCollectionSearchInvalid(TestcaseBase):
         collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
                             default_limit, default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 1,
-                                         "err_msg": "Field %s doesn't exist in schema"
-                                                    % invalid_search_field})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "failed to create query plan: failed to get field schema "
+                                                    "by name: %s not found" % invalid_search_field})

     @pytest.mark.tags(CaseLabel.L1)
     def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
@@ -305,16 +312,16 @@ class TestCollectionSearchInvalid(TestcaseBase):
         # 1. initialize with data
         collection_w = self.init_collection_general(prefix)[0]
         # 2. search with invalid metric_type
-        log.info(
-            "test_search_param_invalid_metric_type: searching with invalid metric_type")
+        log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
         invalid_metric = get_invalid_metric_type
-        search_params = {"metric_type": invalid_metric,
-                         "params": {"nprobe": 10}}
+        search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
         collection_w.search(vectors[:default_nq], default_search_field, search_params,
                             default_limit, default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 1,
-                                         "err_msg": "metric type not found"})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "failed to search: attempt #0: fail to get shard leaders "
+                                                    "from QueryCoord: collection=444818512783277152: collection "
+                                                    "not loaded: unrecoverable error"})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("index, params",
@@ -337,7 +344,11 @@ class TestCollectionSearchInvalid(TestcaseBase):
         collection_w.load()
         # 3. search
         invalid_search_params = cf.gen_invalid_search_params_type()
-        message = "Search params check failed"
+        message = ("failed to search: attempt #0: failed to search/query delegator 1 for channel "
+                   "by-dev-rootcoord-dml_8_444857573608382882v0: fail to Search, QueryNode ID=1, "
+                   "reason=worker(1) query failed: UnknownError: => failed to search: invalid param "
+                   "in json: invalid json key invalid_key: attempt #1: no available shard delegator "
+                   "found: service unavailable")
         for invalid_search_param in invalid_search_params:
             if index == invalid_search_param["index_type"]:
                 search_params = {"metric_type": "L2",
@ -346,7 +357,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
|
|||
search_params, default_limit,
|
||||
default_search_exp,
|
||||
check_task=CheckTasks.err_res,
|
||||
check_items={"err_code": 1,
|
||||
check_items={"err_code": 65538,
|
||||
"err_msg": message})
|
||||
|
||||
@pytest.mark.skip("not fixed yet")
|
||||
|
@ -408,13 +419,13 @@ class TestCollectionSearchInvalid(TestcaseBase):
|
|||
# 2. search with invalid limit (topK)
|
||||
log.info("test_search_param_invalid_limit_value: searching with "
|
||||
"invalid limit (topK) = %s" % limit)
|
||||
err_msg = "limit %d is too large!" % limit
|
||||
err_msg = f"topk [{limit}] is invalid, top k should be in range [1, 16384], but got {limit}"
|
||||
if limit == 0:
|
||||
err_msg = "`limit` value 0 is illegal"
|
||||
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
|
||||
limit, default_search_exp,
|
||||
check_task=CheckTasks.err_res,
|
||||
check_items={"err_code": 1,
|
||||
check_items={"err_code": 65535,
|
||||
"err_msg": err_msg})

@pytest.mark.tags(CaseLabel.L2)

@@ -489,8 +500,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
check_items={"err_code": 65535,
"err_msg": "failed to create query plan: cannot parse expression: %s"
% invalid_search_expr})

@pytest.mark.tags(CaseLabel.L2)

@@ -521,16 +532,14 @@ class TestCollectionSearchInvalid(TestcaseBase):
method: test search invalid bool
expected: searched failed
"""
collection_w = self.init_collection_general(
prefix, True, is_all_data_type=True)[0]
log.info(
"test_search_with_expression: searching with expression: %s" % expression)
collection_w = self.init_collection_general(prefix, True, is_all_data_type=True)[0]
log.info("test_search_with_expression: searching with expression: %s" % expression)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, expression,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "failed to create query plan: cannot parse "
"expression: %s" % expression})
check_items={"err_code": 65535,
"err_msg": "failed to create query plan: predicate is not a "
"boolean expression: %s, data type: Bool" % expression})

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression", ["int64 like 33", "float LIKE 33"])

@@ -614,9 +623,10 @@ class TestCollectionSearchInvalid(TestcaseBase):
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get shard leaders from"
" QueryCoord: collection=444818512783277916: collection not"
" loaded: unrecoverable error"})

@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):

@@ -629,10 +639,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(
prefix, True, 10, partition_num, is_index=False)[0]
collection_w.create_index(
ct.default_float_vec_field_name, index_params=ct.default_flat_index)
collection_w = self.init_collection_general(prefix, True, 10, partition_num, is_index=False)[0]
collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
par = collection_w.partitions
par_name = par[partition_num].name
par[partition_num].load()

@@ -645,8 +653,10 @@ class TestCollectionSearchInvalid(TestcaseBase):
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get shard "
"leaders from QueryCoord: collection=444857573608588384: "
"collection not loaded: unrecoverable error"})

@pytest.mark.skip("enable this later using session/strong consistency")
@pytest.mark.tags(CaseLabel.L1)

@@ -736,16 +746,14 @@ class TestCollectionSearchInvalid(TestcaseBase):
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(
prefix, True, 1000, partition_num, is_index=False)[0]
collection_w = self.init_collection_general(prefix, True, 1000, partition_num, is_index=False)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.create_index(
ct.default_float_vec_field_name, index_params=ct.default_flat_index)
collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")

@@ -753,8 +761,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
check_items={"err_code": 65535,
"err_msg": "partition name search_partition_0 not found"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",

@@ -782,13 +790,17 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 3. search
log.info("test_search_different_index_invalid_params: Searching after "
"creating index-%s" % index)
msg = ("failed to search: attempt #0: failed to search/query delegator 1 for channel "
"by-dev-rootcoord-dml_10_444857573608789760v0: fail to Search, QueryNode ID=1, "
"reason=worker(1) query failed: UnknownError: [json.exception.type_error.302] "
"type must be number, but is string: attempt #1: no available shard delegator "
"found: service unavailable")
search_params = cf.gen_invalid_search_param(index)
collection_w.search(vectors, default_search_field,
search_params[0], default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search params check failed"})
check_items={"err_code": 65538, "err_msg": msg})

@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_not_existed(self):

@@ -800,17 +812,15 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 1. initialize with data
collection_w = self.init_collection_general(prefix, is_index=False)[0]
# 2. create index
default_index = {"index_type": "IVF_FLAT",
"params": {"nlist": 128}, "metric_type": "L2"}
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non_exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [
partition_name],
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
check_items={"err_code": 65535,
"err_msg": "partition name %s not found" % partition_name})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("nq", [16385])

@@ -865,17 +875,18 @@ class TestCollectionSearchInvalid(TestcaseBase):
expected: raise exception and report error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(
prefix, True, is_binary=True)[0]
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(
2, default_dim)
query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
msg = ("failed to search: attempt #0: failed to search/query delegator 1 for channel "
"by-dev-rootcoord-dml_4_444857573608384003v0: fail to Search, QueryNode ID=1, "
"reason=collection:444857573608384003, metric type not match: expected=JACCARD, "
"actual=L2: invalid parameter: attempt #1: no available shard delegator found: service unavailable")
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Data type and metric type mis-match"})
check_items={"err_code": 65538, "err_msg": msg})

@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_not_exist(self):

@@ -893,8 +904,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "Field int63 not exist"})
check_items={ct.err_code: 65535,
ct.err_msg: "field int63 not exist"})

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="Now support output vector field")

@@ -958,8 +969,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
check_items={"err_code": 65535,
"err_msg": f"field {output_fields[-1]} not exist"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("ignore_growing", ct.get_invalid_strs[2:8])

@@ -1045,14 +1056,16 @@ class TestCollectionSearchInvalid(TestcaseBase):
log.info("test_range_search_invalid_radius: Range searching collection %s" %
collection_w.name)
radius = get_invalid_range_search_paras
range_search_params = {"metric_type": "L2", "params": {
"nprobe": 10, "radius": radius, "range_filter": 0}}
range_search_params = {"metric_type": "L2",
"params": {"nprobe": 10, "radius": radius, "range_filter": 0}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"type must be number"})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get shard "
"leaders from QueryCoord: collection=444857573608586463:"
" collection not loaded: unrecoverable error"})

@pytest.mark.tags(CaseLabel.L2)
def test_range_search_invalid_range_filter(self, get_invalid_range_search_paras):

@@ -1067,14 +1080,16 @@ class TestCollectionSearchInvalid(TestcaseBase):
log.info("test_range_search_invalid_range_filter: Range searching collection %s" %
collection_w.name)
range_filter = get_invalid_range_search_paras
range_search_params = {"metric_type": "L2", "params": {
"nprobe": 10, "radius": 1, "range_filter": range_filter}}
range_search_params = {"metric_type": "L2",
"params": {"nprobe": 10, "radius": 1, "range_filter": range_filter}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"type must be number"})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get"
" shard leaders from QueryCoord: collection=444857573608586774"
": collection not loaded: unrecoverable error"})

@pytest.mark.tags(CaseLabel.L1)
def test_range_search_invalid_radius_range_filter_L2(self):

@@ -1088,14 +1103,15 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 2. range search
log.info("test_range_search_invalid_radius_range_filter_L2: Range searching collection %s" %
collection_w.name)
range_search_params = {"metric_type": "L2", "params": {
"nprobe": 10, "radius": 1, "range_filter": 10}}
range_search_params = {"metric_type": "L2", "params": {"nprobe": 10, "radius": 1, "range_filter": 10}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"range_filter must less than radius except IP"})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get shard leaders from "
"QueryCoord: collection=444818512783278558: collection not loaded:"
" unrecoverable error"})

@pytest.mark.tags(CaseLabel.L1)
def test_range_search_invalid_radius_range_filter_IP(self):

@@ -1109,14 +1125,16 @@ class TestCollectionSearchInvalid(TestcaseBase):
# 2. range search
log.info("test_range_search_invalid_radius_range_filter_IP: Range searching collection %s" %
collection_w.name)
range_search_params = {"metric_type": "IP", "params": {
"nprobe": 10, "radius": 10, "range_filter": 1}}
range_search_params = {"metric_type": "IP",
"params": {"nprobe": 10, "radius": 10, "range_filter": 1}}
collection_w.search(vectors[:default_nq], default_search_field,
range_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"range_filter must more than radius when IP"})
check_items={"err_code": 65535,
"err_msg": "failed to search: attempt #0: fail to get shard leaders from "
"QueryCoord: collection=444818512783279076: collection not "
"loaded: unrecoverable error"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="annoy not supported any more")

@@ -1229,26 +1247,27 @@ class TestCollectionSearchInvalid(TestcaseBase):
enable_dynamic_field=True)[0]

# create index
index_params_one = {"index_type": "IVF_SQ8",
"metric_type": "COSINE", "params": {"nlist": 64}}
index_params_one = {"index_type": "IVF_SQ8", "metric_type": "COSINE", "params": {"nlist": 64}}
collection_w.create_index(
ct.default_float_vec_field_name, index_params_one, index_name=index_name1)
index_params_two = {}
collection_w.create_index(
ct.default_string_field_name, index_params=index_params_two, index_name=index_name2)
collection_w.create_index(ct.default_string_field_name, index_params=index_params_two, index_name=index_name2)
assert collection_w.has_index(index_name=index_name2)
collection_w.load()
# delete entity
expr = 'float >= int64'
# search with id 0 vectors
vectors = [[random.random() for _ in range(default_dim)]
for _ in range(default_nq)]
msg = ("failed to search: attempt #0: failed to search/query delegator 3 for channel by-dev-rootcoord-dml_15_"
"444818512783279330v0: fail to Search, QueryNode ID=3, reason=worker(3) query failed: UnknownError: "
"=> unsupported right datatype JSON of compare expr: attempt #1: no available shard delegator found: s"
"ervice unavailable")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"unsupported left datatype of compare expr"})
check_items={"err_code": 65538,
"err_msg": msg})


class TestCollectionSearch(TestcaseBase):

@@ -3552,13 +3571,14 @@ class TestCollectionSearch(TestcaseBase):
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
error1 = {"err_code": 65535, "err_msg": "field %s not exist" % invalid_output_fields[0]}
error2 = {"err_code": 1, "err_msg": "`output_fields` value %s is illegal" % invalid_output_fields[0]}
error = error2 if invalid_output_fields == [""] else error1
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
output_fields=invalid_output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "field %s is not exist" % invalid_output_fields[0]})
check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):

@@ -4227,20 +4247,17 @@ class TestSearchBase(TestcaseBase):
"""
top_k = 16385  # max top k is 16384
nq = get_nq
collection_w, data, _, insert_ids = self.init_collection_general(
prefix, insert_data=True, nb=nq)[0:4]
collection_w, data, _, insert_ids = self.init_collection_general(prefix, insert_data=True, nb=nq)[0:4]
collection_w.load()
if top_k <= max_top_k:
res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params,
top_k)
res, _ = collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k)
assert len(res[0]) <= top_k
else:
collection_w.search(vectors[:nq], default_search_field, default_search_params,
top_k,
collection_w.search(vectors[:nq], default_search_field, default_search_params, top_k,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "no Available QueryNode result, "
"filter reason limit %s is too large," % top_k})
check_items={"err_code": 65535,
"err_msg": f"topk [{top_k}] is invalid, top k should be in range"
f" [1, 16384], but got {top_k}"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",

@@ -4885,20 +4902,19 @@ class TestSearchString(TestcaseBase):
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(
prefix, True, auto_id=auto_id, dim=default_dim)[0:4]
self.init_collection_general(prefix, True, auto_id=auto_id, dim=default_dim)[0:4]
# 2. search
log.info("test_search_string_with_invalid_expr: searching collection %s" %
collection_w.name)
vectors = [[random.random() for _ in range(default_dim)]
for _ in range(default_nq)]
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_invaild_string_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "failed to create query plan: type mismatch"}
)
check_items={"err_code": 65535,
"err_msg": "failed to create query plan: cannot parse expression: "
"varchar >= 0, error: comparisons between VarChar, "
"element_type: None and Int64 elementType: None are not supported"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_normal_string_expressions([ct.default_string_field_name]))

@@ -5749,7 +5765,7 @@ class TestSearchPaginationInvalid(TestcaseBase):
"err_msg": "offset [%s] is invalid" % offset})

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("offset", [-1, 16386])
@pytest.mark.parametrize("offset", [-1, 16385])
def test_search_pagination_with_invalid_offset_value(self, offset):
"""
target: test search pagination with invalid offset value

@@ -5757,20 +5773,17 @@ class TestSearchPaginationInvalid(TestcaseBase):
expected: raise exception
"""
# 1. initialize
collection_w = self.init_collection_general(
prefix, True, dim=default_dim)[0]
collection_w = self.init_collection_general(prefix, True, dim=default_dim)[0]
# 2. search
search_param = {"metric_type": "COSINE",
"params": {"nprobe": 10}, "offset": offset}
vectors = [[random.random() for _ in range(default_dim)]
for _ in range(default_nq)]
search_param = {"metric_type": "COSINE", "params": {"nprobe": 10}, "offset": offset}
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
check_items={"err_code": 65535,
"err_msg": "offset [%d] is invalid, should be in range "
"[1, 16385], but got %d" % (offset, offset)})
"[1, 16384], but got %d" % (offset, offset)})


class TestSearchDiskann(TestcaseBase):

@@ -5881,18 +5894,13 @@ class TestSearchDiskann(TestcaseBase):
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(
prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
# 2. create index
default_index = {"index_type": "DISKANN",
"metric_type": "L2", "params": {}}
collection_w.create_index(
ct.default_float_vec_field_name, default_index)
default_index = {"index_type": "DISKANN", "metric_type": "L2", "params": {}}
collection_w.create_index(ct.default_float_vec_field_name, default_index)
collection_w.load()
default_search_params = {"metric_type": "L2",
"params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)]
for _ in range(default_nq)]
default_search_params = {"metric_type": "L2", "params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
output_fields = [default_int64_field_name,
default_float_field_name, default_string_field_name]
collection_w.search(vectors[:default_nq], default_search_field,

@@ -5900,9 +5908,8 @@ class TestSearchDiskann(TestcaseBase):
default_search_exp,
output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "fail to search on all shard leaders"}
)
check_items={"err_code": 65538,
"err_msg": "fail to search on all shard leaders"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("limit", [20])

@@ -5917,27 +5924,21 @@ class TestSearchDiskann(TestcaseBase):
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(
prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
# 2. create index
default_index = {"index_type": "DISKANN",
"metric_type": "L2", "params": {}}
collection_w.create_index(
ct.default_float_vec_field_name, default_index)
default_index = {"index_type": "DISKANN", "metric_type": "L2", "params": {}}
collection_w.create_index(ct.default_float_vec_field_name, default_index)
collection_w.load()
default_search_params = {"metric_type": "L2",
"params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)]
for _ in range(default_nq)]
output_fields = [default_int64_field_name,
default_float_field_name, default_string_field_name]
default_search_params = {"metric_type": "L2", "params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
output_fields = [default_int64_field_name, default_float_field_name, default_string_field_name]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit,
default_search_exp,
output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "fail to search on all shard leaders"})
check_items={"err_code": 65538,
"err_msg": "UnknownError"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("limit", [6553])

@@ -5952,27 +5953,22 @@ class TestSearchDiskann(TestcaseBase):
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(
prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim, is_index=False)[0:4]
# 2. create index
default_index = {"index_type": "DISKANN",
"metric_type": "L2", "params": {}}
collection_w.create_index(
ct.default_float_vec_field_name, default_index)
default_index = {"index_type": "DISKANN", "metric_type": "L2", "params": {}}
collection_w.create_index(ct.default_float_vec_field_name, default_index)
collection_w.load()
default_search_params = {"metric_type": "L2",
"params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)]
for _ in range(default_nq)]
output_fields = [default_int64_field_name,
default_float_field_name, default_string_field_name]
default_search_params = {"metric_type": "L2", "params": {"search_list": search_list}}
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
output_fields = [default_int64_field_name, default_float_field_name, default_string_field_name]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit,
default_search_exp,
output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "fail to search on all shard leaders"})
check_items={"err_code": 65538,
"err_msg": "failed to search: attempt #0: failed to search/query "
"delegator 1 for channel by-dev-.."})

@pytest.mark.tags(CaseLabel.L2)
def test_search_with_diskann_with_string_pk(self, dim, enable_dynamic_field):

@@ -7982,8 +7978,7 @@ class TestCollectionLoadOperation(TestcaseBase):
expected: No exception
"""
# insert data
collection_w = self.init_collection_general(
prefix, True, 200, partition_num=1, is_index=False)[0]
collection_w = self.init_collection_general(prefix, True, 200, partition_num=1, is_index=False)[0]
partition_w1, partition_w2 = collection_w.partitions
collection_w.create_index(default_search_field, default_index_params)
# load && release

@@ -7995,18 +7990,17 @@ class TestCollectionLoadOperation(TestcaseBase):
collection_w.delete(f"int64 in {delete_ids}")
# search on collection, partition1, partition2
collection_w.search(vectors[:1], field_name, default_search_params, 200,
partition_names=[
partition_w1.name, partition_w2.name],
partition_names=[partition_w1.name, partition_w2.name],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: 'not loaded'})
check_items={ct.err_code: 65535, ct.err_msg: 'not loaded'})
collection_w.search(vectors[:1], field_name, default_search_params, 200,
partition_names=[partition_w1.name],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: 'not loaded'})
check_items={ct.err_code: 65535, ct.err_msg: 'not loaded'})
collection_w.search(vectors[:1], field_name, default_search_params, 200,
partition_names=[partition_w2.name],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: 'not found'})
check_items={ct.err_code: 65535, ct.err_msg: 'not found'})

@pytest.mark.tags(CaseLabel.L2)
def test_compact_load_collection_release_partition(self):

@@ -8727,8 +8721,7 @@ class TestCollectionLoadOperation(TestcaseBase):
expected: No exception
"""
# init the collection
collection_w = self.init_collection_general(
prefix, True, 200, partition_num=1, is_index=False)[0]
collection_w = self.init_collection_general(prefix, True, 200, partition_num=1, is_index=False)[0]
partition_w1, partition_w2 = collection_w.partitions
collection_w.create_index(default_search_field, default_index_params)
# load and release

@@ -8738,8 +8731,10 @@ class TestCollectionLoadOperation(TestcaseBase):
# search on collection
collection_w.search(vectors[:1], field_name, default_search_params, 200,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "fail to get shard leaders from QueryCoord: collection not loaded"})
check_items={ct.err_code: 65535,
ct.err_msg: "failed to search: attempt #0: fail to get shard leaders "
"from QueryCoord: collection=444857573614268173: "
"collection not loaded: unrecoverable error"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue #24446")


@@ -94,7 +94,8 @@ class TestUtilityParams(TestcaseBase):
self.utility_wrap.has_collection(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
check_items={ct.err_code: 1100,
ct.err_msg: "collection name should not be empty: invalid parameter"})
# elif not isinstance(c_name, str): self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res,
# check_items={ct.err_code: 1, ct.err_msg: "illegal"})

@@ -112,7 +113,8 @@ class TestUtilityParams(TestcaseBase):
self.utility_wrap.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
check_items={ct.err_code: 1100,
ct.err_msg: "collection name should not be empty: invalid parameter"})

@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_name_invalid(self, get_invalid_partition_name):

@@ -134,9 +136,11 @@ class TestUtilityParams(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_name_invalid(self, get_invalid_collection_name):
self._connect()
error = f'`collection_name` value {get_invalid_collection_name} is illegal'
error1 = {ct.err_code: 1, ct.err_msg: f"`collection_name` value {get_invalid_collection_name} is illegal"}
error2 = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {get_invalid_collection_name}."}
error = error1 if get_invalid_collection_name in [[], 1, [1, '2', 3], (1,), {1: 1}, None, ""] else error2
self.utility_wrap.drop_collection(get_invalid_collection_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: error})
check_items=error)

# TODO: enable
@pytest.mark.tags(CaseLabel.L2)

@@ -299,7 +303,7 @@ class TestUtilityParams(TestcaseBase):
self.utility_wrap.wait_for_loading_complete(
collection_w.name, partition_names=[ct.default_tag],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 15, ct.err_msg: f'partitionID of partitionName:{ct.default_tag} can not be find'})
check_items={ct.err_code: 200, ct.err_msg: f'partition={ct.default_tag}: partition not found'})

@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_not_existed(self):

@@ -584,9 +588,11 @@ class TestUtilityParams(TestcaseBase):
new_collection_name = get_invalid_value_collection_name
self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 9,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
check_items={"err_code": 1100,
"err_msg": "Invalid collection name: %s. the first "
"character of a collection name must be an "
"underscore or letter: invalid parameter"
% new_collection_name})

@pytest.mark.tags(CaseLabel.L2)
def test_rename_collection_not_existed_collection(self):

@@ -617,10 +623,10 @@ class TestUtilityParams(TestcaseBase):
old_collection_name = collection_w.name
self.utility_wrap.rename_collection(old_collection_name, old_collection_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "duplicated new collection name :{} with other "
"collection name or alias".format(
collection_w.name)})
check_items={"err_code": 65535,
"err_msg": "duplicated new collection name default:{}"
" with other collection name or"
" alias".format(collection_w.name)})

@pytest.mark.tags(CaseLabel.L1)
def test_rename_collection_existed_collection_alias(self):

@@ -636,8 +642,8 @@ class TestUtilityParams(TestcaseBase):
self.utility_wrap.create_alias(old_collection_name, alias)
self.utility_wrap.rename_collection(old_collection_name, alias,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "duplicated new collection name :{} with "
check_items={"err_code": 65535,
"err_msg": "duplicated new collection name default:{} with "
"other collection name or alias".format(alias)})

@pytest.mark.tags(CaseLabel.L1)

@@ -798,6 +804,7 @@ class TestUtilityBase(TestcaseBase):
assert len(res) == 0

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip("issue #27624")
def test_index_process_collection_not_existed(self):
"""
target: test building_process

@@ -826,6 +833,7 @@ class TestUtilityBase(TestcaseBase):
assert res == exp_res

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip("issue #27624")
def test_index_process_collection_insert_no_index(self):
"""
target: test building_process

@@ -887,6 +895,7 @@ class TestUtilityBase(TestcaseBase):
raise MilvusException(1, f"Index build completed in more than 5s")

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip("issue #27624")
def test_wait_index_collection_not_existed(self):
"""
target: test wait_index

@@ -948,9 +957,9 @@ class TestUtilityBase(TestcaseBase):
assert collection_w.num_entities == ct.default_nb
self.utility_wrap.loading_progress(collection_w.name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'fail to show collections from '
'the querycoord, no data'})
check_items={ct.err_code: 101,
ct.err_msg: 'collection= : '
'collection not loaded'})

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("nb", [ct.default_nb, 5000])