mirror of https://github.com/milvus-io/milvus.git
test: add search iterator cases and alter collection properties (#39406)
/kind improvement

---------

Signed-off-by: laurazhao0611 <laurazhao@zilliz.com>
Co-authored-by: laurazhao0611 <laurazhao@zilliz.com>

pull/39503/head
parent 40e6fcd868
commit 41352e40e4
@@ -171,6 +171,20 @@ class TestMilvusClientV2Base(Base):
                                       limit=limit, output_fields=output_fields, search_params=search_params,
                                       **kwargs).run()
        return res, check_result

    @trace()
    def hybrid_search(self, client, collection_name, reqs, rerank, limit=10,
                      output_fields=None, timeout=None, partition_names=None,
                      check_task=None, check_items=None, **kwargs):
        timeout = TIMEOUT if timeout is None else timeout
        # kwargs.update({"timeout": timeout})
        func_name = sys._getframe().f_code.co_name
        res, check = api_request([client.hybrid_search, collection_name, reqs, rerank, limit,
                                  output_fields, timeout, partition_names], **kwargs)
        check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                       collection_name=collection_name, reqs=reqs, rerank=rerank, limit=limit,
                                       output_fields=output_fields, timeout=timeout, partition_names=partition_names, **kwargs).run()
        return res, check_result

    @trace()
    def query(self, client, collection_name, timeout=None, check_task=None, check_items=None, **kwargs):
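For context, a rough sketch of how a test case might drive this new hybrid_search wrapper, assuming the pymilvus AnnSearchRequest/RRFRanker helpers and a collection with two vector fields named "embeddings" and "embeddings_2" (the field names, vectors, and limits below are illustrative, not taken from this patch):

from pymilvus import AnnSearchRequest, RRFRanker

# One ANN request per vector field; params and limits are placeholders.
req1 = AnnSearchRequest(data=[[0.1] * default_dim], anns_field="embeddings",
                        param={"metric_type": "COSINE"}, limit=10)
req2 = AnnSearchRequest(data=[[0.2] * default_dim], anns_field="embeddings_2",
                        param={"metric_type": "COSINE"}, limit=10)

# RRFRanker fuses the per-field result lists by reciprocal rank before the
# wrapper forwards everything to client.hybrid_search and runs ResponseChecker.
res, check = self.hybrid_search(client, collection_name,
                                reqs=[req1, req2], rerank=RRFRanker(),
                                limit=10, output_fields=["id"])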
@@ -1137,3 +1137,157 @@ class TestMilvusClientUsingDatabaseInvalid(TestMilvusClientV2Base):
        expected: drop successfully
        """
        pass


class TestMilvusClientCollectionPropertiesInvalid(TestMilvusClientV2Base):
    """ Test case of alter/drop collection properties """
    """
    ******************************************************************
    # The following are invalid base cases
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("alter_name", ["%$#", "test", " "])
    def test_milvus_client_alter_collection_properties_invalid_collection_name(self, alter_name):
        """
        target: test alter collection properties with invalid collection name
        method: alter collection properties with a non-existent collection name
        expected: raise exception
        """
        client = self._client()
        # alter collection properties
        properties = {'mmap.enabled': True}
        error = {ct.err_code: 100, ct.err_msg: f"collection not found[database=default][collection={alter_name}]"}
        self.alter_collection_properties(client, alter_name, properties,
                                         check_task=CheckTasks.err_res,
                                         check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("properties", [""])
    def test_milvus_client_alter_collection_properties_invalid_properties(self, properties):
        """
        target: test alter collection properties with invalid properties
        method: alter collection properties with invalid properties
        expected: raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, id_type="string", max_length=ct.default_length)
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 0})
        error = {ct.err_code: 1, ct.err_msg: f"`properties` value {properties} is illegal"}
        self.alter_collection_properties(client, collection_name, properties,
                                         check_task=CheckTasks.err_res,
                                         check_items=error)

        self.drop_collection(client, collection_name)

    # TODO properties with non-existent params

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("drop_name", ["%$#", "test", " "])
    def test_milvus_client_drop_collection_properties_invalid_collection_name(self, drop_name):
        """
        target: test drop collection properties with invalid collection name
        method: drop collection properties with a non-existent collection name
        expected: raise exception
        """
        client = self._client()
        # drop collection properties
        properties = {'mmap.enabled': True}
        error = {ct.err_code: 100, ct.err_msg: f"collection not found[database=default][collection={drop_name}]"}
        self.drop_collection_properties(client, drop_name, properties,
                                        check_task=CheckTasks.err_res,
                                        check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("property_keys", ["", {}, []])
    def test_milvus_client_drop_collection_properties_invalid_properties(self, property_keys):
        """
        target: test drop collection properties with invalid properties
        method: drop collection properties with invalid properties
        expected: raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, id_type="string", max_length=ct.default_length)
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 0})
        error = {ct.err_code: 65535,
                 ct.err_msg: "The collection properties to alter and keys to delete must not be empty at the same time"}
        self.drop_collection_properties(client, collection_name, property_keys,
                                        check_task=CheckTasks.err_res,
                                        check_items=error)

        self.drop_collection(client, collection_name)

    # TODO properties with non-existent params


class TestMilvusClientCollectionPropertiesValid(TestMilvusClientV2Base):
    """ Test case of alter/drop collection properties """

    """
    ******************************************************************
    # The following are valid base cases
    ******************************************************************
    """
    @pytest.mark.tags(CaseLabel.L1)
    def test_milvus_client_collection_alter_collection_properties(self):
        """
        target: test alter collection properties
        method: alter collection properties
        expected: alter successfully
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim)
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.release_collection(client, collection_name)
        properties = {"mmap.enabled": True}
        self.alter_collection_properties(client, collection_name, properties)
        describe = self.describe_collection(client, collection_name)[0].get("properties")
        assert describe["mmap.enabled"] == 'True'
        self.release_collection(client, collection_name)
        properties = {"mmap.enabled": False}
        self.alter_collection_properties(client, collection_name, properties)
        describe = self.describe_collection(client, collection_name)[0].get("properties")
        assert describe["mmap.enabled"] == 'False'
        # TODO add a case that confirms the property actually takes effect
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    def test_milvus_client_collection_drop_collection_properties(self):
        """
        target: test drop collection properties
        method: drop collection properties
        expected: drop successfully
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim)
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.release_collection(client, collection_name)
        properties = {"mmap.enabled": True}
        self.alter_collection_properties(client, collection_name, properties)
        describe = self.describe_collection(client, collection_name)[0].get("properties")
        assert describe["mmap.enabled"] == 'True'
        property_keys = ["mmap.enabled"]
        self.drop_collection_properties(client, collection_name, property_keys)
        describe = self.describe_collection(client, collection_name)[0].get("properties")
        assert "mmap.enabled" not in describe
        # TODO add a case that confirms the property is actually removed
        self.drop_collection(client, collection_name)
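Outside the test harness, the happy path these property tests exercise looks roughly like the following against a bare MilvusClient (a sketch assuming a pymilvus 2.5-style client; the URI and collection name are placeholders):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # placeholder endpoint

# Enable mmap on a released collection, then confirm it via describe_collection.
client.release_collection("my_collection")
client.alter_collection_properties("my_collection", properties={"mmap.enabled": True})
props = client.describe_collection("my_collection").get("properties", {})
assert props.get("mmap.enabled") == "True"  # the server reports the value as a string

# Drop the key again; it should disappear from the collection description.
client.drop_collection_properties("my_collection", property_keys=["mmap.enabled"])
props = client.describe_collection("my_collection").get("properties", {})
assert "mmap.enabled" not in props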
@@ -10,6 +10,7 @@ from common.constants import *
from pymilvus import DataType

prefix = "client_search"
partition_prefix = "client_partition"
epsilon = ct.epsilon
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
@@ -552,3 +553,603 @@ class TestMilvusClientSearchValid(TestMilvusClientV2Base):
                         'params': cf.get_search_params_params('IVF_FLAT')}
        self.search(client, collection_name, data=[search_vector], filter='id >= 10',
                    search_params=search_params, check_task=CheckTasks.err_res, check_items=error)


class TestMilvusClientSearchIteratorInvalid(TestMilvusClientV2Base):
    """ Test case of search iterator """

    """
    ******************************************************************
    # The following are invalid base cases
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.skip("ambiguous error info")
    def test_search_iterator_collection_not_existed(self):
        """
        target: test search iterator on a non-existent collection
        method: search iterator with a non-existent collection name
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str("nonexistent")
        error = {ct.err_code: 100,
                 ct.err_msg: f"collection not found[database=default]"
                             f"[collection={collection_name}]"}
        rng = np.random.default_rng(seed=19530)
        vectors_to_search = rng.random((1, default_dim))
        insert_ids = [i for i in range(default_nb)]
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             check_task=CheckTasks.err_res,
                             check_items=error)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("data", ["str", [[1, 2], [3, 4]]])
    def test_search_iterator_with_multiple_vectors(self, data):
        """
        target: test search iterator with multiple vectors
        method: run search iterator with multiple vectors
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        error = {ct.err_code: 1,
                 ct.err_msg: f"search_iterator_v2 does not support processing multiple vectors simultaneously"}
        self.search_iterator(client, collection_name, data,
                             batch_size=5,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("data", [[]])
    def test_search_iterator_with_empty_data(self, data):
        """
        target: test search iterator with an empty vector
        method: run search iterator with an empty vector
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        error = {ct.err_code: 1,
                 ct.err_msg: f"The vector data for search cannot be empty"}
        self.search_iterator(client, collection_name, data,
                             batch_size=5,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("batch_size", [-1])
    def test_search_iterator_with_invalid_batch_size(self, batch_size):
        """
        target: test search iterator with invalid batch size
        method: run search iterator with invalid batch size
        expected: Raise exception
        """
        # These two values also produce inappropriate error messages:
        #   1.5: `limit` value 1.5 is illegal
        #   "1": '<' not supported between instances of 'str' and 'int'
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"batch size cannot be less than zero"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=batch_size,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("expr", ["invalidexpr"])
    def test_search_iterator_with_invalid_expr(self, expr):
        """
        target: test search iterator with invalid expr
        method: run search iterator with invalid expr
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1100,
                 ct.err_msg: f"failed to create query plan: predicate is not a boolean expression: invalidexpr, "
                             f"data type: JSON: invalid parameter"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             filter=expr,
                             batch_size=20,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("limit", [-10])
    @pytest.mark.skip("https://github.com/milvus-io/milvus/issues/39066")
    def test_search_iterator_with_invalid_limit(self, limit):
        """
        target: test search iterator with invalid limit
        method: run search iterator with invalid limit
        expected: Raise exception
        note: the limit param of search_iterator will be deprecated in the future
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"`limit` value {limit} is illegal"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             limit=limit,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("output_fields", ["id"])
    @pytest.mark.skip("A field that does not currently exist will simply have no effect, "
                      "but it would be better if an error were reported.")
    def test_search_iterator_with_invalid_output(self, output_fields):
        """
        target: test search iterator with a non-existent output field
        method: run search iterator with a non-existent output field
        expected: Raise exception
        actual: no error is raised, the field simply has no effect
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"`output_fields` value {output_fields} is illegal"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             limit=10,
                             output_fields=output_fields,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("search_params", ["tt"])
    @pytest.mark.skip("A param that does not currently exist will simply have no effect, "
                      "but it would be better if an error were reported.")
    def test_search_iterator_with_invalid_search_params(self, search_params):
        """
        target: test search iterator with a non-existent search_params key
        method: run search iterator with a non-existent search_params key
        expected: Raise exception
        actual: no error is raised, the param simply has no effect
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"'str' object has no attribute 'get'"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             limit=10,
                             output_fields=["id", "float", "varchar"],
                             search_params=search_params,
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", ["client_partition_85Jv3Pf3"])
    def test_search_iterator_with_invalid_partition_name(self, partition_name):
        """
        target: test search iterator with invalid partition name
        method: run search iterator with invalid partition name
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        self.create_partition(client, collection_name, partition_name)
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2,
                                              "num_partitions": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"`partition_name_array` value {partition_name} is illegal"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             partition_names=partition_name,
                             batch_size=5,
                             limit=10,
                             output_fields=["id", "float", "varchar"],
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", ["nonexistent"])
    def test_search_iterator_with_nonexistent_partition_name(self, partition_name):
        """
        target: test search iterator with a non-existent partition name
        method: run search iterator with a non-existent partition name
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 65535,
                 ct.err_msg: f"partition name {partition_name} not found"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             partition_names=[partition_name],
                             batch_size=5,
                             limit=10,
                             output_fields=["id", "float", "varchar"],
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("anns_field", ["nonexistent"])
    def test_search_iterator_with_nonexistent_anns_field(self, anns_field):
        """
        target: test search iterator with a non-existent anns field
        method: run search iterator with a non-existent anns field
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1100,
                 ct.err_msg: f"failed to create query plan: failed to get field schema by name: "
                             f"fieldName({anns_field}) not found: invalid parameter"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             limit=10,
                             anns_field=anns_field,
                             output_fields=["id", "float", "varchar"],
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("round_decimal", ["tt"])
    def test_search_iterator_with_invalid_round_decimal(self, round_decimal):
        """
        target: test search iterator with invalid round_decimal
        method: run search iterator with invalid round_decimal
        expected: Raise exception
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        error = {ct.err_code: 1,
                 ct.err_msg: f"`round_decimal` value {round_decimal} is illegal"}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=5,
                             limit=10,
                             round_decimal=round_decimal,
                             output_fields=["id", "float", "varchar"],
                             check_task=CheckTasks.err_res,
                             check_items=error)
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

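Before the valid cases below, it may help to see the client-side loop they ultimately exercise; a minimal sketch assuming a pymilvus 2.5-style MilvusClient (the collection name, query vector, and filter are placeholders, not values from this patch):

# search_iterator accepts exactly one query vector and pages through results.
iterator = client.search_iterator(collection_name="my_collection",
                                  data=[[0.1] * default_dim],
                                  batch_size=40,
                                  filter="id >= 10",
                                  output_fields=["id"])
hits = []
while True:
    page = iterator.next()   # at most batch_size results per call
    if len(page) == 0:       # an empty page means the iterator is exhausted
        iterator.close()
        break
    hits.extend(page)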
class TestMilvusClientSearchIteratorValid(TestMilvusClientV2Base):
    """ Test case of search iterator """

    @pytest.mark.tags(CaseLabel.L0)
    def test_search_iterator_normal(self):
        """
        target: test search iterator normal
        method: 1. search iterator
                2. check the result, expect pk
        expected: search successfully
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim, consistency_level="Bounded")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 2})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        insert_ids = [i for i in range(default_nb)]
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=40,
                             limit=-1,
                             check_task=CheckTasks.check_search_iterator,
                             check_items={"batch_size": 40,
                                          "limit": -1})
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L0)
    @pytest.mark.parametrize("metric_type", ["COSINE", "IP"])
    @pytest.mark.parametrize("params", [{"radius": 0.8, "range_filter": 1}])
    def test_search_iterator_with_different_metric_type_with_params(self, metric_type, params):
        """
        target: test search iterator with COSINE and IP metric types and search params
        method: 1. search iterator
                2. check the result, expect pk
        expected: search successfully
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim,
                               metric_type=metric_type, consistency_level="Strong")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 0})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        insert_ids = [i for i in range(default_nb)]
        search_params = {"metric_type": metric_type, "params": params}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=100,
                             search_params=search_params,
                             check_task=CheckTasks.check_search_iterator,
                             check_items={"metric_type": metric_type,
                                          "radius": 0.8,
                                          "range_filter": 1})
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)

    @pytest.mark.tags(CaseLabel.L0)
    @pytest.mark.parametrize("metric_type", ["L2"])
    @pytest.mark.parametrize("params", [{"radius": 0.8, "range_filter": 1}])
    def test_search_iterator_with_L2_metric_type_with_params(self, metric_type, params):
        """
        target: test search iterator with L2 metric type and search params
        method: 1. search iterator
                2. check the result, expect pk
        expected: search successfully
        """
        client = self._client()
        collection_name = cf.gen_unique_str(prefix)
        self.using_database(client, "default")
        # 1. create collection
        self.create_collection(client, collection_name, default_dim,
                               metric_type=metric_type, consistency_level="Strong")
        collections = self.list_collections(client)[0]
        assert collection_name in collections
        self.describe_collection(client, collection_name,
                                 check_task=CheckTasks.check_describe_collection_property,
                                 check_items={"collection_name": collection_name,
                                              "dim": default_dim,
                                              "consistency_level": 0})
        # 2. insert
        rng = np.random.default_rng(seed=19530)
        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
        self.insert(client, collection_name, rows)
        # 3. search
        vectors_to_search = rng.random((1, default_dim))
        insert_ids = [i for i in range(default_nb)]
        search_params = {"metric_type": metric_type, "params": params}
        self.search_iterator(client, collection_name, vectors_to_search,
                             batch_size=100,
                             search_params=search_params,
                             check_task=CheckTasks.check_search_iterator,
                             check_items={"metric_type": metric_type,
                                          "radius": 0.8,
                                          "range_filter": 1})
        self.release_collection(client, collection_name)
        self.drop_collection(client, collection_name)
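The two range-filtered cases above pass radius and range_filter through search_params; a hedged sketch of that call shape (values copied from the tests, the collection name is a placeholder, and whether a given radius/range_filter pair is meaningful depends on the metric type):

# Range-search bounds ride inside search_params["params"].
search_params = {"metric_type": "COSINE", "params": {"radius": 0.8, "range_filter": 1}}
iterator = client.search_iterator(collection_name="my_collection",
                                  data=[[0.1] * default_dim],
                                  batch_size=100,
                                  search_params=search_params)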